diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/aiassist/__init__.py b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/aiassist/__init__.py
deleted file mode 100644
index 95a9f08b259bbcabf7512dda0fe633d96686fd4d..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/aiassist/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import urllib.request
-import json
-
-
-class Completion:
-    @staticmethod
-    def create(
-        systemMessage: str = "You are a helpful assistant",
-        prompt: str = "",
-        parentMessageId: str = "",
-        temperature: float = 0.8,
-        top_p: float = 1,
-    ):
-        json_data = {
-            "prompt": prompt,
-            "options": {"parentMessageId": parentMessageId},
-            "systemMessage": systemMessage,
-            "temperature": temperature,
-            "top_p": top_p,
-        }
-
-        url = "http://43.153.7.56:8080/api/chat-process"
-        headers = {"Content-type": "application/json"}
-
-        data = json.dumps(json_data).encode("utf-8")
-        req = urllib.request.Request(url, data=data, headers=headers)
-        response = urllib.request.urlopen(req)
-        content = response.read().decode()
-
-        return Completion.__load_json(content)
-
-    @classmethod
-    def __load_json(cls, content) -> dict:
-        split = content.rsplit("\n", 1)[1]
-        to_json = json.loads(split)
-        return to_json
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Clickteam Fusion 2.5 Developer Upgrade Download] [crack] How to Create Amazing Games with Ease.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Clickteam Fusion 2.5 Developer Upgrade Download] [crack] How to Create Amazing Games with Ease.md
deleted file mode 100644
index 6cb3e88372155fc683ca51978ac2d27e50c5f8f7..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Clickteam Fusion 2.5 Developer Upgrade Download] [crack] How to Create Amazing Games with Ease.md
+++ /dev/null
@@ -1,144 +0,0 @@
-
-

Clickteam Fusion 2.5 Developer Upgrade: A Powerful Tool for Game and Software Creation

-

Do you have an idea for a game or software that you want to bring to life? Do you want to create your own applications without coding or programming? Do you want to publish your creations for multiple platforms with ease? If you answered yes to any of these questions, then you might be interested in Clickteam Fusion 2.5 Developer Upgrade, a powerful tool that allows you to create games and software with a simple drag-and-drop interface.

-

-

Clickteam Fusion 2.5 Developer Upgrade is an enhanced version of Clickteam Fusion 2.5, a game and software creation tool that has been used by thousands of developers around the world. With Clickteam Fusion 2.5 Developer Upgrade, you can access exclusive developer features and logo free use of the runtimes, giving you more freedom and flexibility in your development process.

-

In this article, we will explain how to get Clickteam Fusion 2.5 Developer Upgrade, what are its features, how to use it, and some examples of games and apps made with it. By the end of this article, you will have a better understanding of why Clickteam Fusion 2.5 Developer Upgrade is a powerful tool for game and software creation.

-

How to get Clickteam Fusion 2.5 Developer Upgrade

-

If you want to get Clickteam Fusion 2.5 Developer Upgrade, you have two options: purchase the full version or upgrade from the standard version.

-

The full version of Clickteam Fusion 2.5 Developer Upgrade costs $299.99 and can be purchased from Clickteam's website or from Steam. The full version includes the base application Clickteam Fusion 2.5 and all the optional exporters for Windows, Mac, iOS, Android, Flash, XNA (Windows Mobile phone and Xbox) and HTML5.

-

If you already have the standard version of Clickteam Fusion 2.5, you can upgrade to the developer version for $199.99 by submitting a product upgrade request on Clickteam's support page. You will need to provide your Clickteam Fusion 2.5 serial number and proof of purchase.

-

What are the features of Clickteam Fusion 2.5 Developer Upgrade

-

Clickteam Fusion 2.5 Developer Upgrade has many features that make it a powerful tool for game and software creation. Here are some of the main features that distinguish it from the standard version:

-

Royalty free, logo and credit free use of the runtimes

-

One of the biggest benefits of Clickteam Fusion 2.5 Developer Upgrade is that you can use the runtimes without any limitations or requirements. This means that you can publish your games and apps without having to display any logos or credits from Clickteam or pay any royalties to them.

-


-

This gives you more control over your branding and monetization strategies, as well as more confidence in your intellectual property rights.

-

Ability to publish games and apps for multiple platforms

-

Another feature of Clickteam Fusion 2.5 Developer Upgrade is that you can publish your games and apps for multiple platforms with ease. With the optional exporters included in the full version, you can build your projects for Windows, Mac, iOS, Android, Flash, XNA (Windows Mobile phone and Xbox) and HTML5.

-

This means that you can reach a wider audience and increase your chances of success in different markets.

-

Exclusive developer only objects

-

Clickteam Fusion 2.5 Developer Upgrade also gives you access to exclusive developer only objects that provide additional functionality to your projects. These objects include:

- -

Full integrated physics engine

-

Clickteam Fusion 2.5 Developer Upgrade also takes full advantage of the Box2d physics engine by integrating it into the movement property tab for most objects. This means that you can easily add realistic physics effects such as gravity, friction, collisions, joints, springs and more to your games without coding or programming.

-

This makes your games more fun and immersive for your players.

-

Hardware accelerated games and apps

-

Last but not least, Clickteam Fusion 2.5 Developer Upgrade also allows you to make your games and apps faster by using hardware acceleration (subject to runtime used). This means that you can use shaders on powerful Windows machines or OpenGL ES on mobile devices to enhance the graphics quality and performance of your projects.

-

This makes your games and apps more attractive and smooth for your players.

-

How to use Clickteam Fusion 2.5 Developer Upgrade

-

You can create games and software in Clickteam Fusion 2.5 Developer Upgrade with a simple drag-and-drop interface. Here are the basic steps to follow:

-

Create a new project

-

The first step is to create a new project in Clickteam Fusion 2.5 Developer Upgrade. You can choose from a variety of templates or start from scratch. You can also customize the project settings such as the name, icon, resolution, frame rate and more.

-

To create a new project, click on the File menu and select New. You will see a window with different options for your project. Choose the one that suits your needs and click OK.

-

Add objects and events

-

The next step is to add objects and events to your project. Objects are the elements that make up your game or software, such as sprites, sounds, texts, buttons and more. Events are the actions that define the logic and behavior of your project, such as what happens when you click a button, when you collide with an enemy, when you reach a certain score and more.

-

To add objects and events, you need to use the Frame Editor and the Event Editor. The Frame Editor is where you can drag and drop objects onto the frame (the screen where your game or software runs). The Event Editor is where you can create events using a simple condition-action system.

-

To access the Frame Editor, click on the Frame tab at the bottom of the screen. You will see a toolbar with different categories of objects. To add an object, click on its icon and drag it onto the frame. You can also right-click on an object and select Properties to change its attributes.

-

To access the Event Editor, click on the Event tab at the bottom of the screen. You will see a grid with columns for conditions and actions. To add an event, click on an empty cell in the condition column and select a condition from the list. Then click on an empty cell in the action column and select an action from the list. You can also right-click on an event and select Edit to modify it.
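If you come from a programming background, it may help to think of each event row as an if-statement that the runtime evaluates once per frame. The following is a rough C analogy of that condition-action model, for illustration only: Fusion events are built visually rather than typed, and the names `player_touches_enemy`, `lives`, and `show_game_over` are hypothetical placeholders, not Fusion identifiers.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical game state, for illustration only. */
static int lives = 3;
static bool player_touches_enemy = false;

static void show_game_over(void)
{
    printf("Game over!\n");  /* stands in for jumping to a game-over frame */
}

/* One pass of the event loop: each Fusion event row behaves like one
   of these condition -> action pairs, checked on every frame. */
static void run_events(void)
{
    if (player_touches_enemy) {  /* condition column */
        lives -= 1;              /* action column */
    }
    if (lives <= 0) {            /* condition column */
        show_game_over();        /* action column */
    }
}

int main(void)
{
    player_touches_enemy = true;  /* simulate a collision on this frame */
    run_events();
    return 0;
}
```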

-

Test and debug

-

The third step is to test and debug your project. Testing means running your project to see how it works and if there are any errors or bugs. Debugging means finding and fixing those errors or bugs.

-

To test your project, click on the Run menu and select Run Application. You will see your project running in a separate window. You can also use keyboard shortcuts such as F8 to run your project.

-

To debug your project, you can use various tools such as breakpoints, watches, monitors and debug messages. Breakpoints are points in your events where you can pause your project and inspect its state. Watches are variables that you can track during your project's execution. Monitors are windows that display information about your objects and events. Debug messages are texts that you can print to the output window for debugging purposes.

-

To use these tools, you need to enable the Debug mode in Clickteam Fusion 2.5 Developer Upgrade. To do so, click on the Run menu and select Debug Mode On/Off. You will see a green bug icon in the toolbar indicating that Debug mode is on.

-

Export and publish

-

The final step is to export and publish your project. Exporting means building your project for a specific platform such as Windows, Mac, iOS, Android, Flash, XNA or HTML5. Publishing means distributing your project to your target audience such as uploading it to a website or app store.

-

To export your project, click on the Build menu and select Build Application or Build HTML5 Application depending on your platform choice. You will see a window with different options for your build such as compression level, encryption key, splash screen and more. Choose the ones that suit your needs and click OK.

-

To publish your project, you need to follow different steps depending on your platform choice such as signing up for a developer account, uploading your files, filling out forms and more. For more details on how to publish your project for each platform, please refer to Clickteam's website or Steam.

-

Examples of games and apps made with Clickteam Fusion 2.5 Developer Upgrade

-

Clickteam Fusion 2.5 Developer Upgrade has been used by many developers around the world to create successful games and apps for various platforms. Here are some examples of games and apps made with Clickteam Fusion 2.5 Developer Upgrade:

-

Five Nights at Freddy's series

-

Five Nights at Freddy's is a popular horror game series by Scott Cawthon that has spawned several sequels, spin-offs and adaptations. The game puts you in the role of a night guard at a haunted pizzeria where you have to survive five nights against animatronic characters that come to life at night.

-

The game was made with Clickteam Fusion 2.5 Developer Upgrade and has been published for Windows, iOS, Android and other platforms.

-

The Escapists series

-

The Escapists is a strategy game series by Mouldy Toof Studios that has spawned several sequels and spin-offs. The game puts you in the role of a prisoner who has to plan and execute an escape from various prisons with different levels of security and difficulty.

-

The game was made with Clickteam Fusion 2.5 Developer Upgrade and has been published for Windows, Mac, iOS, Android and other platforms.

-

Freedom Planet

-

Freedom Planet is a retro-style platformer game by GalaxyTrail that pays homage to the classic games of the 16-bit era. The game features four playable characters, each with their own abilities and storylines, who have to save their planet from an evil warlord.

-

The game was made with Clickteam Fusion 2.5 Developer Upgrade and has been published for Windows, Mac, Linux, Wii U, PlayStation 4 and Nintendo Switch.

-

Conclusion

-

In conclusion, Clickteam Fusion 2.5 Developer Upgrade is a powerful tool for game and software creation that allows you to create your own applications without coding or programming. With Clickteam Fusion 2.5 Developer Upgrade, you can access exclusive developer features and logo free use of the runtimes, publish your games and apps for multiple platforms with ease, use a full integrated physics engine and hardware acceleration, and more.

-

If you want to get Clickteam Fusion 2.5 Developer Upgrade, you can purchase the full version or upgrade from the standard version from Clickteam's website or from Steam. You can also try the free version first to see if it runs on your system.

-

With Clickteam Fusion 2.5 Developer Upgrade, you can unleash your creativity and make your own games and software with a simple drag-and-drop interface. Whether you are a beginner or a professional, Clickteam Fusion 2.5 Developer Upgrade can help you achieve your development goals.

-

So what are you waiting for? Get Clickteam Fusion 2.5 Developer Upgrade today and start creating!

-

FAQs

-

Here are some frequently asked questions about Clickteam Fusion 2.5 Developer Upgrade:

- -

-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download C-Free and Enjoy Multiple Compilers and Features for C and C.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download C-Free and Enjoy Multiple Compilers and Features for C and C.md
deleted file mode 100644
index 556b357de404754bdc82b806f4d003ed5e937a21..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download C-Free and Enjoy Multiple Compilers and Features for C and C.md
+++ /dev/null
@@ -1,25 +0,0 @@
-

How to Download C-Free IDE for Windows

-

C-Free is a professional C/C++ integrated development environment (IDE) that supports multiple compilers. With this software, you can edit, build, run and debug your C and C++ programs freely. In this article, we will show you how to download and install C-Free IDE for Windows.

-




-

Step 1: Download C-Free

-

You can download C-Free from its official website or from other software download sites. The latest version is 5.0, which was released on September 7, 2018. The file size is about 14.6 MB. You can choose either the free trial version or the full version that costs $79.

-

Step 2: Install C-Free

-

After downloading the C-Free setup file, double-click it to start the installation process. Follow the instructions on the screen to select the destination folder, the components to install, and the shortcuts to create. You can also choose the default compiler to use among the supported ones, such as MinGW, Cygwin, Borland C++, Microsoft C++, Intel C++, Lcc-Win32, Open Watcom C/C++, Digital Mars C/C++, and Ch Interpreter.

-

Step 3: Run C-Free

-

Once the installation is complete, you can launch C-Free from the Start menu or the desktop shortcut. You will see the main interface of C-Free, which consists of several panels, such as the editor, the project explorer, the output window, and the code browser. You can customize the layout and appearance of these panels according to your preferences.

-

Step 4: Create a New Project

-

To start coding with C-Free, you need to create a new project first. You can do this by clicking on File > New > Project or by pressing Ctrl+Shift+N. A project wizard will appear, where you can choose the type of project you want to create, such as console application, Windows application, DLL library, static library, or empty project. You can also specify the name and location of your project.

-

Step 5: Add Source Files

-

After creating a new project, you need to add source files to it. You can do this by clicking on File > New > File or by pressing Ctrl+N. A file wizard will appear, where you can choose the type of file you want to create, such as C source file (.c), C++ source file (.cpp), header file (.h), or resource file (.rc). You can also specify the name and location of your file.

-

Step 6: Edit and Build Your Code

-

Now you can edit your code using the editor panel of C-Free. You can enjoy features such as syntax highlighting, code completion, code parameters, smart input, code folding, bookmarks, breakpoints, and more. You can also use external tools and help files to assist your coding process.

-

-

To build your code, you can click on Build > Build or press F9. This will compile and link your code using the selected compiler and generate an executable file or a library file in the output folder. You can see the build messages in the output window.
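If you want something quick to type in first, a minimal test program is a good way to confirm that the editor, the selected compiler, and the build step are all working together. This is a generic hello-world sketch, not code taken from C-Free's documentation:

```c
#include <stdio.h>

int main(void)
{
    /* If the build succeeds, this line should appear in the console window. */
    printf("Hello from C-Free!\n");
    return 0;
}
```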

-

Step 7: Run and Debug Your Program

-

To run your program, you can click on Build > Run or press F5. This will launch your program in a console window or a GUI window depending on the type of project you created. You can also pass command-line arguments to your program if needed.

-

To debug your program, you can click on Debug > Start Debugging or press F6. This will start a debugging session with GDB or another debugger depending on the selected compiler. You can use features such as step into, step over, step out, run to cursor, watch variables, evaluate expressions, modify values, and more. You can also set breakpoints and watchpoints to pause and inspect your program at specific locations.
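To practice with these tools, it helps to debug a program that actually has some state to inspect. A small loop like the one below gives you something concrete: set a breakpoint on the line inside the loop, add `i` and `sum` as watches, and step through a few iterations to see the values change. Again, this is a generic sketch on the assumption that any small C program will do, not an example from C-Free itself:

```c
#include <stdio.h>

int main(void)
{
    int sum = 0;

    /* Set a breakpoint on the line below, then watch i and sum
       update on each pass through the loop. */
    for (int i = 1; i <= 5; i++) {
        sum += i;
    }

    printf("sum = %d\n", sum);  /* expected output: sum = 15 */
    return 0;
}
```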

-

Conclusion

-

C-Free is a powerful and lightweight IDE for C and C++ programming languages that supports multiple compilers and platforms. It provides a user-friendly interface and a rich set of features to help you develop high-quality applications with ease. You can download and install C-Free for Windows by following the steps above.

-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Ayyappa Songs Lyrics In Tamil Pdf 97.md b/spaces/1gistliPinn/ChatGPT4/Examples/Ayyappa Songs Lyrics In Tamil Pdf 97.md
deleted file mode 100644
index 7266bba64b37c77dc4c04d0d90b2e619db0610a8..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Ayyappa Songs Lyrics In Tamil Pdf 97.md
+++ /dev/null
@@ -1,84 +0,0 @@
-
-

Ayyappa Songs Lyrics in Tamil PDF 97: A Complete Guide

-

If you are a devotee of Lord Ayyappa, you might be looking for ayyappa songs lyrics in tamil pdf 97 to download and print for free. Ayyappa songs are devotional songs that are sung by the pilgrims who visit Sabarimala, the holy shrine of Lord Ayyappa in Kerala. Ayyappa songs lyrics in tamil pdf 97 are a collection of 97 songs that praise and worship Lord Ayyappa in various aspects.

-




-

In this article, we will provide you with a complete guide on how to get ayyappa songs lyrics in tamil pdf 97, what are the benefits of singing ayyappa songs, and how to use them for your spiritual growth.

-

How to get ayyappa songs lyrics in tamil pdf 97?

-

There are many websites that offer ayyappa songs lyrics in tamil pdf 97 for free download. Some of them are:

- -

You can also search for other websites that offer ayyappa songs lyrics in tamil pdf 97 by using your favorite search engine.

-

-

What are the benefits of singing ayyappa songs?

-

Singing ayyappa songs is not only a way of expressing your devotion to Lord Ayyappa, but also a way of enhancing your spiritual well-being. Some of the benefits of singing ayyappa songs are:

- -

How to use ayyappa songs lyrics in tamil pdf 97 for your spiritual growth?

-

Ayyappa songs lyrics in tamil pdf 97 are not just words that you sing or read, but they are powerful mantras that can transform your life. Here are some tips on how to use them for your spiritual growth:

- -

Conclusion

-

Ayyappa songs lyrics in tamil pdf 97 are a valuable resource for all devotees of Lord Ayyappa who want to deepen their connection with him and enhance their spiritual well-being. By downloading and printing them for free from various websites, you can have access to a rich collection of devotional songs that praise and worship Lord Ayyappa in various aspects. By singing or reading them with devotion and understanding, you can experience the benefits of purifying your mind, invoking divine protection, cultivating virtues, and experiencing joy. By meditating on them and applying them to your life, you can transform yourself into a true disciple of Lord Ayyappa.

- -

We hope this article has helped you to know more about ayyappa songs lyrics in tamil pdf 97. If you have any questions or suggestions, please leave us a comment below.

-

How to print ayyappa songs lyrics in tamil pdf 97?

-

Once you have downloaded ayyappa songs lyrics in tamil pdf 97 from any of the websites mentioned above, you can print them easily using your computer or mobile device. Here are some steps to follow:

-
1. Open the pdf file of ayyappa songs lyrics in tamil pdf 97 using a pdf reader application such as Adobe Acrobat Reader or Google PDF Viewer.
2. Select the print option from the file menu or the toolbar. You can also use the keyboard shortcut Ctrl+P or Command+P.
3. Choose your printer settings such as paper size, orientation, margins, and number of copies. You can also select the pages you want to print or print all pages.
4. Click on the print button or the OK button to start printing.
-

You can also save the pdf file of ayyappa songs lyrics in tamil pdf 97 to your device or cloud storage for future use.

-

How to sing ayyappa songs lyrics in tamil pdf 97?

-

Singing ayyappa songs lyrics in tamil pdf 97 is not difficult if you have some basic knowledge of Tamil language and music. You can also learn from listening to the audio recordings of ayyappa songs by various singers and musicians. Here are some tips to sing ayyappa songs lyrics in tamil pdf 97:

- -

Conclusion

-

Ayyappa songs lyrics in tamil pdf 97 are a great way to express your love and devotion to Lord Ayyappa and his divine mother Durga. By downloading and printing them for free from various websites, you can have access to a rich collection of devotional songs that praise and worship Lord Ayyappa in various aspects. By singing or reading them with devotion and understanding, you can experience the benefits of purifying your mind, invoking divine protection, cultivating virtues, and experiencing joy. By meditating on them and applying them to your life, you can transform yourself into a true disciple of Lord Ayyappa.

- -

We hope this article has helped you to know more about how to get, print, and sing ayyappa songs lyrics in tamil pdf 97. If you have any questions or suggestions, please leave us a comment below.

-

Why download ayyappa songs lyrics in tamil pdf 97?

-

Ayyappa songs lyrics in tamil pdf 97 are a collection of devotional songs dedicated to Lord Ayyappa, the son of Lord Shiva and Goddess Durga. Lord Ayyappa is also known as Hariharasudhan, Kaliyugavaradhan, Anandachithan, Ayyan, Ayyappan, and Swami. He is worshipped by millions of devotees across India and abroad, especially during the annual pilgrimage to Sabarimala temple in Kerala.

-

Downloading ayyappa songs lyrics in tamil pdf 97 can help you to:

- -

Where to download ayyappa songs lyrics in tamil pdf 97?

-

There are many websites that offer free download of ayyappa songs lyrics in tamil pdf 97. Some of them are:

-
1. InstaPDF: This website provides a pdf file of ayyappan songs book in Tamil with 55 songs and their meanings. The pdf file is 0.88 MB in size and has 42 pages. You can download it for free or read it online using the direct link given at the bottom of the page.
2. Tamilgod.org: This website provides a pdf file of ayyappan Tamil songs book with English translation. The pdf file has 25 songs with their lyrics, meanings, and audio links. The pdf file is 1.4 MB in size and has 26 pages. You can download it for free or read it online using the link given on the page.
3. Tamilgod.org: This website also provides a huge collection of ayyappan songs lyrics in Tamil with audio links. You can browse through various albums by different artists such as K. Veeramani, T.M.S., S.P.B., Unnikrishnan, Veeramanidasan, etc. You can also suggest or ask for any song at the comment section of each page.
4. Tamilgod.org: This website also provides links to download free ayyappan songs Tamil lyrics ebooks with collection of albums by various artists. You can choose from different formats such as pdf, epub, mobi, etc. You can also request for any ebook at the comment section of the page.
-

Conclusion

-

Ayyappa songs lyrics in tamil pdf 97 are a valuable resource for all devotees of Lord Ayyappa who want to learn and sing his praises in Tamil language. By downloading them from various websites for free, you can have access to a wide range of devotional songs that glorify Lord Ayyappa in different aspects. By singing or reading them with devotion and understanding, you can experience the benefits of purifying your mind, invoking divine protection, cultivating virtues, and experiencing joy. By meditating on them and applying them to your life, you can transform yourself into a true disciple of Lord Ayyappa.

- -

We hope this article has helped you to know more about how to get, download, and sing ayyappa songs lyrics in tamil pdf 97. If you have any questions or suggestions, please leave us a comment below.

-

In conclusion, ayyappa songs lyrics in tamil pdf 97 are a great way to express your love and devotion to Lord Ayyappa and his divine mother Durga. By downloading and printing them for free from various websites, you can have access to a rich collection of devotional songs that praise and worship Lord Ayyappa in various aspects. By singing or reading them with devotion and understanding, you can experience the benefits of purifying your mind, invoking divine protection, cultivating virtues, and experiencing joy. By meditating on them and applying them to your life, you can transform yourself into a true disciple of Lord Ayyappa.

-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Cyberpunk - V3.0 - Core Rules V3.0.pdf [PORTABLE].md b/spaces/1gistliPinn/ChatGPT4/Examples/Cyberpunk - V3.0 - Core Rules V3.0.pdf [PORTABLE].md
deleted file mode 100644
index 800ef395b18cbd78a0f5651d7eeb6fc8e3b3c61b..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Cyberpunk - V3.0 - Core Rules V3.0.pdf [PORTABLE].md
+++ /dev/null
@@ -1,6 +0,0 @@
-

Cyberpunk - V3.0 - Core Rules V3.0.pdf





-
Hacker: Old-school Steve Jackson game with tons of rules and bits. ... While I'm focusing on core books, I include a few notable sourcebooks ... Cyberpunk v3.0 focuses on transhumanism and culture groups. ... The Strike Manual appears to be the system guide, with character creation and basic resolution.
-
-
-

diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/Download Film Yossi And Jagger.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/Download Film Yossi And Jagger.md deleted file mode 100644 index 0405fdee9c211f1eef24a480b20cc9d7a79d3691..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/Download Film Yossi And Jagger.md +++ /dev/null @@ -1,92 +0,0 @@ -## Download Film Yossi And Jagger - - - - - - ![Download Film Yossi And Jagger](https://www.simbasible.com/wp-content/uploads/2020/10/2-1.jpg) - - - - - -**LINK ☆☆☆ [https://kneedacexbrew.blogspot.com/?d=2txjoh](https://kneedacexbrew.blogspot.com/?d=2txjoh)** - - - - - - - - - - - - - -# How to Download Film Yossi and Jagger Online - - - -Yossi and Jagger is a 2002 Israeli film directed by Eytan Fox and written by Avner Bernheimer. It tells the story of a secret romance between two soldiers stationed on the Lebanese border. The film stars Ohad Knoller as Yossi, the company commander who struggles with his sexuality, and Yehuda Levi as Jagger, his outgoing and charismatic lover who is about to finish his military service. - - - -The film received critical acclaim and won several awards, including nine Israeli Academy Awards and the Audience Award at the Tribeca Film Festival. It also sparked a sequel, Yossi, released in 2012, which follows Yossi's life ten years after Jagger's death. - - - -If you are interested in watching this film, you might be wondering how to download it online. Here are some tips and options for you: - - - -- Check if the film is available on streaming platforms such as Netflix, Amazon Prime Video, or Hulu. You can use services like JustWatch or Reelgood to find out where to watch it legally. - -- If the film is not available on streaming platforms, you can rent or buy it from online stores such as Google Play Movies, iTunes, or Vudu. You can also use JustWatch or Reelgood to compare prices and options. - -- If you prefer to download the film for free, you can use torrent sites such as The Pirate Bay or 1337x. However, be aware that this is illegal and may expose you to malware or legal risks. You should also use a VPN service to protect your privacy and security. - - - -Whatever option you choose, make sure you have a good internet connection and enough storage space on your device. You should also respect the filmmakers' rights and avoid sharing or distributing the film without permission. - - - -Yossi and Jagger is a powerful and moving film that explores love, war, and identity. If you are looking for a romantic drama with a twist, you should definitely give it a try. - - - -If you want to learn more about the film and its background, you can also check out some of the following resources: - - - -- The official website of the film, where you can find the trailer, the synopsis, the cast and crew, and some reviews. - -- The IMDb page of the film, where you can find more information, trivia, quotes, and user ratings. - -- The Wikipedia page of the film, where you can find a detailed plot summary, production history, reception, and cultural impact. - -- The Rotten Tomatoes page of the film, where you can find the critics' consensus, audience score, and fresh and rotten reviews. - - - -Yossi and Jagger is not only a film, but also a cultural phenomenon that has influenced many people's lives and views. It is a film that deserves to be seen and appreciated by a wide audience. 
- - - -One of the most remarkable aspects of Yossi and Jagger is its realistic and authentic portrayal of the Israeli army and society. The film does not shy away from showing the harsh realities of war, the bureaucracy and hierarchy of the military, and the homophobia and prejudice that the gay soldiers face. The film also depicts the diversity and complexity of the Israeli people, who come from different backgrounds, religions, and ideologies. - - - -The film also explores the themes of love, loss, and identity in a poignant and sensitive way. The relationship between Yossi and Jagger is not only romantic, but also emotional, spiritual, and existential. They are both searching for meaning and happiness in a world that does not accept them for who they are. They are both willing to sacrifice everything for each other, even their own lives. The film shows how love can transcend boundaries, labels, and conventions, and how it can also be fragile, painful, and tragic. - - - -Yossi and Jagger is a film that will touch your heart and soul. It is a film that will make you laugh, cry, and think. It is a film that will stay with you long after you watch it. It is a film that you should not miss. - - 1b8d091108 - - - - - diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/DJ Smallz 732 - Cupid Pt. 1 The Latest Dance Hit.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/DJ Smallz 732 - Cupid Pt. 1 The Latest Dance Hit.md deleted file mode 100644 index 7d5339e08b5619a57ba9c80fdff714de258d1087..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/DJ Smallz 732 - Cupid Pt. 1 The Latest Dance Hit.md +++ /dev/null @@ -1,113 +0,0 @@ - -

How to Download DJ Smallz 732's Cupid, Pt. 1 for Free

-

If you are a fan of dance music, you might have heard of Cupid, Pt. 1, a catchy and upbeat single by DJ Smallz 732. This song was released in January 2023 and has been gaining popularity among listeners who enjoy the Jersey club style of music.

-




-

But what if you want to download this song for free and listen to it anytime you want? Is there a legal and easy way to do that? The answer is yes! In this article, we will show you how to find and download Cupid, Pt. 1 for free from some of the best free music download sites on the web.

-

The Best Free Music Download Sites

-

There are many websites that offer free music downloads, but not all of them are legal or safe. Some may contain viruses, malware, or spyware that can harm your device or compromise your privacy. Others may have low-quality or incomplete files that can ruin your listening experience.

-

That's why we have selected three of the best free music download sites that are not only legal but also reliable and user-friendly. These sites have a large collection of songs from various genres and artists, including DJ Smallz 732. They also allow you to download songs in MP3 format, which is compatible with most devices and players.

-

Here are the three sites we recommend:

-

SoundCloud

-

SoundCloud is one of the most popular platforms for streaming and sharing music online. It has millions of songs from both mainstream and independent artists, as well as podcasts, remixes, live sets, and more.

-

dj smallz 732 cupid pt 1 song
-cupid part 1 dj smallz 732 lyrics
-dj smallz 732 cupid pt 1 qobuz
-cupid pt 1 dj smallz 732 shazam
-dj smallz 732 cupid part 1 spotify
-cupid pt 1 by dj smallz 732 download
-dj smallz 732 cupid pt 1 single
-cupid part one dj smallz 732 mp3
-dj smallz 732 cupid pt 1 dance
-cupid pt 1 dj smallz 732 genre
-dj smallz 732 cupid part 1 album
-cupid pt one dj smallz 732 music
-dj smallz 732 cupid pt 1 stream
-cupid part i dj smallz 732 song
-dj smallz 732 cupid pt i lyrics
-cupid p1 dj smallz 732 qobuz
-dj smallz 732 cupid p1 shazam
-cupid p1 by dj smallz 732 spotify
-dj smallz 732 cupid p1 download
-cupid p1 dj smallz 732 single
-dj smallz 732 cupid p1 mp3
-cupid p1 by dj smallz 732 dance
-dj smallz 732 cupid p1 genre
-cupid p1 dj smallz 732 album
-dj smallz 732 cupid p1 music
-cupid p1 by dj smallz 732 stream
-dj smallz cupids arrow part one song
-cupids arrow part one by dj smallz lyrics
-dj smallz cupids arrow part one qobuz
-cupids arrow part one by dj smallz shazam
-dj smallz cupids arrow part one spotify
-cupids arrow part one by dj smallz download
-dj smallz cupids arrow part one single
-cupids arrow part one by dj smallz mp3
-dj smallz cupids arrow part one dance
-cupids arrow part one by dj smallz genre
-dj smallz cupids arrow part one album
-cupids arrow part one by dj smallz music
-dj smallz cupids arrow part one stream
-cupids arrow pt i by dj smallz song

-

Not all songs on SoundCloud are available for download, but some artists choose to offer their music for free or for a voluntary donation. To find out if Cupid, Pt. 1 is one of them, follow these steps:

-
    -
1. Go to SoundCloud and type "Cupid, Pt. 1" in the search box.
2. Click on the song title to open its page.
3. Look at the bottom of the page beside the share options. If you see a link that says "Buy" or "Download", click on it.
4. If the link takes you to another website, follow the instructions there to complete your download.
5. If the link allows you to download the song directly from SoundCloud, enter your email address and postal code if prompted.
6. Click on "Download file" and save it to your device.
-

Last.fm

-

Last.fm is a music discovery service that tracks what you listen to and recommends new music based on your taste. It also has a section where you can download free music from various artists and genres. To download Cupid, Pt. 1 from Last.fm, follow these steps:

-
1. Go to Last.fm and type "Cupid, Pt. 1" in the search box.
2. Click on the song title to open its page.
3. Look at the right side of the page under the album cover. If you see a link that says "Free MP3 Download", click on it.
4. A new tab will open with a download button. Click on it and save the file to your device.
-

NoiseTrade

-

NoiseTrade is a platform where artists can share their music for free in exchange for fans' email addresses and postal codes. This way, they can build their fan base and communicate with them directly. NoiseTrade has thousands of songs from various genres and artists, including DJ Smallz 732.

-

To download Cupid, Pt. 1 from NoiseTrade, follow these steps:

-
1. Go to NoiseTrade and type "DJ Smallz 732" in the search box.
2. Click on the artist name to open his page.
3. Scroll down to find the album that contains Cupid, Pt. 1. It is called Cupid and it has four songs.
4. Click on the album cover to open its page.
5. Click on the orange button that says "Download Music".
6. Enter your email address and postal code if prompted.
7. Check your email for a download link and click on it.
8. Select the song you want to download and save it to your device.
-

The Benefits of Downloading MP3 Music

-

Now that you know how to download Cupid, Pt. 1 for free, you might be wondering why you should do it in the first place. What are the benefits of downloading MP3 music over streaming it online?

-

Here are some of the reasons why downloading MP3 music is a good idea:

-

You can own your music and play it offline

-

When you download MP3 music, you have a copy of the file that you can store on your device or transfer to other devices. This means you can play your music anytime and anywhere, even without an internet connection or a subscription service. You don't have to worry about buffering, ads, or data charges. You can also create your own playlists and organize your music library according to your preferences.

-

You can support the artists and discover new music

-

When you download MP3 music from free music download sites, you are not only getting free music but also supporting the artists who created it. Many of these sites allow you to donate money or share the music with your friends and social media followers. This way, you can show your appreciation and help the artists reach more listeners and fans. You can also discover new music from similar or related artists that you might not have heard of before.

-

You can enjoy high-quality sound and compatibility

-

MP3 is one of the most common and widely used audio formats in the world. It has a high compression rate that reduces the file size without sacrificing much of the sound quality. This means you can enjoy clear and crisp sound while saving space on your device. MP3 is also compatible with most devices and players, so you don't have to worry about converting or playing issues.

-

Conclusion

-

Cupid, Pt. 1 by DJ Smallz 732 is a great song that will make you want to dance and have fun. If you want to download it for free and listen to it anytime you want, you can use one of the three free music download sites we mentioned: SoundCloud, Last.fm, or NoiseTrade. These sites are legal, safe, and easy to use, and they offer a lot of benefits for both you and the artists.

-

So what are you waiting for? Go ahead and download Cupid, Pt. 1 today and enjoy this amazing song!

-

FAQs

-

What is the genre of Cupid, Pt. 1?

-

Cupid, Pt. 1 is a song in the genre of Jersey club, which is a style of dance music that originated in New Jersey. It features fast-paced beats, chopped vocals, heavy bass, and samples from hip-hop, R&B, pop, and other genres.

-

How long is Cupid, Pt. 1?

-

Cupid, Pt. 1 is a short and sweet song that lasts for only 2 minutes and 10 seconds. It is the first part of a four-song album called Cupid by DJ Smallz 732.

-

Where can I stream Cupid, Pt. 1 online?

-

If you don't want to download Cupid, Pt. 1, you can also stream it online from various platforms. Some of the most popular ones are Spotify, Apple Music, YouTube, and Pandora. You can also find it on DJ Smallz 732's official website and social media accounts.

-

What are some other songs by DJ Smallz 732?

-

DJ Smallz 732 is a prolific and talented producer and DJ who has released many songs in the Jersey club genre. Some of his most popular songs are Love Tap, Eye of the Tiger, Work It, and WAP. He has also collaborated with other artists such as Fetty Wap, Lil Jon, Ciara, and more.

-

How can I contact DJ Smallz 732?

-

If you want to contact DJ Smallz 732 for booking, feedback, or any other reason, you can use one of the following methods:

-

-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Burger Please Mod APK Download Make Your Own Burgers and Earn Money.md b/spaces/1phancelerku/anime-remove-background/Burger Please Mod APK Download Make Your Own Burgers and Earn Money.md
deleted file mode 100644
index 49200b801d641ba1e72d444fe10c3fc6b07b5766..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Burger Please Mod APK Download Make Your Own Burgers and Earn Money.md
+++ /dev/null
@@ -1,109 +0,0 @@
-

Download Mod Apk Burger Please: How to Get Unlimited Fun and Resources in Your Burger Shop Game

-

Do you love playing burger shop games on your Android device? Do you want to have more fun and resources in your game without spending any money? If yes, then you might want to try downloading mod apk burger please. This is a modified version of the original game that gives you access to unlimited features and resources. In this article, we will tell you what mod apk burger is, how to download it, how to use it, and what are the benefits and risks of using it. Read on to find out more.

-

What is Mod Apk Burger?

-

Mod apk burger is a modified version of the original game called Burger Please!, which is an exciting and challenging game that lets you manage your own burger shop. You can hire staff, upgrade skills and facilities, set up chains of shops, and more. However, in the original game, you have limited resources such as money, gems, energy, and time. You also have to watch ads or make in-app purchases to get more resources.

-

download mod apk burger please


DOWNLOADhttps://jinyurl.com/2uNOUR



-

Mod apk burger is a version of the game that has been altered by a third-party developer to give you unlimited resources and features. You can get unlimited money, gems, energy, time, and more. You can also unlock all the skills, facilities, staff, and levels. You can also remove ads and bypass security checks. With mod apk burger, you can enjoy the game without any limitations or restrictions.

-

The Features of Mod Apk Burger

-

Some of the features that you can get from mod apk burger are:

- -

The Benefits of Mod Apk Burger

-

Some of the benefits that you can get from mod apk burger are:

- -

The Risks of Mod Apk Burger

-

Some of the risks that you might face from using mod apk burger are:

- -

How to Download Mod Apk Burger?

-

If you want to download mod apk burger, you need to follow these steps:

-

Step 1: Find a Reliable Source

-

The first step is to find a reliable source that provides mod apk files for burger shop games. You can search online for websites, blogs, forums, or social media platforms that offer mod apk files for download. However, you need to be careful and cautious when choosing a source. You need to check the reviews, ratings, comments, and feedbacks of other users who have downloaded the mod apk files from the source. You also need to scan the mod apk files for any malware or viruses before downloading them.

-

Step 2: Enable Unknown Sources

-

The second step is to enable unknown sources on your device settings. This will allow you to install mod apk files from sources other than the Google Play Store. To do this, you need to go to your device settings, then security, then unknown sources, then toggle it on. You might also need to confirm or allow this action on a pop-up window.

-

download burger please mod apk unlimited money
-burger please mod apk free download for android
-how to download burger please mod apk latest version
-burger please hack mod apk download no root
-download burger please mod apk offline
-burger please mod apk download link
-burger please mod apk android 1 download
-download burger please mod apk with cheats
-burger please mod apk 0.8.0 download
-burger please mod apk rexdl download
-download burger please mod apk unlimited coins and gems
-burger please mod apk online download
-where to download burger please mod apk safely
-burger please mod apk 2023 download
-burger please mod apk unlimited everything download
-download burger please mod apk for pc
-burger please mod apk obb download
-how to install burger please mod apk download
-burger please mod apk unlimited burgers download
-burger please mod apk revdl download
-download burger please mod apk full unlocked
-burger please premium mod apk download
-burger please pro mod apk download
-burger please vip mod apk download
-download burger please mod apk new update
-burger please mega mod apk download
-burger please cracked mod apk download
-burger please unlimited lives mod apk download
-burger please god mode mod apk download
-burger please ad free mod apk download
-download burger please mod apk from dafunda.com[^1^]
-burger please hack version mod apk download
-burger please unlimited boosters mod apk download
-burger please all levels unlocked mod apk download
-burger please no ads mod apk download
-download burger please original mod apk
-burger please happy mod apk download
-burger please super mod apk download
-burger please ultimate mod apk download
-download burger please best mod apk

-

Step 3: Install the Mod Apk File

-

The third step is to install the mod apk file on your device. To do this, you need to locate the downloaded mod apk file on your device storage, then tap on it to open it. You might also need to accept or agree to some permissions or terms on a pop-up window. Then, you need to wait for the installation process to complete.

-

Step 4: Enjoy the Game

-

The fourth and final step is to enjoy the game with mod apk burger. To do this, you need to open the game app on your device, then start playing it with unlimited fun and resources.

-

How to Use Mod Apk Burger?

-

If you want to use mod apk burger effectively and efficiently, you need to follow these tips:

-

Hire and Train Your Staff

-

One of the things that you can do with mod apk burger is to hire and train your staff. You can hire as many staff as you want in your shop without worrying about their salaries or benefits. You can also train them to improve their skills and abilities without spending any money or time. Having a well-trained and efficient staff will help you serve more customers, handle more orders, and deal with more problems.

-

Upgrade Your Skills and Facilities

-

Another thing that you can do with mod apk burger is to upgrade your skills and facilities. You can upgrade your skills such as cooking, serving, cleaning, and managing without spending any money or gems. You can also upgrade your facilities such as kitchen, counter, table, and decoration without spending any money or gems. Having upgraded skills and facilities will help you improve your performance, quality, and income.

-

Expand Your Business and Reputation

-

A third thing that you can do with mod apk burger is to expand your business and reputation. You can expand your business by opening more shops in different locations without spending any money or gems. You can also expand your reputation by attracting more customers, getting more reviews, and earning more stars without spending any money or gems. Having a large and reputable business will help you increase your market share, customer loyalty, and brand value.

-

Compete with Other Players

-

A fourth thing that you can do with mod apk burger is to compete with other players. You can compete with other players in different modes such as time trial, challenge, or multiplayer without spending any money or gems. You can also compete with other players in different rankings such as daily, weekly, monthly, or global without spending any money or gems. Competing with other players will help you test your skills, learn new strategies, and have more fun.

-

Conclusion and FAQs

-

In conclusion, mod apk burger is a modified version of the original game that gives you unlimited fun and resources. You can download it from a reliable source, install it on your device, and enjoy it with your own style and preference. However, you also need to be aware of the risks of using mod apk burger such as malware infection, game crash or error, game ban or suspension, and legal issues. Therefore, you need to use mod apk burger at your own risk and discretion.

-

Here are some FAQs that you might have about mod apk burger:

- - - - - - -
Q: Is mod apk burger safe to use?
A: Mod apk burger is not 100% safe to use. It might contain malware or viruses that can harm your device or data. It might also cause your game to crash or malfunction. It might also violate the terms and conditions of the original game and result in your game account being banned or suspended. It might also infringe the intellectual property rights of the original game developer or publisher and result in legal actions or lawsuits against you.

Q: Is mod apk burger free to use?
A: Mod apk burger is free to use. You don't have to pay any money or make any in-app purchases to get unlimited resources and features in the game. However, you might have to watch ads or complete surveys to download the mod apk file from some sources.

Q: Is mod apk burger compatible with my device?
A: Mod apk burger might not be compatible with all devices or game versions. It might depend on the specifications of your device such as operating system, processor, memory, storage, etc. It might also depend on the version of the game that you have installed on your device such as updates, patches, etc.

Q: Is mod apk burger legal to use?
A: Mod apk burger is not legal to use. It is a modified version of the original game that has been altered by a third-party developer without the permission or authorization of the original game developer or publisher. It is a violation of the intellectual property rights of the original game owner or authority. It is also a breach of the terms and conditions of the original game that you have agreed to when you downloaded or installed it on your device.

Q: Is mod apk burger worth using?
A: Mod apk burger might be worth using if you want to have more fun and resources in your game without spending any money or time. However, you also need to consider the risks and consequences of using mod apk burger such as malware infection, game crash or error, game ban or suspension, and legal issues. Therefore, you need to weigh the pros and cons of using mod apk burger before deciding whether to use it or not.

-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Car Parking Driving How to Master the Open World Multiplayer Mode.md b/spaces/1phancelerku/anime-remove-background/Car Parking Driving How to Master the Open World Multiplayer Mode.md
deleted file mode 100644
index 15baeb8df7ea27eeb76c6ffc165be67d3b07002b..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Car Parking Driving How to Master the Open World Multiplayer Mode.md
+++ /dev/null
@@ -1,19 +0,0 @@
-
-

Perpendicular Parking

Perpendicular parking is when you park your car at a 90-degree angle to the curb or the wall. This is the most common type of parking space in car parks and supermarkets. To park your car in a perpendicular space, follow these steps:

- Approach the parking space slowly and keep your car as far to the opposite side as possible. This will give you more room to turn.
- Stop your car when your bumper is aligned with the first line of the parking space. You can use your shoulder or your wing mirror as a reference point.
- Turn on your indicator to signal your intention to park.
- Turn your steering wheel hand over hand in the direction of the space. Aim for the middle or far side of the space so you have room to straighten out.
- Check your mirrors and blind spots for any obstacles or pedestrians. If there are any, stop and wait for them to pass.
- Straighten out your wheels when the sides of your car are parallel to the lines of the space. Pull forward until your car is centered in the space.
- Put your car in park and check that it is completely inside the lines. Make sure you have enough room on each side to open your doors.

Angled Parking

Angled parking is when you park your car at an angle to the curb or the wall. This type of parking space is less common than perpendicular parking, but it can be easier to enter and exit. Angled parking spaces are usually marked with arrows that indicate the direction of traffic flow. To park your car in an angled space, follow these steps:

- Approach the parking space slowly and stay in the same lane as the arrows. This will help you align your car with the angle of the space.
- Stop your car when the front corner of your car is aligned with the first line of the parking space. You can use your wing mirror or a point on your bonnet as a reference point.
- Turn on your indicator to signal your intention to park.
- Turn your steering wheel slightly in the direction of the space. Aim for the center of the space so you have room to adjust.
- Check your mirrors and blind spots for any obstacles or pedestrians. If there are any, stop and wait for them to pass.
- Adjust your position if needed by moving forward or backward until your car is centered in the space.
- Put your car in park and check that it is completely inside the lines. Make sure you have enough room on each side to open your doors.

Parallel Parking

Parallel parking is when you park your car parallel to the curb or the wall. This type of parking space is often found on busy streets and can be challenging for beginners, because it requires good judgment of distance and angle. To park your car in a parallel space, follow these steps:

- Find a space that is big enough for your car. A good rule of thumb is to look for a space that is at least one and a half times the length of your car.
- Pull up next to the car in front of the space, leaving about one meter between the two cars. Align your rear wheels with its rear bumper.
- Turn on your indicator to signal your intention to park.
- Shift into reverse and turn your steering wheel all the way in the direction of the curb. Start moving backward slowly until you see the rear corner of the car behind you in your side mirror.
- Straighten out your wheels and continue moving backward until you are parallel to the curb. You should be about 30 centimeters away from it.
- Turn your steering wheel all the way in the opposite direction and move forward slightly until you are centered in the space.
- Put your car in park and check that it is completely inside the lines. Make sure you have enough room on each side to open your doors.

How to Use Reference Points

Reference points are visual cues that help you judge the position and size of your car in relation to the parking space and the surroundings. They can be parts of your car, such as mirrors, windows, bumpers, or wheels, or external objects, such as lines, poles, or other cars. Using reference points can help you park your car more accurately and avoid hitting anything. Here are some examples of how to use reference points for different types of parking:

- For perpendicular parking, you can use your shoulder or your wing mirror as a reference point to align your bumper with the first line of the space. You can also use the rear window or the rearview mirror as a reference point to center your car in the space.
- For angled parking, you can use your wing mirror or a point on your bonnet as a reference point to align your front corner with the first line of the space. You can also use the side window or the side mirror as a reference point to center your car in the space.
- For parallel parking, you can use your rear wheels or your rear bumper as a reference point to align your car with the car in front of the space. You can also use your side mirror or your rear corner as a reference point to align your car with the car behind the space.

How to Use Mirrors and Signals

Mirrors and signals are essential tools for car parking driving. They help you see what is behind and around you and communicate your intentions to other drivers and pedestrians. You should always check your mirrors and blind spots before and during any parking maneuver, and always use your indicator to signal which way you are turning or which space you are entering. Here are some tips on how to use mirrors and signals for different types of parking:

- For perpendicular parking, check your rearview mirror and side mirrors before turning into the space, and check your blind spots for any obstacles or pedestrians. Signal in the direction of the space as soon as you stop your car next to it.
- For angled parking, check your rearview mirror and side mirrors before turning into the space, and check your blind spots for any obstacles or pedestrians. Signal in the direction of the space as soon as you align your front corner with it.
- For parallel parking, check your rearview mirror and side mirrors before reversing into the space, and check your blind spots for any obstacles or pedestrians. Signal in the direction of the curb as soon as you pull up next to the car in front of the space.

How to Practice Car Parking Driving

Practice makes perfect when it comes to car parking driving. The more you practice, the more confident and skilled you will become. There are many ways to practice car parking driving, such as:

- Practicing in an empty car park or a quiet street with plenty of spaces. You can use cones, boxes, or other objects to mark the spaces and practice different types of parking.
- Practicing with a friend, a family member, or an instructor who can give you feedback and advice. They can also act as a spotter and help you avoid any collisions.
- Practicing with a car parking driving game or simulator that can simulate realistic scenarios and challenges. You can play online or on your phone and improve your skills in a fun and safe way.

Best Car Parking Driving Games and Simulators

There are many car parking driving games and simulators available online or on your phone that can help you practice your skills. Some of them are:

- Real Car Parking: an online game that lets you park various cars in different environments and levels. You can choose from different camera angles and controls and earn coins to unlock new cars.
- Real Car Parking 2: an app that lets you park realistic 3D cars in various scenarios and modes. You can customize your car, adjust your settings, and enjoy realistic graphics and sounds.
- Driving Academy - Car School Driver Simulator 2021: an app that lets you learn how to drive and park different cars in various situations and under various rules. You can earn badges, unlock new cars, and test your skills in challenges and tests.

Conclusion

Car parking driving is a skill that every driver needs to master. It can be tricky at first, but with some tips and tricks it becomes easier and more enjoyable. In this article, we have shared tips and tricks for car parking driving that will help you improve your confidence and accuracy. We have covered the main types of parking spaces, including perpendicular, angled, and parallel parking, and explained how to use reference points, mirrors, and signals to park your car smoothly. We have also shown you some of the best car parking driving games and simulators that you can play online or on your phone to practice your skills. We hope you have found this article helpful and informative. Happy parking!

FAQs

-

What is the best way to park a car?

There is no definitive answer to this question, as different types of parking spaces require different techniques and skills. However, some general tips that can help you park your car better are:

- Approach the parking space slowly and carefully.
- Use reference points to align your car with the space and the surroundings.
- Use mirrors and signals to check for any obstacles or pedestrians and to communicate your intentions.
- Adjust your position if needed by moving forward or backward until your car is centered in the space.
- Put your car in park and check that it is completely inside the lines.

How do I know if a parking space is big enough for my car?

A good rule of thumb is to look for a space that is at least one and a half times the length of your car; for example, a car that is 4.5 meters long needs a space of roughly 6.75 meters. You can also use reference points to estimate the size of the space, such as the lines, the curb, or other cars. If you are not sure, you can always drive past the space and check how much room there is behind and in front of it.

How do I avoid hitting anything when parking?

- The best way to avoid hitting anything when parking is to check your mirrors and blind spots before and during any parking maneuver. You should also use signals to alert other drivers and pedestrians of your intentions. If you see any obstacles or pedestrians, stop and wait for them to pass. You can also ask someone to act as a spotter and guide you into the space.

How do I get out of a tight parking space?

- To get out of a tight parking space, you need to reverse slowly and carefully until you have enough room to turn. You should check your mirrors and blind spots for any obstacles or pedestrians and use signals to indicate which way you are going. You should also turn your steering wheel hand over hand in the direction you want to go. If you are in a perpendicular or angled space, you should aim for the opposite side of the lane. If you are in a parallel space, you should pull forward until your front bumper clears the rear bumper of the car in front of you.

How do I improve my car parking driving skills?

- The best way to improve your car parking driving skills is to practice as much as possible. You can practice in an empty car park or a quiet street with plenty of spaces. You can also practice with a friend, a family member, or an instructor who can give you feedback and advice. Another way to improve your skills is to play car parking driving games or simulators that can simulate realistic scenarios and challenges.

-

-
-
\ No newline at end of file diff --git a/spaces/4com/README/README.md b/spaces/4com/README/README.md deleted file mode 100644 index a62f60dd2d723afbb5812bd0c71e74b02e6f6f77..0000000000000000000000000000000000000000 --- a/spaces/4com/README/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: README -emoji: 📚 -colorFrom: indigo -colorTo: purple -sdk: static -pinned: false ---- -

4COM

- - -
companies
-
universities
-
classrooms
-
communities
-
non-profit organizations
\ No newline at end of file diff --git a/spaces/7hao/bingo/src/app/layout.tsx b/spaces/7hao/bingo/src/app/layout.tsx deleted file mode 100644 index 8b5122759987177b8dc4e4356d1d06cea25c15ea..0000000000000000000000000000000000000000 --- a/spaces/7hao/bingo/src/app/layout.tsx +++ /dev/null @@ -1,47 +0,0 @@ -import { Metadata } from 'next' -import { Toaster } from 'react-hot-toast' -import { TailwindIndicator } from '@/components/tailwind-indicator' -import { Providers } from '@/components/providers' -import { Header } from '@/components/header' - -import '@/app/globals.scss' - - -export const metadata: Metadata = { - title: { - default: 'Bing AI Chatbot', - template: `%s - Bing AI Chatbot` - }, - description: 'Bing AI Chatbot Web App.', - themeColor: [ - { media: '(prefers-color-scheme: light)', color: 'white' }, - { media: '(prefers-color-scheme: dark)', color: 'dark' } - ], - icons: { - icon: '/favicon.ico', - shortcut: '../assets/images/logo.svg', - apple: '../assets/images/logo.svg' - } -} - -interface RootLayoutProps { - children: React.ReactNode -} - -export default function RootLayout({ children }: RootLayoutProps) { - return ( - - - - -
- {/* @ts-ignore */} -
-
{children}
-
- -
- - - ) -} diff --git a/spaces/801artistry/RVC801/demucs/model.py b/spaces/801artistry/RVC801/demucs/model.py deleted file mode 100644 index e9d932f4d014f7b95b394d2e24ed5edc379ded8d..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/demucs/model.py +++ /dev/null @@ -1,202 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import math - -import julius -from torch import nn - -from .utils import capture_init, center_trim - - -class BLSTM(nn.Module): - def __init__(self, dim, layers=1): - super().__init__() - self.lstm = nn.LSTM(bidirectional=True, num_layers=layers, hidden_size=dim, input_size=dim) - self.linear = nn.Linear(2 * dim, dim) - - def forward(self, x): - x = x.permute(2, 0, 1) - x = self.lstm(x)[0] - x = self.linear(x) - x = x.permute(1, 2, 0) - return x - - -def rescale_conv(conv, reference): - std = conv.weight.std().detach() - scale = (std / reference)**0.5 - conv.weight.data /= scale - if conv.bias is not None: - conv.bias.data /= scale - - -def rescale_module(module, reference): - for sub in module.modules(): - if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d)): - rescale_conv(sub, reference) - - -class Demucs(nn.Module): - @capture_init - def __init__(self, - sources, - audio_channels=2, - channels=64, - depth=6, - rewrite=True, - glu=True, - rescale=0.1, - resample=True, - kernel_size=8, - stride=4, - growth=2., - lstm_layers=2, - context=3, - normalize=False, - samplerate=44100, - segment_length=4 * 10 * 44100): - """ - Args: - sources (list[str]): list of source names - audio_channels (int): stereo or mono - channels (int): first convolution channels - depth (int): number of encoder/decoder layers - rewrite (bool): add 1x1 convolution to each encoder layer - and a convolution to each decoder layer. - For the decoder layer, `context` gives the kernel size. - glu (bool): use glu instead of ReLU - resample_input (bool): upsample x2 the input and downsample /2 the output. - rescale (int): rescale initial weights of convolutions - to get their standard deviation closer to `rescale` - kernel_size (int): kernel size for convolutions - stride (int): stride for convolutions - growth (float): multiply (resp divide) number of channels by that - for each layer of the encoder (resp decoder) - lstm_layers (int): number of lstm layers, 0 = no lstm - context (int): kernel size of the convolution in the - decoder before the transposed convolution. If > 1, - will provide some context from neighboring time - steps. - samplerate (int): stored as meta information for easing - future evaluations of the model. - segment_length (int): stored as meta information for easing - future evaluations of the model. Length of the segments on which - the model was trained. 
- """ - - super().__init__() - self.audio_channels = audio_channels - self.sources = sources - self.kernel_size = kernel_size - self.context = context - self.stride = stride - self.depth = depth - self.resample = resample - self.channels = channels - self.normalize = normalize - self.samplerate = samplerate - self.segment_length = segment_length - - self.encoder = nn.ModuleList() - self.decoder = nn.ModuleList() - - if glu: - activation = nn.GLU(dim=1) - ch_scale = 2 - else: - activation = nn.ReLU() - ch_scale = 1 - in_channels = audio_channels - for index in range(depth): - encode = [] - encode += [nn.Conv1d(in_channels, channels, kernel_size, stride), nn.ReLU()] - if rewrite: - encode += [nn.Conv1d(channels, ch_scale * channels, 1), activation] - self.encoder.append(nn.Sequential(*encode)) - - decode = [] - if index > 0: - out_channels = in_channels - else: - out_channels = len(self.sources) * audio_channels - if rewrite: - decode += [nn.Conv1d(channels, ch_scale * channels, context), activation] - decode += [nn.ConvTranspose1d(channels, out_channels, kernel_size, stride)] - if index > 0: - decode.append(nn.ReLU()) - self.decoder.insert(0, nn.Sequential(*decode)) - in_channels = channels - channels = int(growth * channels) - - channels = in_channels - - if lstm_layers: - self.lstm = BLSTM(channels, lstm_layers) - else: - self.lstm = None - - if rescale: - rescale_module(self, reference=rescale) - - def valid_length(self, length): - """ - Return the nearest valid length to use with the model so that - there is no time steps left over in a convolutions, e.g. for all - layers, size of the input - kernel_size % stride = 0. - - If the mixture has a valid length, the estimated sources - will have exactly the same length when context = 1. If context > 1, - the two signals can be center trimmed to match. - - For training, extracts should have a valid length.For evaluation - on full tracks we recommend passing `pad = True` to :method:`forward`. 
- """ - if self.resample: - length *= 2 - for _ in range(self.depth): - length = math.ceil((length - self.kernel_size) / self.stride) + 1 - length = max(1, length) - length += self.context - 1 - for _ in range(self.depth): - length = (length - 1) * self.stride + self.kernel_size - - if self.resample: - length = math.ceil(length / 2) - return int(length) - - def forward(self, mix): - x = mix - - if self.normalize: - mono = mix.mean(dim=1, keepdim=True) - mean = mono.mean(dim=-1, keepdim=True) - std = mono.std(dim=-1, keepdim=True) - else: - mean = 0 - std = 1 - - x = (x - mean) / (1e-5 + std) - - if self.resample: - x = julius.resample_frac(x, 1, 2) - - saved = [] - for encode in self.encoder: - x = encode(x) - saved.append(x) - if self.lstm: - x = self.lstm(x) - for decode in self.decoder: - skip = center_trim(saved.pop(-1), x) - x = x + skip - x = decode(x) - - if self.resample: - x = julius.resample_frac(x, 2, 1) - x = x * std + mean - x = x.view(x.size(0), len(self.sources), self.audio_channels, x.size(-1)) - return x diff --git a/spaces/801artistry/RVC801/lib/uvr5_pack/utils.py b/spaces/801artistry/RVC801/lib/uvr5_pack/utils.py deleted file mode 100644 index 0fafe8793b0d539fa58dd024342250b24b6187a9..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/lib/uvr5_pack/utils.py +++ /dev/null @@ -1,120 +0,0 @@ -import torch -import numpy as np -from tqdm import tqdm -import json - - -def load_data(file_name: str = "./lib/uvr5_pack/name_params.json") -> dict: - with open(file_name, "r") as f: - data = json.load(f) - - return data - - -def make_padding(width, cropsize, offset): - left = offset - roi_size = cropsize - left * 2 - if roi_size == 0: - roi_size = cropsize - right = roi_size - (width % roi_size) + left - - return left, right, roi_size - - -def inference(X_spec, device, model, aggressiveness, data): - """ - data : dic configs - """ - - def _execute( - X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half=True - ): - model.eval() - with torch.no_grad(): - preds = [] - - iterations = [n_window] - - total_iterations = sum(iterations) - for i in tqdm(range(n_window)): - start = i * roi_size - X_mag_window = X_mag_pad[ - None, :, :, start : start + data["window_size"] - ] - X_mag_window = torch.from_numpy(X_mag_window) - if is_half: - X_mag_window = X_mag_window.half() - X_mag_window = X_mag_window.to(device) - - pred = model.predict(X_mag_window, aggressiveness) - - pred = pred.detach().cpu().numpy() - preds.append(pred[0]) - - pred = np.concatenate(preds, axis=2) - return pred - - def preprocess(X_spec): - X_mag = np.abs(X_spec) - X_phase = np.angle(X_spec) - - return X_mag, X_phase - - X_mag, X_phase = preprocess(X_spec) - - coef = X_mag.max() - X_mag_pre = X_mag / coef - - n_frame = X_mag_pre.shape[2] - pad_l, pad_r, roi_size = make_padding(n_frame, data["window_size"], model.offset) - n_window = int(np.ceil(n_frame / roi_size)) - - X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant") - - if list(model.state_dict().values())[0].dtype == torch.float16: - is_half = True - else: - is_half = False - pred = _execute( - X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half - ) - pred = pred[:, :, :n_frame] - - if data["tta"]: - pad_l += roi_size // 2 - pad_r += roi_size // 2 - n_window += 1 - - X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant") - - pred_tta = _execute( - X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half - ) - pred_tta = pred_tta[:, :, roi_size 
// 2 :] - pred_tta = pred_tta[:, :, :n_frame] - - return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.0j * X_phase) - else: - return pred * coef, X_mag, np.exp(1.0j * X_phase) - - -def _get_name_params(model_path, model_hash): - data = load_data() - flag = False - ModelName = model_path - for type in list(data): - for model in list(data[type][0]): - for i in range(len(data[type][0][model])): - if str(data[type][0][model][i]["hash_name"]) == model_hash: - flag = True - elif str(data[type][0][model][i]["hash_name"]) in ModelName: - flag = True - - if flag: - model_params_auto = data[type][0][model][i]["model_params"] - param_name_auto = data[type][0][model][i]["param_name"] - if type == "equivalent": - return param_name_auto, model_params_auto - else: - flag = False - return param_name_auto, model_params_auto diff --git a/spaces/A00001/bingothoo/src/pages/api/kblob.ts b/spaces/A00001/bingothoo/src/pages/api/kblob.ts deleted file mode 100644 index 0ce7e6063cdc06838e76f1cff1d5982d34ef52de..0000000000000000000000000000000000000000 --- a/spaces/A00001/bingothoo/src/pages/api/kblob.ts +++ /dev/null @@ -1,56 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import FormData from 'form-data' -import { fetch } from '@/lib/isomorphic' -import { KBlobRequest } from '@/lib/bots/bing/types' - -const API_DOMAIN = 'https://bing.vcanbb.top' - -export const config = { - api: { - bodyParser: { - sizeLimit: '10mb' // Set desired value here - } - } -} - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const { knowledgeRequest, imageBase64 } = req.body as KBlobRequest - - const formData = new FormData() - formData.append('knowledgeRequest', JSON.stringify(knowledgeRequest)) - if (imageBase64) { - formData.append('imageBase64', imageBase64) - } - - const response = await fetch(`${API_DOMAIN}/images/kblob`, - { - method: 'POST', - body: formData.getBuffer(), - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referer": `${API_DOMAIN}/web/index.html`, - "Referrer-Policy": "origin-when-cross-origin", - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - ...formData.getHeaders() - } - } - ).then(res => res.text()) - - res.writeHead(200, { - 'Content-Type': 'application/json', - }) - res.end(response || JSON.stringify({ result: { value: 'UploadFailed', message: '请更换 IP 或代理后重试' } })) - } catch (e) { - return res.json({ - result: { - value: 'UploadFailed', - message: `${e}` - } - }) - } -} diff --git a/spaces/AI-Dashboards/Topic-Modeling-Clusters-Free-Text/README.md b/spaces/AI-Dashboards/Topic-Modeling-Clusters-Free-Text/README.md deleted file mode 100644 index 730ba0002a25b8d64e896a4c6f0f23368c2f1400..0000000000000000000000000000000000000000 --- a/spaces/AI-Dashboards/Topic-Modeling-Clusters-Free-Text/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Topic Modeling Clusters Free Text -emoji: 🐨 -colorFrom: yellow -colorTo: yellow -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AIConsultant/MusicGen/tests/__init__.py b/spaces/AIConsultant/MusicGen/tests/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- 
a/spaces/AIConsultant/MusicGen/tests/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. diff --git a/spaces/AIConsultant/MusicGen/tests/modules/test_codebooks_patterns.py b/spaces/AIConsultant/MusicGen/tests/modules/test_codebooks_patterns.py deleted file mode 100644 index b658f4779a369f9ec8dde692a61b7f0fe3485724..0000000000000000000000000000000000000000 --- a/spaces/AIConsultant/MusicGen/tests/modules/test_codebooks_patterns.py +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import pytest -import torch - -from audiocraft.modules.codebooks_patterns import ( - DelayedPatternProvider, - ParallelPatternProvider, - Pattern, - UnrolledPatternProvider, -) - - -class TestParallelPatternProvider: - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [0, 1, 16, 100]) - def test_get_pattern(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - # + 1 to account for 1st step - assert len(pattern.layout) == timesteps + 1 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_content(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - for s, v in enumerate(pattern.layout): - for i, code in enumerate(v): - assert i == code.q - assert code.t == s - 1 # account for the 1st empty step - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_max_delay(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == 0 - assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay - - -class TestDelayedPatternProvider: - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [0, 1, 16, 100]) - def test_get_pattern(self, n_q: int, timesteps: int): - delays = [ - list(range(n_q)), - [0] + [1] * (n_q - 1), - [0] + [4] * (n_q - 1), - ] - for delay in delays: - provider = DelayedPatternProvider(n_q, delay) - pattern = provider.get_pattern(timesteps) - # + 1 to account for 1st step - assert len(pattern.layout) == timesteps + max(delay) + 1 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_content(self, n_q: int, timesteps: int): - provider = DelayedPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - for s, v in enumerate(pattern.layout): - for i, code in enumerate(v): - assert i == code.q - assert code.t == max(0, s - code.q - 1) - - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - @pytest.mark.parametrize("delay", [[0, 1, 2, 3], [0, 1, 1, 1], [0, 3, 3, 3], [0, 3]]) - def test_pattern_max_delay(self, timesteps: int, delay: list): - provider = DelayedPatternProvider(len(delay), delay) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == max(delay) - assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay - - -class TestUnrolledPatternProvider: - - @pytest.mark.parametrize("timesteps", [0, 
1, 16]) - @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]]) - @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]]) - def test_get_pattern(self, timesteps: int, flattening: list, delays: list): - n_q = len(flattening) - max_delay = max(delays) - provider = UnrolledPatternProvider(n_q, flattening, delays) - pattern = provider.get_pattern(timesteps) - assert len(pattern.layout) == provider.num_virtual_steps(timesteps) + max_delay - - @pytest.mark.parametrize("timesteps", [0, 1, 16]) - @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]]) - @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]]) - def test_pattern_max_delay(self, timesteps: int, flattening: list, delays: list): - n_q = len(flattening) - max_delay = max(delays) - provider = UnrolledPatternProvider(n_q, flattening, delays) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == max_delay - - -class TestPattern: - - def ref_build_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int): - """Reference method to build the sequence from the pattern without using fancy scatter.""" - bs, n_q, T = z.shape - z = z.cpu().numpy() - assert n_q == pattern.n_q - assert T <= pattern.timesteps - inp = torch.full((bs, n_q, len(pattern.layout)), special_token, dtype=torch.long).numpy() - inp[:] = special_token - for s, v in enumerate(pattern.layout): - for (t, q) in v: - if t < T: - inp[:, q, s] = z[:, q, t] - return torch.from_numpy(inp) - - def ref_revert_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int): - """Reference method to revert the sequence from the pattern without using fancy scatter.""" - z = z.cpu().numpy() - bs, n_q, S = z.shape - assert pattern.n_q == n_q - inp = torch.full((bs, pattern.n_q, pattern.timesteps), special_token, dtype=torch.long).numpy() - inp[:] = special_token - for s, v in enumerate(pattern.layout): - for (t, q) in v: - if t < pattern.timesteps: - inp[:, q, t] = z[:, q, s] - return torch.from_numpy(inp) - - def ref_revert_pattern_logits(self, z: torch.Tensor, pattern: Pattern, special_token: float): - """Reference method to revert the logits from the pattern without using fancy scatter.""" - z = z.cpu().numpy() - bs, card, n_q, S = z.shape - assert pattern.n_q == n_q - ref_layout = pattern.layout - inp = torch.full((bs, card, pattern.n_q, pattern.timesteps), special_token, dtype=torch.float).numpy() - inp[:] = special_token - for s, v in enumerate(ref_layout[1:]): - if s < S: - for (t, q) in v: - if t < pattern.timesteps: - inp[:, :, q, t] = z[:, :, q, s] - return torch.from_numpy(inp) - - def _get_pattern_providers(self, n_q: int): - pattern_provider_1 = ParallelPatternProvider(n_q) - pattern_provider_2 = DelayedPatternProvider(n_q, list(range(n_q))) - pattern_provider_3 = DelayedPatternProvider(n_q, [0] + [1] * (n_q - 1)) - pattern_provider_4 = UnrolledPatternProvider( - n_q, flattening=list(range(n_q)), delays=[0] * n_q - ) - pattern_provider_5 = UnrolledPatternProvider( - n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] * n_q - ) - pattern_provider_6 = UnrolledPatternProvider( - n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] + [5] * (n_q - 1) - ) - return [ - pattern_provider_1, - pattern_provider_2, - pattern_provider_3, - pattern_provider_4, - pattern_provider_5, - pattern_provider_6, - ] - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - def test_build_pattern_sequence(self, n_q: int, timesteps: int): - bs = 2 - card = 256 - special_token = card - - 
pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # we can correctly build the sequence from the pattern - z = torch.randint(0, card, (bs, n_q, timesteps)) - ref_res = self.ref_build_pattern_sequence(z, pattern, special_token) - res, indexes, mask = pattern.build_pattern_sequence(z, special_token) - assert (res == ref_res).float().mean() == 1.0 - - # expected assertion fails on the number of timesteps - invalid_timesteps = [timesteps + 1] - if pattern.num_sequence_steps != pattern.timesteps: - invalid_timesteps.append(pattern.num_sequence_steps) - for i_timesteps in invalid_timesteps: - z2 = torch.randint(0, card, (bs, n_q, i_timesteps)) - with pytest.raises(AssertionError): - pattern.build_pattern_sequence(z2, special_token) - - # expected assertion fails on the number of codebooks - invalid_qs = [0, n_q - 1, n_q + 1] - for i_q in invalid_qs: - z3 = torch.randint(0, card, (bs, i_q, timesteps)) - with pytest.raises(AssertionError): - pattern.build_pattern_sequence(z3, special_token) - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - def test_revert_pattern_sequence(self, n_q: int, timesteps: int): - bs = 2 - card = 256 - special_token = card - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # this works assuming previous tests are successful - z = torch.randint(0, card, (bs, n_q, timesteps)) - s = self.ref_build_pattern_sequence(z, pattern, special_token) - ref_out = self.ref_revert_pattern_sequence(s, pattern, special_token) - # ensure our reference script retrieve the original sequence - assert z.shape == ref_out.shape - assert (z == ref_out).float().mean() == 1.0 - # now we can test the scatter version - out, indexes, mask = pattern.revert_pattern_sequence(s, special_token) - assert out.shape == ref_out.shape - assert (out == ref_out).float().mean() == 1.0 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - @pytest.mark.parametrize("card", [1, 2, 256, 1024]) - def test_revert_pattern_logits(self, n_q: int, timesteps: int, card: int): - bs = 2 - special_token = card - logits_special_token = float('nan') - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # this works assuming previous tests are successful - z = torch.randint(0, card, (bs, n_q, timesteps)) - s = self.ref_build_pattern_sequence(z, pattern, special_token) - logits = torch.randn((bs, card, n_q, s.shape[-1])) - ref_out = self.ref_revert_pattern_logits(logits, pattern, logits_special_token) - # ensure our reference script retrieve the original sequence - assert ref_out.shape == torch.Size([bs, card, n_q, timesteps]) - # now we can test the scatter version - out, indexes, mask = pattern.revert_pattern_logits(logits, logits_special_token) - assert out.shape == ref_out.shape - assert (out == ref_out).float().mean() == 1.0 diff --git a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/pretrained.py b/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/pretrained.py deleted file mode 100644 index e211d8b5b59320a599e62605f1dee6199f317253..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/pretrained.py +++ /dev/null @@ 
-1,167 +0,0 @@ -import hashlib -import os -import urllib -import warnings - -from tqdm import tqdm - -_RN50 = dict( - openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", - yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt", - cc12m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt", -) - -_RN50_quickgelu = dict( - openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", - yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt", - cc12m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt", -) - -_RN101 = dict( - openai="https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt", - yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt", -) - -_RN101_quickgelu = dict( - openai="https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt", - yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt", -) - -_RN50x4 = dict( - openai="https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt", -) - -_RN50x16 = dict( - openai="https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt", -) - -_RN50x64 = dict( - openai="https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt", -) - -_VITB32 = dict( - openai="https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", - laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt", - laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt", - laion400m_avg="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_avg-8a00ab3c.pt", -) - -_VITB32_quickgelu = dict( - openai="https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", - laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt", - laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt", - laion400m_avg="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_avg-8a00ab3c.pt", -) - -_VITB16 = dict( - openai="https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", -) - -_VITL14 = dict( - openai="https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", -) - -_PRETRAINED = { - "RN50": _RN50, - "RN50-quickgelu": _RN50_quickgelu, - "RN101": _RN101, - "RN101-quickgelu": _RN101_quickgelu, - "RN50x4": _RN50x4, - 
"RN50x16": _RN50x16, - "ViT-B-32": _VITB32, - "ViT-B-32-quickgelu": _VITB32_quickgelu, - "ViT-B-16": _VITB16, - "ViT-L-14": _VITL14, -} - - -def list_pretrained(as_str: bool = False): - """returns list of pretrained models - Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True - """ - return [ - ":".join([k, t]) if as_str else (k, t) - for k in _PRETRAINED.keys() - for t in _PRETRAINED[k].keys() - ] - - -def list_pretrained_tag_models(tag: str): - """return all models having the specified pretrain tag""" - models = [] - for k in _PRETRAINED.keys(): - if tag in _PRETRAINED[k]: - models.append(k) - return models - - -def list_pretrained_model_tags(model: str): - """return all pretrain tags for the specified model architecture""" - tags = [] - if model in _PRETRAINED: - tags.extend(_PRETRAINED[model].keys()) - return tags - - -def get_pretrained_url(model: str, tag: str): - if model not in _PRETRAINED: - return "" - model_pretrained = _PRETRAINED[model] - if tag not in model_pretrained: - return "" - return model_pretrained[tag] - - -def download_pretrained(url: str, root: str = os.path.expanduser("~/.cache/clip")): - os.makedirs(root, exist_ok=True) - filename = os.path.basename(url) - - if "openaipublic" in url: - expected_sha256 = url.split("/")[-2] - else: - expected_sha256 = "" - - download_target = os.path.join(root, filename) - - if os.path.exists(download_target) and not os.path.isfile(download_target): - raise RuntimeError(f"{download_target} exists and is not a regular file") - - if os.path.isfile(download_target): - if expected_sha256: - if ( - hashlib.sha256(open(download_target, "rb").read()).hexdigest() - == expected_sha256 - ): - return download_target - else: - warnings.warn( - f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" - ) - else: - return download_target - - with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: - with tqdm( - total=int(source.info().get("Content-Length")), - ncols=80, - unit="iB", - unit_scale=True, - ) as loop: - while True: - buffer = source.read(8192) - if not buffer: - break - - output.write(buffer) - loop.update(len(buffer)) - - if ( - expected_sha256 - and hashlib.sha256(open(download_target, "rb").read()).hexdigest() - != expected_sha256 - ): - raise RuntimeError( - f"Model has been downloaded but the SHA256 checksum does not not match" - ) - - return download_target diff --git a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/tokenizer.py b/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/tokenizer.py deleted file mode 100644 index ee4d28450ec5dd12a79daf38cf3088e9e73c2cd5..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/tokenizer.py +++ /dev/null @@ -1,197 +0,0 @@ -""" CLIP tokenizer - -Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. -""" -import gzip -import html -import os -from functools import lru_cache -from typing import Union, List - -import ftfy -import regex as re -import torch - - -@lru_cache() -def default_bpe(): - return os.path.join( - os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz" - ) - - -@lru_cache() -def bytes_to_unicode(): - """ - Returns list of utf-8 byte and a corresponding list of unicode strings. - The reversible bpe codes work on unicode strings. 
- This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. - When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. - This is a signficant percentage of your normal, say, 32K bpe vocab. - To avoid that, we want lookup tables between utf-8 bytes and unicode strings. - And avoids mapping to whitespace/control characters the bpe code barfs on. - """ - bs = ( - list(range(ord("!"), ord("~") + 1)) - + list(range(ord("¡"), ord("¬") + 1)) - + list(range(ord("®"), ord("ÿ") + 1)) - ) - cs = bs[:] - n = 0 - for b in range(2**8): - if b not in bs: - bs.append(b) - cs.append(2**8 + n) - n += 1 - cs = [chr(n) for n in cs] - return dict(zip(bs, cs)) - - -def get_pairs(word): - """Return set of symbol pairs in a word. - Word is represented as tuple of symbols (symbols being variable-length strings). - """ - pairs = set() - prev_char = word[0] - for char in word[1:]: - pairs.add((prev_char, char)) - prev_char = char - return pairs - - -def basic_clean(text): - text = ftfy.fix_text(text) - text = html.unescape(html.unescape(text)) - return text.strip() - - -def whitespace_clean(text): - text = re.sub(r"\s+", " ", text) - text = text.strip() - return text - - -class SimpleTokenizer(object): - def __init__(self, bpe_path: str = default_bpe(), special_tokens=None): - self.byte_encoder = bytes_to_unicode() - self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} - merges = gzip.open(bpe_path).read().decode("utf-8").split("\n") - merges = merges[1 : 49152 - 256 - 2 + 1] - merges = [tuple(merge.split()) for merge in merges] - vocab = list(bytes_to_unicode().values()) - vocab = vocab + [v + "" for v in vocab] - for merge in merges: - vocab.append("".join(merge)) - if not special_tokens: - special_tokens = ["", ""] - else: - special_tokens = ["", ""] + special_tokens - vocab.extend(special_tokens) - self.encoder = dict(zip(vocab, range(len(vocab)))) - self.decoder = {v: k for k, v in self.encoder.items()} - self.bpe_ranks = dict(zip(merges, range(len(merges)))) - self.cache = {t: t for t in special_tokens} - special = "|".join(special_tokens) - self.pat = re.compile( - special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", - re.IGNORECASE, - ) - - self.vocab_size = len(self.encoder) - self.all_special_ids = [self.encoder[t] for t in special_tokens] - - def bpe(self, token): - if token in self.cache: - return self.cache[token] - word = tuple(token[:-1]) + (token[-1] + "",) - pairs = get_pairs(word) - - if not pairs: - return token + "" - - while True: - bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) - if bigram not in self.bpe_ranks: - break - first, second = bigram - new_word = [] - i = 0 - while i < len(word): - try: - j = word.index(first, i) - new_word.extend(word[i:j]) - i = j - except: - new_word.extend(word[i:]) - break - - if word[i] == first and i < len(word) - 1 and word[i + 1] == second: - new_word.append(first + second) - i += 2 - else: - new_word.append(word[i]) - i += 1 - new_word = tuple(new_word) - word = new_word - if len(word) == 1: - break - else: - pairs = get_pairs(word) - word = " ".join(word) - self.cache[token] = word - return word - - def encode(self, text): - bpe_tokens = [] - text = whitespace_clean(basic_clean(text)).lower() - for token in re.findall(self.pat, text): - token = "".join(self.byte_encoder[b] for b in token.encode("utf-8")) - bpe_tokens.extend( - self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ") - ) - return 
bpe_tokens - - def decode(self, tokens): - text = "".join([self.decoder[token] for token in tokens]) - text = ( - bytearray([self.byte_decoder[c] for c in text]) - .decode("utf-8", errors="replace") - .replace("", " ") - ) - return text - - -_tokenizer = SimpleTokenizer() - - -def tokenize( - texts: Union[str, List[str]], context_length: int = 77 -) -> torch.LongTensor: - """ - Returns the tokenized representation of given input string(s) - - Parameters - ---------- - texts : Union[str, List[str]] - An input string or a list of input strings to tokenize - context_length : int - The context length to use; all CLIP models use 77 as the context length - - Returns - ------- - A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length] - """ - if isinstance(texts, str): - texts = [texts] - - sot_token = _tokenizer.encoder[""] - eot_token = _tokenizer.encoder[""] - all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] - result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) - - for i, tokens in enumerate(all_tokens): - if len(tokens) > context_length: - tokens = tokens[:context_length] # Truncate - result[i, : len(tokens)] = torch.tensor(tokens) - - return result diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/__init__.py b/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/transforms.py b/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/transforms.py deleted file mode 100644 index bdab7eb6b94ac21e950e2870b89da7bbac1f4a8e..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/transforms.py +++ /dev/null @@ -1,98 +0,0 @@ -import logging -import os -from pathlib import Path - -import albumentations -import numpy as np -import torch -from tqdm import tqdm - -logger = logging.getLogger(f'main.{__name__}') - - -class StandardNormalizeAudio(object): - ''' - Frequency-wise normalization - ''' - def __init__(self, specs_dir, train_ids_path='./data/vggsound_train.txt', cache_path='./data/'): - self.specs_dir = specs_dir - self.train_ids_path = train_ids_path - # making the stats filename to match the specs dir name - self.cache_path = os.path.join(cache_path, f'train_means_stds_{Path(specs_dir).stem}.txt') - logger.info('Assuming that the input stats are calculated using preprocessed spectrograms (log)') - self.train_stats = self.calculate_or_load_stats() - - def __call__(self, item): - # just to generalizat the input handling. 
Useful for FID, IS eval and training other staff - if isinstance(item, dict): - if 'input' in item: - input_key = 'input' - elif 'image' in item: - input_key = 'image' - else: - raise NotImplementedError - item[input_key] = (item[input_key] - self.train_stats['means']) / self.train_stats['stds'] - elif isinstance(item, torch.Tensor): - # broadcasts np.ndarray (80, 1) to (1, 80, 1) because item is torch.Tensor (B, 80, T) - item = (item - self.train_stats['means']) / self.train_stats['stds'] - else: - raise NotImplementedError - return item - - def calculate_or_load_stats(self): - try: - # (F, 2) - train_stats = np.loadtxt(self.cache_path) - means, stds = train_stats.T - logger.info('Trying to load train stats for Standard Normalization of inputs') - except OSError: - logger.info('Could not find the precalculated stats for Standard Normalization. Calculating...') - train_vid_ids = open(self.train_ids_path) - specs_paths = [os.path.join(self.specs_dir, f'{i.rstrip()}_mel.npy') for i in train_vid_ids] - means = [None] * len(specs_paths) - stds = [None] * len(specs_paths) - for i, path in enumerate(tqdm(specs_paths)): - spec = np.load(path) - means[i] = spec.mean(axis=1) - stds[i] = spec.std(axis=1) - # (F) <- (num_files, F) - means = np.array(means).mean(axis=0) - stds = np.array(stds).mean(axis=0) - # saving in two columns - np.savetxt(self.cache_path, np.vstack([means, stds]).T, fmt='%0.8f') - means = means.reshape(-1, 1) - stds = stds.reshape(-1, 1) - return {'means': means, 'stds': stds} - -class ToTensor(object): - - def __call__(self, item): - item['input'] = torch.from_numpy(item['input']).float() - # if 'target' in item: - item['target'] = torch.tensor(item['target']) - return item - -class Crop(object): - - def __init__(self, cropped_shape=None, random_crop=False): - self.cropped_shape = cropped_shape - if cropped_shape is not None: - mel_num, spec_len = cropped_shape - if random_crop: - self.cropper = albumentations.RandomCrop - else: - self.cropper = albumentations.CenterCrop - self.preprocessor = albumentations.Compose([self.cropper(mel_num, spec_len)]) - else: - self.preprocessor = lambda **kwargs: kwargs - - def __call__(self, item): - item['input'] = self.preprocessor(image=item['input'])['image'] - return item - - -if __name__ == '__main__': - cropper = Crop([80, 848]) - item = {'input': torch.rand([80, 860])} - outputs = cropper(item) - print(outputs['input'].shape) diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb16_cifar10.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb16_cifar10.py deleted file mode 100644 index 669e5de27e526dd46d9f06c99e478dce16f0ac9a..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb16_cifar10.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/resnet50_cifar.py', '../_base_/datasets/cifar10_bs16.py', - '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' -] diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/custom/Factory.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/custom/Factory.d.ts deleted file mode 100644 index 28a06b015f607770849c417a9fa37287905eb8bc..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/custom/Factory.d.ts +++ 
/dev/null @@ -1,5 +0,0 @@ -import Custom from './Custom'; - -export default function Factory( - config?: Custom.IConfig -): Custom; \ No newline at end of file diff --git a/spaces/Agusbs98/automatic-ecg-diagnosis/nets/bblocks.py b/spaces/Agusbs98/automatic-ecg-diagnosis/nets/bblocks.py deleted file mode 100644 index c9ca173116718a4fd977f92d419f4b45407873fa..0000000000000000000000000000000000000000 --- a/spaces/Agusbs98/automatic-ecg-diagnosis/nets/bblocks.py +++ /dev/null @@ -1,55 +0,0 @@ - -import os, sys -from libs import * -from .layers import * -from .modules import * - -class LightSEResBlock(nn.Module): - def __init__(self, - in_channels, - downsample = False, - ): - super(LightSEResBlock, self).__init__() - if downsample: - self.out_channels = in_channels*2 - self.conv_1 = DSConv1d( - in_channels, self.out_channels, - kernel_size = 7, padding = 3, stride = 2, - ) - self.identity = nn.Sequential( - DSConv1d( - in_channels, self.out_channels, - kernel_size = 1, padding = 0, stride = 2, - ), - nn.BatchNorm1d(self.out_channels), - ) - else: - self.out_channels = in_channels - self.conv_1 = DSConv1d( - in_channels, self.out_channels, - kernel_size = 7, padding = 3, stride = 1, - ) - self.identity = nn.Identity() - self.conv_2 = DSConv1d( - self.out_channels, self.out_channels, - kernel_size = 7, padding = 3, stride = 1, - ) - - self.convs = nn.Sequential( - self.conv_1, - nn.BatchNorm1d(self.out_channels), - nn.ReLU(), - nn.Dropout(0.3), - self.conv_2, - nn.BatchNorm1d(self.out_channels), - LightSEModule(self.out_channels), - ) - self.act_fn = nn.ReLU() - - def forward(self, - input, - ): - output = self.convs(input) + self.identity(input) - output = self.act_fn(output) - - return output \ No newline at end of file diff --git a/spaces/Aki004/herta-so-vits/onnxexport/model_onnx_speaker_mix.py b/spaces/Aki004/herta-so-vits/onnxexport/model_onnx_speaker_mix.py deleted file mode 100644 index c4b443162b0c82418286fd3834b4b5b010a454a8..0000000000000000000000000000000000000000 --- a/spaces/Aki004/herta-so-vits/onnxexport/model_onnx_speaker_mix.py +++ /dev/null @@ -1,363 +0,0 @@ -import torch -from torch import nn -from torch.nn import functional as F -import cluster -import modules.attentions as attentions -import modules.commons as commons -import modules.modules as modules - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -import utils -from modules.commons import init_weights, get_padding -from vdecoder.hifigan.models import Generator -from utils import f0_to_coarse - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, - gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class 
Encoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - # print(x.shape,x_lengths.shape) - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - out_channels, - hidden_channels, - kernel_size, - n_layers, - gin_channels=0, - filter_channels=None, - n_heads=None, - p_dropout=None): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.gin_channels = gin_channels - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - self.f0_emb = nn.Embedding(256, hidden_channels) - - self.enc_ = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - - def forward(self, x, x_mask, f0=None, z=None): - x = x + self.f0_emb(f0).transpose(1, 2) - x = self.enc_(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + z * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - 
norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class F0Decoder(nn.Module): - def __init__(self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - spk_channels=0): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.spk_channels = spk_channels - - self.prenet = nn.Conv1d(hidden_channels, hidden_channels, 3, padding=1) - self.decoder = attentions.FFT( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.f0_prenet = nn.Conv1d(1, hidden_channels, 3, padding=1) - self.cond = nn.Conv1d(spk_channels, hidden_channels, 1) - - def forward(self, x, norm_f0, x_mask, spk_emb=None): - x = torch.detach(x) - if spk_emb is not None: - x = x + self.cond(spk_emb) - x += self.f0_prenet(norm_f0) - x = self.prenet(x) * x_mask - x = self.decoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - ssl_dim, - n_speakers, - sampling_rate=44100, - **kwargs): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - self.ssl_dim = ssl_dim - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - self.pre = nn.Conv1d(ssl_dim, hidden_channels, kernel_size=5, padding=2) - - self.enc_p = TextEncoder( - inter_channels, - hidden_channels, - filter_channels=filter_channels, - n_heads=n_heads, - n_layers=n_layers, - kernel_size=kernel_size, - p_dropout=p_dropout - ) - hps = { - "sampling_rate": sampling_rate, - "inter_channels": inter_channels, - "resblock": resblock, - "resblock_kernel_sizes": resblock_kernel_sizes, - "resblock_dilation_sizes": resblock_dilation_sizes, - "upsample_rates": upsample_rates, - "upsample_initial_channel": upsample_initial_channel, - "upsample_kernel_sizes": upsample_kernel_sizes, - "gin_channels": 
gin_channels, - } - self.dec = Generator(h=hps) - self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - self.f0_decoder = F0Decoder( - 1, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - spk_channels=gin_channels - ) - self.emb_uv = nn.Embedding(2, hidden_channels) - self.predict_f0 = False - cluster_model_path = "kmeans_10000.pt" - import os # local import: `os` is never imported at the top of this module - if os.path.exists(cluster_model_path): - self.cluster_model = cluster.get_cluster_model(cluster_model_path) - else: - self.cluster_model = None - self.speaker_map = [] - self.export_mix = False - - def export_chara_mix(self, n_speakers_mix): - # cache one embedding per speaker so the exported graph can blend them at runtime - spkmap = [] - for i in range(n_speakers_mix): - spkmap.append(self.emb_g(torch.LongTensor([[i]])).transpose(1, 2).detach().numpy()) - self.speaker_map = torch.tensor(spkmap) - self.export_mix = True - - def forward(self, c, f0, mel2ph, uv, noise=None, g=None, cluster_infer_ratio=0.1, speaker=0, spk_mix=None): - # NOTE: `speaker` and `spk_mix` are assumed inputs: the body below used both - # names without ever defining them, so they are exposed as parameters here - decoder_inp = F.pad(c, [0, 0, 1, 0]) - mel2ph_ = mel2ph.unsqueeze(2).repeat([1, 1, c.shape[-1]]) - c = torch.gather(decoder_inp, 1, mel2ph_).transpose(1, 2) # [B, H, T] - - if self.cluster_model is not None: - # blend the content features with their nearest k-means cluster centers - cluster_c = cluster.get_cluster_center_result(self.cluster_model, c.cpu().numpy().T, speaker).T - cluster_c = torch.FloatTensor(cluster_c).to(c.device) - c = cluster_infer_ratio * cluster_c + (1 - cluster_infer_ratio) * c - - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - - if self.export_mix: - # weighted sum of the cached per-speaker embeddings - spk_mix = spk_mix.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1) - g = torch.sum(spk_mix * self.speaker_map, dim=0).transpose(1, 2) - else: - g = g.unsqueeze(0) - g = self.emb_g(g).transpose(1, 2) - - x_mask = torch.unsqueeze(commons.sequence_mask(c_lengths, c.size(2)), 1).to(c.dtype) - x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1, 2) - - if self.predict_f0: - lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) 
/ 500 - norm_lf0 = utils.normalize_f0(lf0, x_mask, uv, random_scale=False) - pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g) - f0 = (700 * (torch.pow(10, pred_lf0 * 500 / 2595) - 1)).squeeze(1) - - z_p, m_p, logs_p, c_mask = self.enc_p(x, x_mask, f0=f0_to_coarse(f0), z=noise) - z = self.flow(z_p, c_mask, g=g, reverse=True) - o = self.dec(z * c_mask, g=g, f0=f0) - return o diff --git a/spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/backbones/__init__.py b/spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/backbones/__init__.py deleted file mode 100644 index 55bd4c5d1889a1a998b52eb56793bbc1eef1b691..0000000000000000000000000000000000000000 --- a/spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/backbones/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .iresnet import iresnet18, iresnet34, iresnet50, iresnet100, iresnet200 -from .mobilefacenet import get_mbf - - -def get_model(name, **kwargs): - # resnet - if name == "r18": - return iresnet18(False, **kwargs) - elif name == "r34": - return iresnet34(False, **kwargs) - elif name == "r50": - return iresnet50(False, **kwargs) - elif name == "r100": - return iresnet100(False, **kwargs) - elif name == "r200": - return iresnet200(False, **kwargs) - elif name == "r2060": - from .iresnet2060 import iresnet2060 - return iresnet2060(False, **kwargs) - elif name == "mbf": - fp16 = kwargs.get("fp16", False) - num_features = kwargs.get("num_features", 512) - return get_mbf(fp16=fp16, num_features=num_features) - else: - raise ValueError() \ No newline at end of file diff --git a/spaces/Alpaca233/SadTalker/src/facerender/modules/dense_motion.py b/spaces/Alpaca233/SadTalker/src/facerender/modules/dense_motion.py deleted file mode 100644 index a286ead2e84ed1961335d34a3b50ab38f25e4495..0000000000000000000000000000000000000000 --- a/spaces/Alpaca233/SadTalker/src/facerender/modules/dense_motion.py +++ /dev/null @@ -1,121 +0,0 @@ -from torch import nn -import torch.nn.functional as F -import torch -from src.facerender.modules.util import Hourglass, make_coordinate_grid, kp2gaussian - -from src.facerender.sync_batchnorm import SynchronizedBatchNorm3d as BatchNorm3d - - -class DenseMotionNetwork(nn.Module): - """ - Module that predicting a dense motion from sparse motion representation given by kp_source and kp_driving - """ - - def __init__(self, block_expansion, num_blocks, max_features, num_kp, feature_channel, reshape_depth, compress, - estimate_occlusion_map=False): - super(DenseMotionNetwork, self).__init__() - # self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(feature_channel+1), max_features=max_features, num_blocks=num_blocks) - self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(compress+1), max_features=max_features, num_blocks=num_blocks) - - self.mask = nn.Conv3d(self.hourglass.out_filters, num_kp + 1, kernel_size=7, padding=3) - - self.compress = nn.Conv3d(feature_channel, compress, kernel_size=1) - self.norm = BatchNorm3d(compress, affine=True) - - if estimate_occlusion_map: - # self.occlusion = nn.Conv2d(reshape_channel*reshape_depth, 1, kernel_size=7, padding=3) - self.occlusion = nn.Conv2d(self.hourglass.out_filters*reshape_depth, 1, kernel_size=7, padding=3) - else: - self.occlusion = None - - self.num_kp = num_kp - - - def create_sparse_motions(self, feature, kp_driving, kp_source): - bs, _, d, h, w = feature.shape - identity_grid = make_coordinate_grid((d, h, w), type=kp_source['value'].type()) - identity_grid = identity_grid.view(1, 1, 
d, h, w, 3) - coordinate_grid = identity_grid - kp_driving['value'].view(bs, self.num_kp, 1, 1, 1, 3) - - # if 'jacobian' in kp_driving: - if 'jacobian' in kp_driving and kp_driving['jacobian'] is not None: - jacobian = torch.matmul(kp_source['jacobian'], torch.inverse(kp_driving['jacobian'])) - jacobian = jacobian.unsqueeze(-3).unsqueeze(-3).unsqueeze(-3) - jacobian = jacobian.repeat(1, 1, d, h, w, 1, 1) - coordinate_grid = torch.matmul(jacobian, coordinate_grid.unsqueeze(-1)) - coordinate_grid = coordinate_grid.squeeze(-1) - - - driving_to_source = coordinate_grid + kp_source['value'].view(bs, self.num_kp, 1, 1, 1, 3) # (bs, num_kp, d, h, w, 3) - - #adding background feature - identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1, 1) - sparse_motions = torch.cat([identity_grid, driving_to_source], dim=1) #bs num_kp+1 d h w 3 - - # sparse_motions = driving_to_source - - return sparse_motions - - def create_deformed_feature(self, feature, sparse_motions): - bs, _, d, h, w = feature.shape - feature_repeat = feature.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp+1, 1, 1, 1, 1, 1) # (bs, num_kp+1, 1, c, d, h, w) - feature_repeat = feature_repeat.view(bs * (self.num_kp+1), -1, d, h, w) # (bs*(num_kp+1), c, d, h, w) - sparse_motions = sparse_motions.view((bs * (self.num_kp+1), d, h, w, -1)) # (bs*(num_kp+1), d, h, w, 3) !!!! - sparse_deformed = F.grid_sample(feature_repeat, sparse_motions) - sparse_deformed = sparse_deformed.view((bs, self.num_kp+1, -1, d, h, w)) # (bs, num_kp+1, c, d, h, w) - return sparse_deformed - - def create_heatmap_representations(self, feature, kp_driving, kp_source): - spatial_size = feature.shape[3:] - gaussian_driving = kp2gaussian(kp_driving, spatial_size=spatial_size, kp_variance=0.01) - gaussian_source = kp2gaussian(kp_source, spatial_size=spatial_size, kp_variance=0.01) - heatmap = gaussian_driving - gaussian_source - - # adding background feature - zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1], spatial_size[2]).type(heatmap.type()) - heatmap = torch.cat([zeros, heatmap], dim=1) - heatmap = heatmap.unsqueeze(2) # (bs, num_kp+1, 1, d, h, w) - return heatmap - - def forward(self, feature, kp_driving, kp_source): - bs, _, d, h, w = feature.shape - - feature = self.compress(feature) - feature = self.norm(feature) - feature = F.relu(feature) - - out_dict = dict() - sparse_motion = self.create_sparse_motions(feature, kp_driving, kp_source) - deformed_feature = self.create_deformed_feature(feature, sparse_motion) - - heatmap = self.create_heatmap_representations(deformed_feature, kp_driving, kp_source) - - input_ = torch.cat([heatmap, deformed_feature], dim=2) - input_ = input_.view(bs, -1, d, h, w) - - # input = deformed_feature.view(bs, -1, d, h, w) # (bs, num_kp+1 * c, d, h, w) - - prediction = self.hourglass(input_) - - - mask = self.mask(prediction) - mask = F.softmax(mask, dim=1) - out_dict['mask'] = mask - mask = mask.unsqueeze(2) # (bs, num_kp+1, 1, d, h, w) - - zeros_mask = torch.zeros_like(mask) - mask = torch.where(mask < 1e-3, zeros_mask, mask) - - sparse_motion = sparse_motion.permute(0, 1, 5, 2, 3, 4) # (bs, num_kp+1, 3, d, h, w) - deformation = (sparse_motion * mask).sum(dim=1) # (bs, 3, d, h, w) - deformation = deformation.permute(0, 2, 3, 4, 1) # (bs, d, h, w, 3) - - out_dict['deformation'] = deformation - - if self.occlusion: - bs, c, d, h, w = prediction.shape - prediction = prediction.view(bs, -1, h, w) - occlusion_map = torch.sigmoid(self.occlusion(prediction)) - out_dict['occlusion_map'] = occlusion_map - - return 
out_dict diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/repaint.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/repaint.md deleted file mode 100644 index b7e2bcf119c12ce63fde95a2c5c689bb97da8db5..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/repaint.md +++ /dev/null @@ -1,23 +0,0 @@ - - -# RePaint scheduler - -## Overview - -DDPM-based inpainting scheduler for unsupervised inpainting with extreme masks. -Intended for use with [`RePaintPipeline`]. -Based on the paper [RePaint: Inpainting using Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2201.09865) -and the original implementation by Andreas Lugmayr et al.: https://github.com/andreas128/RePaint - -## RePaintScheduler -[[autodoc]] RePaintScheduler \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/reusing_seeds.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/reusing_seeds.md deleted file mode 100644 index 1ff84f02596ecc9cdfee2b0865d8d6a6ef34ce2e..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/reusing_seeds.md +++ /dev/null @@ -1,65 +0,0 @@ - - -# Improve image quality with deterministic generation - -[[open-in-colab]] - -A common way to improve the quality of generated images is *deterministic batch generation*: generate a batch of images, then select one image to improve with a more detailed prompt in a second round of inference. The key is to pass a list of [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html#generator) objects to the pipeline for batched image generation, and tie each `Generator` to a seed so you can reuse it for an image. - -As an example, let's use [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) and generate several versions of the following prompt: - -```py -prompt = "Labrador in the style of Vermeer" -``` - -Instantiate a pipeline with [`DiffusionPipeline.from_pretrained`] and place it on a GPU (if available): - -```python ->>> import torch ->>> from diffusers import DiffusionPipeline - ->>> pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) ->>> pipe = pipe.to("cuda") -``` - -Now, define four different `Generator`s and assign each one a seed (`0` to `3`) so you can reuse a `Generator` later for a specific image: - -```python ->>> generator = [torch.Generator(device="cuda").manual_seed(i) for i in range(4)] -``` - -Generate the images and have a look: - -```python ->>> images = pipe(prompt, generator=generator, num_images_per_prompt=4).images ->>> images -``` - -![img](https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/reusabe_seeds.jpg) - -In this example, you'll improve upon the first image, but in reality, you can use any image you want (even the image with double sets of eyes!). The first image used the `Generator` with seed `0`, so you'll reuse that `Generator` for the second round of inference. 
To improve the quality of the image, add some additional text to the prompt: - -```python -prompt = [prompt + t for t in [", highly realistic", ", artsy", ", trending", ", colorful"]] -generator = [torch.Generator(device="cuda").manual_seed(0) for i in range(4)] -``` - -Create four generators with seed `0`, and generate another batch of images, all of which should look like the first image from the previous round! - -```python ->>> images = pipe(prompt, generator=generator).images ->>> images -``` - -![img](https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/reusabe_seeds_2.jpg) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py deleted file mode 100644 index 8fc39beaac540a8d3e00bf968f1af08450f9d4cc..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py +++ /dev/null @@ -1,25 +0,0 @@ -_base_ = './fovea_r50_fpn_4x4_1x_coco.py' -model = dict( - bbox_head=dict( - with_deform=True, - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -data = dict(train=dict(pipeline=train_pipeline)) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_512x512_40k_voc12aug.py b/spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_512x512_40k_voc12aug.py deleted file mode 100644 index b17c7a12b547ee4e1cd60d667c575eab06eb071c..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_512x512_40k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './gcnet_r50-d8_512x512_40k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/AnimalEquality/chatbot/setup.py b/spaces/AnimalEquality/chatbot/setup.py deleted file mode 100644 index e3281ae9bd7b98568e77014dba1b7b353d409205..0000000000000000000000000000000000000000 --- a/spaces/AnimalEquality/chatbot/setup.py +++ /dev/null @@ -1,57 +0,0 @@ -from pkg_resources import parse_version -from configparser import ConfigParser -import setuptools, shlex -assert parse_version(setuptools.__version__)>=parse_version('36.2') - -# note: all settings are in settings.ini; edit there, not here -config = ConfigParser(delimiters=['=']) -config.read('settings.ini', encoding='utf-8') -cfg = config['DEFAULT'] - -cfg_keys = 'version description keywords author author_email'.split() -expected = cfg_keys + "lib_name user branch license status min_python audience language".split() -for o in expected: assert o in cfg, "missing expected setting: {}".format(o) -setup_cfg = {o:cfg[o] for o in cfg_keys} - -licenses = { - 'apache2': ('Apache Software 
License 2.0','OSI Approved :: Apache Software License'), - 'mit': ('MIT License', 'OSI Approved :: MIT License'), - 'gpl2': ('GNU General Public License v2', 'OSI Approved :: GNU General Public License v2 (GPLv2)'), - 'gpl3': ('GNU General Public License v3', 'OSI Approved :: GNU General Public License v3 (GPLv3)'), - 'bsd3': ('BSD License', 'OSI Approved :: BSD License'), -} -statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha', - '4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ] -py_versions = '3.6 3.7 3.8 3.9 3.10'.split() - -requirements = shlex.split(cfg.get('requirements', '')) -if cfg.get('pip_requirements'): requirements += shlex.split(cfg.get('pip_requirements', '')) -min_python = cfg['min_python'] -lic = licenses.get(cfg['license'].lower(), (cfg['license'], None)) -dev_requirements = (cfg.get('dev_requirements') or '').split() - -setuptools.setup( - name = cfg['lib_name'], - license = lic[0], - classifiers = [ - 'Development Status :: ' + statuses[int(cfg['status'])], - 'Intended Audience :: ' + cfg['audience'].title(), - 'Natural Language :: ' + cfg['language'].title(), - ] + ['Programming Language :: Python :: '+o for o in py_versions[py_versions.index(min_python):]] + (['License :: ' + lic[1] ] if lic[1] else []), - url = cfg['git_url'], - packages = setuptools.find_packages(), - include_package_data = True, - install_requires = requirements, - extras_require={ 'dev': dev_requirements }, - dependency_links = cfg.get('dep_links','').split(), - python_requires = '>=' + cfg['min_python'], - long_description = open('README.md', encoding='utf-8').read(), - long_description_content_type = 'text/markdown', - zip_safe = False, - entry_points = { - 'console_scripts': cfg.get('console_scripts','').split(), - 'nbdev': [f'{cfg.get("lib_path")}={cfg.get("lib_path")}._modidx:d'] - }, - **setup_cfg) - - diff --git a/spaces/AnnasBlackHat/Image-Downloader/README.md b/spaces/AnnasBlackHat/Image-Downloader/README.md deleted file mode 100644 index 107f5f376c58d03a1d5059613dba4542c2a435b0..0000000000000000000000000000000000000000 --- a/spaces/AnnasBlackHat/Image-Downloader/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Image Downloader -emoji: 🐠 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.1.7 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/fields.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/fields.py deleted file mode 100644 index 9d630f491d9a39644ae65564dac88eb51f0bbe78..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/fields.py +++ /dev/null @@ -1,274 +0,0 @@ -from __future__ import absolute_import - -import email.utils -import mimetypes -import re - -from .packages import six - - -def guess_content_type(filename, default="application/octet-stream"): - """ - Guess the "Content-Type" of a file. - - :param filename: - The filename to guess the "Content-Type" of using :mod:`mimetypes`. - :param default: - If no "Content-Type" can be guessed, default to `default`. - """ - if filename: - return mimetypes.guess_type(filename)[0] or default - return default - - -def format_header_param_rfc2231(name, value): - """ - Helper function to format and quote a single header parameter using the - strategy defined in RFC 2231. 
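- For example, ``format_header_param_rfc2231("filename", u"café.txt")`` - returns ``filename*=utf-8''caf%C3%A9.txt``.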
- - Particularly useful for header parameters which might contain - non-ASCII values, like file names. This follows - `RFC 2388 Section 4.4 <https://tools.ietf.org/html/rfc2388#section-4.4>`_. - - :param name: - The name of the parameter, a string expected to be ASCII only. - :param value: - The value of the parameter, provided as ``bytes`` or ``str``. - :ret: - An RFC-2231-formatted unicode string. - """ - if isinstance(value, six.binary_type): - value = value.decode("utf-8") - - if not any(ch in value for ch in '"\\\r\n'): - result = u'%s="%s"' % (name, value) - try: - result.encode("ascii") - except (UnicodeEncodeError, UnicodeDecodeError): - pass - else: - return result - - if six.PY2: # Python 2: - value = value.encode("utf-8") - - # encode_rfc2231 accepts an encoded string and returns an ascii-encoded - # string in Python 2 but accepts and returns unicode strings in Python 3 - value = email.utils.encode_rfc2231(value, "utf-8") - value = "%s*=%s" % (name, value) - - if six.PY2: # Python 2: - value = value.decode("utf-8") - - return value - - -_HTML5_REPLACEMENTS = { - u"\u0022": u"%22", - # Replace "\" with "\\". - u"\u005C": u"\u005C\u005C", -} - -# All control characters from 0x00 to 0x1F *except* 0x1B. -_HTML5_REPLACEMENTS.update( - { - six.unichr(cc): u"%{:02X}".format(cc) - for cc in range(0x00, 0x1F + 1) - if cc not in (0x1B,) - } -) - - -def _replace_multiple(value, needles_and_replacements): - def replacer(match): - return needles_and_replacements[match.group(0)] - - pattern = re.compile( - r"|".join([re.escape(needle) for needle in needles_and_replacements.keys()]) - ) - - result = pattern.sub(replacer, value) - - return result - - -def format_header_param_html5(name, value): - """ - Helper function to format and quote a single header parameter using the - HTML5 strategy. - - Particularly useful for header parameters which might contain - non-ASCII values, like file names. This follows the `HTML5 Working Draft - Section 4.10.22.7`_ and matches the behavior of curl and modern browsers. - - .. _HTML5 Working Draft Section 4.10.22.7: - https://w3c.github.io/html/sec-forms.html#multipart-form-data - - :param name: - The name of the parameter, a string expected to be ASCII only. - :param value: - The value of the parameter, provided as ``bytes`` or ``str``. - :ret: - A unicode string, stripped of troublesome characters. - """ - if isinstance(value, six.binary_type): - value = value.decode("utf-8") - - value = _replace_multiple(value, _HTML5_REPLACEMENTS) - - return u'%s="%s"' % (name, value) - - -# For backwards-compatibility. -format_header_param = format_header_param_html5 - - -class RequestField(object): - """ - A data container for request body parameters. - - :param name: - The name of this request field. Must be unicode. - :param data: - The data/value body. - :param filename: - An optional filename of the request field. Must be unicode. - :param headers: - An optional dict-like object of headers to initially use for the field. - :param header_formatter: - An optional callable that is used to encode and format the headers. By - default, this is :func:`format_header_param_html5`. 
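- - A minimal usage sketch (the field name, payload and filename here are - purely illustrative):: - - field = RequestField(u"upload", b"data", filename=u"report.txt") - field.make_multipart(content_type=u"text/plain")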
- """ - - def __init__( - self, - name, - data, - filename=None, - headers=None, - header_formatter=format_header_param_html5, - ): - self._name = name - self._filename = filename - self.data = data - self.headers = {} - if headers: - self.headers = dict(headers) - self.header_formatter = header_formatter - - @classmethod - def from_tuples(cls, fieldname, value, header_formatter=format_header_param_html5): - """ - A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. - - Supports constructing :class:`~urllib3.fields.RequestField` from - parameter of key/value strings AND key/filetuple. A filetuple is a - (filename, data, MIME type) tuple where the MIME type is optional. - For example:: - - 'foo': 'bar', - 'fakefile': ('foofile.txt', 'contents of foofile'), - 'realfile': ('barfile.txt', open('realfile').read()), - 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'), - 'nonamefile': 'contents of nonamefile field', - - Field names and filenames must be unicode. - """ - if isinstance(value, tuple): - if len(value) == 3: - filename, data, content_type = value - else: - filename, data = value - content_type = guess_content_type(filename) - else: - filename = None - content_type = None - data = value - - request_param = cls( - fieldname, data, filename=filename, header_formatter=header_formatter - ) - request_param.make_multipart(content_type=content_type) - - return request_param - - def _render_part(self, name, value): - """ - Overridable helper function to format a single header parameter. By - default, this calls ``self.header_formatter``. - - :param name: - The name of the parameter, a string expected to be ASCII only. - :param value: - The value of the parameter, provided as a unicode string. - """ - - return self.header_formatter(name, value) - - def _render_parts(self, header_parts): - """ - Helper function to format and quote a single header. - - Useful for single headers that are composed of multiple items. E.g., - 'Content-Disposition' fields. - - :param header_parts: - A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format - as `k1="v1"; k2="v2"; ...`. - """ - parts = [] - iterable = header_parts - if isinstance(header_parts, dict): - iterable = header_parts.items() - - for name, value in iterable: - if value is not None: - parts.append(self._render_part(name, value)) - - return u"; ".join(parts) - - def render_headers(self): - """ - Renders the headers for this request field. - """ - lines = [] - - sort_keys = ["Content-Disposition", "Content-Type", "Content-Location"] - for sort_key in sort_keys: - if self.headers.get(sort_key, False): - lines.append(u"%s: %s" % (sort_key, self.headers[sort_key])) - - for header_name, header_value in self.headers.items(): - if header_name not in sort_keys: - if header_value: - lines.append(u"%s: %s" % (header_name, header_value)) - - lines.append(u"\r\n") - return u"\r\n".join(lines) - - def make_multipart( - self, content_disposition=None, content_type=None, content_location=None - ): - """ - Makes this request field into a multipart request field. - - This method overrides "Content-Disposition", "Content-Type" and - "Content-Location" headers to the request parameter. - - :param content_type: - The 'Content-Type' of the request body. - :param content_location: - The 'Content-Location' of the request body. 
- - """ - self.headers["Content-Disposition"] = content_disposition or u"form-data" - self.headers["Content-Disposition"] += u"; ".join( - [ - u"", - self._render_parts( - ((u"name", self._name), (u"filename", self._filename)) - ), - ] - ) - self.headers["Content-Type"] = content_type - self.headers["Content-Location"] = content_location diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/__init__.py deleted file mode 100644 index d59226af9d7fe1b5279e99ff6e333032d1cec274..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/__init__.py +++ /dev/null @@ -1,3296 +0,0 @@ -""" -Package resource API --------------------- - -A resource is a logical file contained within a package, or a logical -subdirectory thereof. The package resource API expects resource names -to have their path parts separated with ``/``, *not* whatever the local -path separator is. Do not use os.path operations to manipulate resource -names being passed into the API. - -The package resource API is designed to work with normal filesystem packages, -.egg files, and unpacked .egg files. It can also work in a limited way with -.zip files and with custom PEP 302 loaders that support the ``get_data()`` -method. -""" - -import sys -import os -import io -import time -import re -import types -import zipfile -import zipimport -import warnings -import stat -import functools -import pkgutil -import operator -import platform -import collections -import plistlib -import email.parser -import errno -import tempfile -import textwrap -import itertools -import inspect -import ntpath -import posixpath -import importlib -from pkgutil import get_importer - -try: - import _imp -except ImportError: - # Python 3.2 compatibility - import imp as _imp - -try: - FileExistsError -except NameError: - FileExistsError = OSError - -# capture these to bypass sandboxing -from os import utime -try: - from os import mkdir, rename, unlink - WRITE_SUPPORT = True -except ImportError: - # no write support, probably under GAE - WRITE_SUPPORT = False - -from os import open as os_open -from os.path import isdir, split - -try: - import importlib.machinery as importlib_machinery - # access attribute to force import under delayed import mechanisms. - importlib_machinery.__name__ -except ImportError: - importlib_machinery = None - -from pkg_resources.extern.jaraco.text import ( - yield_lines, - drop_comment, - join_continuation, -) - -from pkg_resources.extern import appdirs -from pkg_resources.extern import packaging -__import__('pkg_resources.extern.packaging.version') -__import__('pkg_resources.extern.packaging.specifiers') -__import__('pkg_resources.extern.packaging.requirements') -__import__('pkg_resources.extern.packaging.markers') -__import__('pkg_resources.extern.packaging.utils') - -if sys.version_info < (3, 5): - raise RuntimeError("Python 3.5 or later is required") - -# declare some globals that will be defined later to -# satisfy the linters. 
-require = None -working_set = None -add_activation_listener = None -resources_stream = None -cleanup_resources = None -resource_dir = None -resource_stream = None -set_extraction_path = None -resource_isdir = None -resource_string = None -iter_entry_points = None -resource_listdir = None -resource_filename = None -resource_exists = None -_distribution_finders = None -_namespace_handlers = None -_namespace_packages = None - - -class PEP440Warning(RuntimeWarning): - """ - Used when there is an issue with a version or specifier not complying with - PEP 440. - """ - - -def parse_version(v): - try: - return packaging.version.Version(v) - except packaging.version.InvalidVersion: - warnings.warn( - f"{v} is an invalid version and will not be supported in " - "a future release", - PkgResourcesDeprecationWarning, - ) - return packaging.version.LegacyVersion(v) - - -_state_vars = {} - - -def _declare_state(vartype, **kw): - globals().update(kw) - _state_vars.update(dict.fromkeys(kw, vartype)) - - -def __getstate__(): - state = {} - g = globals() - for k, v in _state_vars.items(): - state[k] = g['_sget_' + v](g[k]) - return state - - -def __setstate__(state): - g = globals() - for k, v in state.items(): - g['_sset_' + _state_vars[k]](k, g[k], v) - return state - - -def _sget_dict(val): - return val.copy() - - -def _sset_dict(key, ob, state): - ob.clear() - ob.update(state) - - -def _sget_object(val): - return val.__getstate__() - - -def _sset_object(key, ob, state): - ob.__setstate__(state) - - -_sget_none = _sset_none = lambda *args: None - - -def get_supported_platform(): - """Return this platform's maximum compatible version. - - distutils.util.get_platform() normally reports the minimum version - of macOS that would be required to *use* extensions produced by - distutils. But what we want when checking compatibility is to know the - version of macOS that we are *running*. To allow usage of packages that - explicitly require a newer version of macOS, we must also know the - current version of the OS. - - If this condition occurs for any other platform with a version in its - platform strings, this function should be extended accordingly. 
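- For example, on an Intel machine running macOS 10.15 this returns a - string like ``'macosx-10.15-x86_64'``.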
- """ - plat = get_build_platform() - m = macosVersionString.match(plat) - if m is not None and sys.platform == "darwin": - try: - plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3)) - except ValueError: - # not macOS - pass - return plat - - -__all__ = [ - # Basic resource access and distribution/entry point discovery - 'require', 'run_script', 'get_provider', 'get_distribution', - 'load_entry_point', 'get_entry_map', 'get_entry_info', - 'iter_entry_points', - 'resource_string', 'resource_stream', 'resource_filename', - 'resource_listdir', 'resource_exists', 'resource_isdir', - - # Environmental control - 'declare_namespace', 'working_set', 'add_activation_listener', - 'find_distributions', 'set_extraction_path', 'cleanup_resources', - 'get_default_cache', - - # Primary implementation classes - 'Environment', 'WorkingSet', 'ResourceManager', - 'Distribution', 'Requirement', 'EntryPoint', - - # Exceptions - 'ResolutionError', 'VersionConflict', 'DistributionNotFound', - 'UnknownExtra', 'ExtractionError', - - # Warnings - 'PEP440Warning', - - # Parsing functions and string utilities - 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', - 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', - 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', - - # filesystem utilities - 'ensure_directory', 'normalize_path', - - # Distribution "precedence" constants - 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', - - # "Provider" interfaces, implementations, and registration/lookup APIs - 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', - 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', - 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', - 'register_finder', 'register_namespace_handler', 'register_loader_type', - 'fixup_namespace_packages', 'get_importer', - - # Warnings - 'PkgResourcesDeprecationWarning', - - # Deprecated/backward compatibility only - 'run_main', 'AvailableDistributions', -] - - -class ResolutionError(Exception): - """Abstract base for dependency resolution errors""" - - def __repr__(self): - return self.__class__.__name__ + repr(self.args) - - -class VersionConflict(ResolutionError): - """ - An already-installed version conflicts with the requested version. - - Should be initialized with the installed Distribution and the requested - Requirement. - """ - - _template = "{self.dist} is installed but {self.req} is required" - - @property - def dist(self): - return self.args[0] - - @property - def req(self): - return self.args[1] - - def report(self): - return self._template.format(**locals()) - - def with_context(self, required_by): - """ - If required_by is non-empty, return a version of self that is a - ContextualVersionConflict. - """ - if not required_by: - return self - args = self.args + (required_by,) - return ContextualVersionConflict(*args) - - -class ContextualVersionConflict(VersionConflict): - """ - A VersionConflict that accepts a third parameter, the set of the - requirements that required the installed Distribution. 
- """ - - _template = VersionConflict._template + ' by {self.required_by}' - - @property - def required_by(self): - return self.args[2] - - -class DistributionNotFound(ResolutionError): - """A requested distribution was not found""" - - _template = ("The '{self.req}' distribution was not found " - "and is required by {self.requirers_str}") - - @property - def req(self): - return self.args[0] - - @property - def requirers(self): - return self.args[1] - - @property - def requirers_str(self): - if not self.requirers: - return 'the application' - return ', '.join(self.requirers) - - def report(self): - return self._template.format(**locals()) - - def __str__(self): - return self.report() - - -class UnknownExtra(ResolutionError): - """Distribution doesn't have an "extra feature" of the given name""" - - -_provider_factories = {} - -PY_MAJOR = '{}.{}'.format(*sys.version_info) -EGG_DIST = 3 -BINARY_DIST = 2 -SOURCE_DIST = 1 -CHECKOUT_DIST = 0 -DEVELOP_DIST = -1 - - -def register_loader_type(loader_type, provider_factory): - """Register `provider_factory` to make providers for `loader_type` - - `loader_type` is the type or class of a PEP 302 ``module.__loader__``, - and `provider_factory` is a function that, passed a *module* object, - returns an ``IResourceProvider`` for that module. - """ - _provider_factories[loader_type] = provider_factory - - -def get_provider(moduleOrReq): - """Return an IResourceProvider for the named module or requirement""" - if isinstance(moduleOrReq, Requirement): - return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] - try: - module = sys.modules[moduleOrReq] - except KeyError: - __import__(moduleOrReq) - module = sys.modules[moduleOrReq] - loader = getattr(module, '__loader__', None) - return _find_adapter(_provider_factories, loader)(module) - - -def _macos_vers(_cache=[]): - if not _cache: - version = platform.mac_ver()[0] - # fallback for MacPorts - if version == '': - plist = '/System/Library/CoreServices/SystemVersion.plist' - if os.path.exists(plist): - if hasattr(plistlib, 'readPlist'): - plist_content = plistlib.readPlist(plist) - if 'ProductVersion' in plist_content: - version = plist_content['ProductVersion'] - - _cache.append(version.split('.')) - return _cache[0] - - -def _macos_arch(machine): - return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) - - -def get_build_platform(): - """Return this platform's string for platform-specific distributions - - XXX Currently this is the same as ``distutils.util.get_platform()``, but it - needs some hacks for Linux and macOS. - """ - from sysconfig import get_platform - - plat = get_platform() - if sys.platform == "darwin" and not plat.startswith('macosx-'): - try: - version = _macos_vers() - machine = os.uname()[4].replace(" ", "_") - return "macosx-%d.%d-%s" % ( - int(version[0]), int(version[1]), - _macos_arch(machine), - ) - except ValueError: - # if someone is running a non-Mac darwin system, this will fall - # through to the default implementation - pass - return plat - - -macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") -darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") -# XXX backward compat -get_platform = get_build_platform - - -def compatible_platforms(provided, required): - """Can code for the `provided` platform run on the `required` platform? - - Returns true if either platform is ``None``, or the platforms are equal. - - XXX Needs compatibility checks for Linux and other unixy OSes. 
- """ - if provided is None or required is None or provided == required: - # easy case - return True - - # macOS special cases - reqMac = macosVersionString.match(required) - if reqMac: - provMac = macosVersionString.match(provided) - - # is this a Mac package? - if not provMac: - # this is backwards compatibility for packages built before - # setuptools 0.6. All packages built after this point will - # use the new macOS designation. - provDarwin = darwinVersionString.match(provided) - if provDarwin: - dversion = int(provDarwin.group(1)) - macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) - if dversion == 7 and macosversion >= "10.3" or \ - dversion == 8 and macosversion >= "10.4": - return True - # egg isn't macOS or legacy darwin - return False - - # are they the same major version and machine type? - if provMac.group(1) != reqMac.group(1) or \ - provMac.group(3) != reqMac.group(3): - return False - - # is the required OS major update >= the provided one? - if int(provMac.group(2)) > int(reqMac.group(2)): - return False - - return True - - # XXX Linux and other platforms' special cases should go here - return False - - -def run_script(dist_spec, script_name): - """Locate distribution `dist_spec` and run its `script_name` script""" - ns = sys._getframe(1).f_globals - name = ns['__name__'] - ns.clear() - ns['__name__'] = name - require(dist_spec)[0].run_script(script_name, ns) - - -# backward compatibility -run_main = run_script - - -def get_distribution(dist): - """Return a current distribution object for a Requirement or string""" - if isinstance(dist, str): - dist = Requirement.parse(dist) - if isinstance(dist, Requirement): - dist = get_provider(dist) - if not isinstance(dist, Distribution): - raise TypeError("Expected string, Requirement, or Distribution", dist) - return dist - - -def load_entry_point(dist, group, name): - """Return `name` entry point of `group` for `dist` or raise ImportError""" - return get_distribution(dist).load_entry_point(group, name) - - -def get_entry_map(dist, group=None): - """Return the entry point map for `group`, or the full entry map""" - return get_distribution(dist).get_entry_map(group) - - -def get_entry_info(dist, group, name): - """Return the EntryPoint object for `group`+`name`, or ``None``""" - return get_distribution(dist).get_entry_info(group, name) - - -class IMetadataProvider: - def has_metadata(name): - """Does the package's distribution contain the named metadata?""" - - def get_metadata(name): - """The named metadata resource as a string""" - - def get_metadata_lines(name): - """Yield named metadata resource as list of non-blank non-comment lines - - Leading and trailing whitespace is stripped from each line, and lines - with ``#`` as the first non-blank character are omitted.""" - - def metadata_isdir(name): - """Is the named metadata a directory? 
(like ``os.path.isdir()``)""" - - def metadata_listdir(name): - """List of metadata names in the directory (like ``os.listdir()``)""" - - def run_script(script_name, namespace): - """Execute the named script in the supplied namespace dictionary""" - - -class IResourceProvider(IMetadataProvider): - """An object that provides access to package resources""" - - def get_resource_filename(manager, resource_name): - """Return a true filesystem path for `resource_name` - - `manager` must be an ``IResourceManager``""" - - def get_resource_stream(manager, resource_name): - """Return a readable file-like object for `resource_name` - - `manager` must be an ``IResourceManager``""" - - def get_resource_string(manager, resource_name): - """Return a string containing the contents of `resource_name` - - `manager` must be an ``IResourceManager``""" - - def has_resource(resource_name): - """Does the package contain the named resource?""" - - def resource_isdir(resource_name): - """Is the named resource a directory? (like ``os.path.isdir()``)""" - - def resource_listdir(resource_name): - """List of resource names in the directory (like ``os.listdir()``)""" - - -class WorkingSet: - """A collection of active distributions on sys.path (or a similar list)""" - - def __init__(self, entries=None): - """Create working set from list of path entries (default=sys.path)""" - self.entries = [] - self.entry_keys = {} - self.by_key = {} - self.normalized_to_canonical_keys = {} - self.callbacks = [] - - if entries is None: - entries = sys.path - - for entry in entries: - self.add_entry(entry) - - @classmethod - def _build_master(cls): - """ - Prepare the master working set. - """ - ws = cls() - try: - from __main__ import __requires__ - except ImportError: - # The main program does not list any requirements - return ws - - # ensure the requirements are met - try: - ws.require(__requires__) - except VersionConflict: - return cls._build_from_requirements(__requires__) - - return ws - - @classmethod - def _build_from_requirements(cls, req_spec): - """ - Build a working set from a requirement spec. Rewrites sys.path. - """ - # try it without defaults already on sys.path - # by starting with an empty path - ws = cls([]) - reqs = parse_requirements(req_spec) - dists = ws.resolve(reqs, Environment()) - for dist in dists: - ws.add(dist) - - # add any missing entries from sys.path - for entry in sys.path: - if entry not in ws.entries: - ws.add_entry(entry) - - # then copy back to sys.path - sys.path[:] = ws.entries - return ws - - def add_entry(self, entry): - """Add a path item to ``.entries``, finding any distributions on it - - ``find_distributions(entry, True)`` is used to find distributions - corresponding to the path entry, and they are added. `entry` is - always appended to ``.entries``, even if it is already present. - (This is because ``sys.path`` can contain the same value more than - once, and the ``.entries`` of the ``sys.path`` WorkingSet should always - equal ``sys.path``.) - """ - self.entry_keys.setdefault(entry, []) - self.entries.append(entry) - for dist in find_distributions(entry, True): - self.add(dist, entry, False) - - def __contains__(self, dist): - """True if `dist` is the active distribution for its project""" - return self.by_key.get(dist.key) == dist - - def find(self, req): - """Find a distribution matching requirement `req` - - If there is an active distribution for the requested project, this - returns it as long as it meets the version requirement specified by - `req`. 
But, if there is an active distribution for the project and it - does *not* meet the `req` requirement, ``VersionConflict`` is raised. - If there is no active distribution for the requested project, ``None`` - is returned. - """ - dist = self.by_key.get(req.key) - - if dist is None: - canonical_key = self.normalized_to_canonical_keys.get(req.key) - - if canonical_key is not None: - req.key = canonical_key - dist = self.by_key.get(canonical_key) - - if dist is not None and dist not in req: - # XXX add more info - raise VersionConflict(dist, req) - return dist - - def iter_entry_points(self, group, name=None): - """Yield entry point objects from `group` matching `name` - - If `name` is None, yields all entry points in `group` from all - distributions in the working set, otherwise only ones matching - both `group` and `name` are yielded (in distribution order). - """ - return ( - entry - for dist in self - for entry in dist.get_entry_map(group).values() - if name is None or name == entry.name - ) - - def run_script(self, requires, script_name): - """Locate distribution for `requires` and run `script_name` script""" - ns = sys._getframe(1).f_globals - name = ns['__name__'] - ns.clear() - ns['__name__'] = name - self.require(requires)[0].run_script(script_name, ns) - - def __iter__(self): - """Yield distributions for non-duplicate projects in the working set - - The yield order is the order in which the items' path entries were - added to the working set. - """ - seen = {} - for item in self.entries: - if item not in self.entry_keys: - # workaround a cache issue - continue - - for key in self.entry_keys[item]: - if key not in seen: - seen[key] = 1 - yield self.by_key[key] - - def add(self, dist, entry=None, insert=True, replace=False): - """Add `dist` to working set, associated with `entry` - - If `entry` is unspecified, it defaults to the ``.location`` of `dist`. - On exit from this routine, `entry` is added to the end of the working - set's ``.entries`` (if it wasn't already present). - - `dist` is only added to the working set if it's for a project that - doesn't already have a distribution in the set, unless `replace=True`. - If it's added, any callbacks registered with the ``subscribe()`` method - will be called. - """ - if insert: - dist.insert_on(self.entries, entry, replace=replace) - - if entry is None: - entry = dist.location - keys = self.entry_keys.setdefault(entry, []) - keys2 = self.entry_keys.setdefault(dist.location, []) - if not replace and dist.key in self.by_key: - # ignore hidden distros - return - - self.by_key[dist.key] = dist - normalized_name = packaging.utils.canonicalize_name(dist.key) - self.normalized_to_canonical_keys[normalized_name] = dist.key - if dist.key not in keys: - keys.append(dist.key) - if dist.key not in keys2: - keys2.append(dist.key) - self._added_new(dist) - - # FIXME: 'WorkingSet.resolve' is too complex (11) - def resolve(self, requirements, env=None, installer=None, # noqa: C901 - replace_conflicting=False, extras=None): - """List all distributions needed to (recursively) meet `requirements` - - `requirements` must be a sequence of ``Requirement`` objects. `env`, - if supplied, should be an ``Environment`` instance. If - not supplied, it defaults to all distributions available within any - entry or distribution in the working set. `installer`, if supplied, - will be invoked with each requirement that cannot be met by an - already-installed distribution; it should return a ``Distribution`` or - ``None``. 
- - Unless `replace_conflicting=True`, raises a VersionConflict exception - if - any requirements are found on the path that have the correct name but - the wrong version. Otherwise, if an `installer` is supplied it will be - invoked to obtain the correct version of the requirement and activate - it. - - `extras` is a list of the extras to be used with these requirements. - This is important because extra requirements may look like `my_req; - extra = "my_extra"`, which would otherwise be interpreted as a purely - optional requirement. Instead, we want to be able to assert that these - requirements are truly required. - """ - - # set up the stack - requirements = list(requirements)[::-1] - # set of processed requirements - processed = {} - # key -> dist - best = {} - to_activate = [] - - req_extras = _ReqExtras() - - # Mapping of requirement to set of distributions that required it; - # useful for reporting info about conflicts. - required_by = collections.defaultdict(set) - - while requirements: - # process dependencies breadth-first - req = requirements.pop(0) - if req in processed: - # Ignore cyclic or redundant dependencies - continue - - if not req_extras.markers_pass(req, extras): - continue - - dist = best.get(req.key) - if dist is None: - # Find the best distribution and add it to the map - dist = self.by_key.get(req.key) - if dist is None or (dist not in req and replace_conflicting): - ws = self - if env is None: - if dist is None: - env = Environment(self.entries) - else: - # Use an empty environment and workingset to avoid - # any further conflicts with the conflicting - # distribution - env = Environment([]) - ws = WorkingSet([]) - dist = best[req.key] = env.best_match( - req, ws, installer, - replace_conflicting=replace_conflicting - ) - if dist is None: - requirers = required_by.get(req, None) - raise DistributionNotFound(req, requirers) - to_activate.append(dist) - if dist not in req: - # Oops, the "best" so far conflicts with a dependency - dependent_req = required_by[req] - raise VersionConflict(dist, req).with_context(dependent_req) - - # push the new requirements onto the stack - new_requirements = dist.requires(req.extras)[::-1] - requirements.extend(new_requirements) - - # Register the new requirements needed by req - for new_requirement in new_requirements: - required_by[new_requirement].add(req.project_name) - req_extras[new_requirement] = req.extras - - processed[req] = True - - # return list of distros to activate - return to_activate - - def find_plugins( - self, plugin_env, full_env=None, installer=None, fallback=True): - """Find all activatable distributions in `plugin_env` - - Example usage:: - - distributions, errors = working_set.find_plugins( - Environment(plugin_dirlist) - ) - # add plugins+libs to sys.path - map(working_set.add, distributions) - # display errors - print('Could not load', errors) - - The `plugin_env` should be an ``Environment`` instance that contains - only distributions that are in the project's "plugin directory" or - directories. The `full_env`, if supplied, should be an ``Environment`` - contains all currently-available distributions. If `full_env` is not - supplied, one is created automatically from the ``WorkingSet`` this - method is called on, which will typically mean that every directory on - ``sys.path`` will be scanned for distributions. - - `installer` is a standard installer callback as used by the - ``resolve()`` method. 
The `fallback` flag indicates whether we should - attempt to resolve older versions of a plugin if the newest version - cannot be resolved. - - This method returns a 2-tuple: (`distributions`, `error_info`), where - `distributions` is a list of the distributions found in `plugin_env` - that were loadable, along with any other distributions that are needed - to resolve their dependencies. `error_info` is a dictionary mapping - unloadable plugin distributions to an exception instance describing the - error that occurred. Usually this will be a ``DistributionNotFound`` or - ``VersionConflict`` instance. - """ - - plugin_projects = list(plugin_env) - # scan project names in alphabetic order - plugin_projects.sort() - - error_info = {} - distributions = {} - - if full_env is None: - env = Environment(self.entries) - env += plugin_env - else: - env = full_env + plugin_env - - shadow_set = self.__class__([]) - # put all our entries in shadow_set - list(map(shadow_set.add, self)) - - for project_name in plugin_projects: - - for dist in plugin_env[project_name]: - - req = [dist.as_requirement()] - - try: - resolvees = shadow_set.resolve(req, env, installer) - - except ResolutionError as v: - # save error info - error_info[dist] = v - if fallback: - # try the next older version of project - continue - else: - # give up on this project, keep going - break - - else: - list(map(shadow_set.add, resolvees)) - distributions.update(dict.fromkeys(resolvees)) - - # success, no need to try any more versions of this project - break - - distributions = list(distributions) - distributions.sort() - - return distributions, error_info - - def require(self, *requirements): - """Ensure that distributions matching `requirements` are activated - - `requirements` must be a string or a (possibly-nested) sequence - thereof, specifying the distributions and versions required. The - return value is a sequence of the distributions that needed to be - activated to fulfill the requirements; all relevant distributions are - included, even if they were already activated in this working set. - """ - needed = self.resolve(parse_requirements(requirements)) - - for dist in needed: - self.add(dist) - - return needed - - def subscribe(self, callback, existing=True): - """Invoke `callback` for all distributions - - If `existing=True` (default), - call on all existing ones, as well. - """ - if callback in self.callbacks: - return - self.callbacks.append(callback) - if not existing: - return - for dist in self: - callback(dist) - - def _added_new(self, dist): - for callback in self.callbacks: - callback(dist) - - def __getstate__(self): - return ( - self.entries[:], self.entry_keys.copy(), self.by_key.copy(), - self.normalized_to_canonical_keys.copy(), self.callbacks[:] - ) - - def __setstate__(self, e_k_b_n_c): - entries, keys, by_key, normalized_to_canonical_keys, callbacks = e_k_b_n_c - self.entries = entries[:] - self.entry_keys = keys.copy() - self.by_key = by_key.copy() - self.normalized_to_canonical_keys = normalized_to_canonical_keys.copy() - self.callbacks = callbacks[:] - - -class _ReqExtras(dict): - """ - Map each requirement to the extras that demanded it. - """ - - def markers_pass(self, req, extras=None): - """ - Evaluate markers for req against each extra that - demanded it. - - Return False if the req has a marker and fails - evaluation. Otherwise, return True. 
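- For example, a requirement declared as ``simplejson; extra == "json"`` - passes only when the ``json`` extra is among those that demanded it (or - appears in `extras`).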
- """ - extra_evals = ( - req.marker.evaluate({'extra': extra}) - for extra in self.get(req, ()) + (extras or (None,)) - ) - return not req.marker or any(extra_evals) - - -class Environment: - """Searchable snapshot of distributions on a search path""" - - def __init__( - self, search_path=None, platform=get_supported_platform(), - python=PY_MAJOR): - """Snapshot distributions available on a search path - - Any distributions found on `search_path` are added to the environment. - `search_path` should be a sequence of ``sys.path`` items. If not - supplied, ``sys.path`` is used. - - `platform` is an optional string specifying the name of the platform - that platform-specific distributions must be compatible with. If - unspecified, it defaults to the current platform. `python` is an - optional string naming the desired version of Python (e.g. ``'3.6'``); - it defaults to the current version. - - You may explicitly set `platform` (and/or `python`) to ``None`` if you - wish to map *all* distributions, not just those compatible with the - running platform or Python version. - """ - self._distmap = {} - self.platform = platform - self.python = python - self.scan(search_path) - - def can_add(self, dist): - """Is distribution `dist` acceptable for this environment? - - The distribution must match the platform and python version - requirements specified when this environment was created, or False - is returned. - """ - py_compat = ( - self.python is None - or dist.py_version is None - or dist.py_version == self.python - ) - return py_compat and compatible_platforms(dist.platform, self.platform) - - def remove(self, dist): - """Remove `dist` from the environment""" - self._distmap[dist.key].remove(dist) - - def scan(self, search_path=None): - """Scan `search_path` for distributions usable in this environment - - Any distributions found are added to the environment. - `search_path` should be a sequence of ``sys.path`` items. If not - supplied, ``sys.path`` is used. Only distributions conforming to - the platform/python version defined at initialization are added. - """ - if search_path is None: - search_path = sys.path - - for item in search_path: - for dist in find_distributions(item): - self.add(dist) - - def __getitem__(self, project_name): - """Return a newest-to-oldest list of distributions for `project_name` - - Uses case-insensitive `project_name` comparison, assuming all the - project's distributions use their project's name converted to all - lowercase as their key. - - """ - distribution_key = project_name.lower() - return self._distmap.get(distribution_key, []) - - def add(self, dist): - """Add `dist` if we ``can_add()`` it and it has not already been added - """ - if self.can_add(dist) and dist.has_version(): - dists = self._distmap.setdefault(dist.key, []) - if dist not in dists: - dists.append(dist) - dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) - - def best_match( - self, req, working_set, installer=None, replace_conflicting=False): - """Find distribution best matching `req` and usable on `working_set` - - This calls the ``find(req)`` method of the `working_set` to see if a - suitable distribution is already active. (This may raise - ``VersionConflict`` if an unsuitable version of the project is already - active in the specified `working_set`.) If a suitable distribution - isn't active, this method returns the newest distribution in the - environment that meets the ``Requirement`` in `req`. 
If no suitable - distribution is found, and `installer` is supplied, then the result of - calling the environment's ``obtain(req, installer)`` method will be - returned. - """ - try: - dist = working_set.find(req) - except VersionConflict: - if not replace_conflicting: - raise - dist = None - if dist is not None: - return dist - for dist in self[req.key]: - if dist in req: - return dist - # try to download/install - return self.obtain(req, installer) - - def obtain(self, requirement, installer=None): - """Obtain a distribution matching `requirement` (e.g. via download) - - Obtain a distro that matches requirement (e.g. via download). In the - base ``Environment`` class, this routine just returns - ``installer(requirement)``, unless `installer` is None, in which case - None is returned instead. This method is a hook that allows subclasses - to attempt other ways of obtaining a distribution before falling back - to the `installer` argument.""" - if installer is not None: - return installer(requirement) - - def __iter__(self): - """Yield the unique project names of the available distributions""" - for key in self._distmap.keys(): - if self[key]: - yield key - - def __iadd__(self, other): - """In-place addition of a distribution or environment""" - if isinstance(other, Distribution): - self.add(other) - elif isinstance(other, Environment): - for project in other: - for dist in other[project]: - self.add(dist) - else: - raise TypeError("Can't add %r to environment" % (other,)) - return self - - def __add__(self, other): - """Add an environment or distribution to an environment""" - new = self.__class__([], platform=None, python=None) - for env in self, other: - new += env - return new - - -# XXX backward compatibility -AvailableDistributions = Environment - - -class ExtractionError(RuntimeError): - """An error occurred extracting a resource - - The following attributes are available from instances of this exception: - - manager - The resource manager that raised this exception - - cache_path - The base directory for resource extraction - - original_error - The exception instance that caused extraction to fail - """ - - -class ResourceManager: - """Manage resource extraction and packages""" - extraction_path = None - - def __init__(self): - self.cached_files = {} - - def resource_exists(self, package_or_requirement, resource_name): - """Does the named resource exist?""" - return get_provider(package_or_requirement).has_resource(resource_name) - - def resource_isdir(self, package_or_requirement, resource_name): - """Is the named resource an existing directory?""" - return get_provider(package_or_requirement).resource_isdir( - resource_name - ) - - def resource_filename(self, package_or_requirement, resource_name): - """Return a true filesystem path for specified resource""" - return get_provider(package_or_requirement).get_resource_filename( - self, resource_name - ) - - def resource_stream(self, package_or_requirement, resource_name): - """Return a readable file-like object for specified resource""" - return get_provider(package_or_requirement).get_resource_stream( - self, resource_name - ) - - def resource_string(self, package_or_requirement, resource_name): - """Return specified resource as a string""" - return get_provider(package_or_requirement).get_resource_string( - self, resource_name - ) - - def resource_listdir(self, package_or_requirement, resource_name): - """List the contents of the named resource directory""" - return get_provider(package_or_requirement).resource_listdir( - 
resource_name - ) - - def extraction_error(self): - """Give an error message for problems extracting file(s)""" - - old_exc = sys.exc_info()[1] - cache_path = self.extraction_path or get_default_cache() - - tmpl = textwrap.dedent(""" - Can't extract file(s) to egg cache - - The following error occurred while trying to extract file(s) - to the Python egg cache: - - {old_exc} - - The Python egg cache directory is currently set to: - - {cache_path} - - Perhaps your account does not have write access to this directory? - You can change the cache directory by setting the PYTHON_EGG_CACHE - environment variable to point to an accessible directory. - """).lstrip() - err = ExtractionError(tmpl.format(**locals())) - err.manager = self - err.cache_path = cache_path - err.original_error = old_exc - raise err - - def get_cache_path(self, archive_name, names=()): - """Return absolute location in cache for `archive_name` and `names` - - The parent directory of the resulting path will be created if it does - not already exist. `archive_name` should be the base filename of the - enclosing egg (which may not be the name of the enclosing zipfile!), - including its ".egg" extension. `names`, if provided, should be a - sequence of path name parts "under" the egg's extraction location. - - This method should only be called by resource providers that need to - obtain an extraction location, and only for names they intend to - extract, as it tracks the generated names for possible cleanup later. - """ - extract_path = self.extraction_path or get_default_cache() - target_path = os.path.join(extract_path, archive_name + '-tmp', *names) - try: - _bypass_ensure_directory(target_path) - except Exception: - self.extraction_error() - - self._warn_unsafe_extraction_path(extract_path) - - self.cached_files[target_path] = 1 - return target_path - - @staticmethod - def _warn_unsafe_extraction_path(path): - """ - If the default extraction path is overridden and set to an insecure - location, such as /tmp, it opens up an opportunity for an attacker to - replace an extracted file with an unauthorized payload. Warn the user - if a known insecure location is used. - - See Distribute #375 for more details. - """ - if os.name == 'nt' and not path.startswith(os.environ['windir']): - # On Windows, permissions are generally restrictive by default - # and temp directories are not writable by other users, so - # bypass the warning. - return - mode = os.stat(path).st_mode - if mode & stat.S_IWOTH or mode & stat.S_IWGRP: - msg = ( - "Extraction path is writable by group/others " - "and vulnerable to attack when " - "used with get_resource_filename ({path}). " - "Consider a more secure " - "location (set with .set_extraction_path or the " - "PYTHON_EGG_CACHE environment variable)." - ).format(**locals()) - warnings.warn(msg, UserWarning) - - def postprocess(self, tempname, filename): - """Perform any platform-specific postprocessing of `tempname` - - This is where Mac header rewrites should be done; other platforms don't - have anything special they should do. - - Resource providers should call this method ONLY after successfully - extracting a compressed resource. They must NOT call it on resources - that are already in the filesystem. - - `tempname` is the current (temporary) name of the file, and `filename` - is the name it will be renamed to by the caller after this routine - returns. 
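The `ResourceManager` methods above are also bound as module-level functions (see `_initialize()` near the end of this file). A minimal sketch reading a resource out of an installed package; the package and resource names are only examples:

```python
import pkg_resources

if pkg_resources.resource_exists("setuptools", "__init__.py"):
    data = pkg_resources.resource_string("setuptools", "__init__.py")
    print(len(data), "bytes")  # note: resource_string returns bytes
```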
- """ - - if os.name == 'posix': - # Make the resource executable - mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 - os.chmod(tempname, mode) - - def set_extraction_path(self, path): - """Set the base path where resources will be extracted to, if needed. - - If you do not call this routine before any extractions take place, the - path defaults to the return value of ``get_default_cache()``. (Which - is based on the ``PYTHON_EGG_CACHE`` environment variable, with various - platform-specific fallbacks. See that routine's documentation for more - details.) - - Resources are extracted to subdirectories of this path based upon - information given by the ``IResourceProvider``. You may set this to a - temporary directory, but then you must call ``cleanup_resources()`` to - delete the extracted files when done. There is no guarantee that - ``cleanup_resources()`` will be able to remove all extracted files. - - (Note: you may not change the extraction path for a given resource - manager once resources have been extracted, unless you first call - ``cleanup_resources()``.) - """ - if self.cached_files: - raise ValueError( - "Can't change extraction path, files already extracted" - ) - - self.extraction_path = path - - def cleanup_resources(self, force=False): - """ - Delete all extracted resource files and directories, returning a list - of the file and directory names that could not be successfully removed. - This function does not have any concurrency protection, so it should - generally only be called when the extraction path is a temporary - directory exclusive to a single process. This method is not - automatically called; you must call it explicitly or register it as an - ``atexit`` function if you wish to ensure cleanup of a temporary - directory used for extractions. - """ - # XXX - - -def get_default_cache(): - """ - Return the ``PYTHON_EGG_CACHE`` environment variable - or a platform-relevant user cache dir for an app - named "Python-Eggs". - """ - return ( - os.environ.get('PYTHON_EGG_CACHE') - or appdirs.user_cache_dir(appname='Python-Eggs') - ) - - -def safe_name(name): - """Convert an arbitrary string to a standard distribution name - - Any runs of non-alphanumeric/. characters are replaced with a single '-'. - """ - return re.sub('[^A-Za-z0-9.]+', '-', name) - - -def safe_version(version): - """ - Convert an arbitrary string to a standard version string - """ - try: - # normalize the version - return str(packaging.version.Version(version)) - except packaging.version.InvalidVersion: - version = version.replace(' ', '.') - return re.sub('[^A-Za-z0-9.]+', '-', version) - - -def safe_extra(extra): - """Convert an arbitrary string to a standard 'extra' name - - Any runs of non-alphanumeric characters are replaced with a single '_', - and the result is always lowercased. - """ - return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower() - - -def to_filename(name): - """Convert a project or version name to its filename-escaped form - - Any '-' characters are currently replaced with '_'. - """ - return name.replace('-', '_') - - -def invalid_marker(text): - """ - Validate text as a PEP 508 environment marker; return an exception - if invalid or False otherwise. - """ - try: - evaluate_marker(text) - except SyntaxError as e: - e.filename = None - e.lineno = None - return e - return False - - -def evaluate_marker(text, extra=None): - """ - Evaluate a PEP 508 environment marker. - Return a boolean indicating the marker result in this environment. - Raise SyntaxError if marker is invalid. 
- - This implementation uses the 'pyparsing' module. - """ - try: - marker = packaging.markers.Marker(text) - return marker.evaluate() - except packaging.markers.InvalidMarker as e: - raise SyntaxError(e) from e - - -class NullProvider: - """Try to implement resources and metadata for arbitrary PEP 302 loaders""" - - egg_name = None - egg_info = None - loader = None - - def __init__(self, module): - self.loader = getattr(module, '__loader__', None) - self.module_path = os.path.dirname(getattr(module, '__file__', '')) - - def get_resource_filename(self, manager, resource_name): - return self._fn(self.module_path, resource_name) - - def get_resource_stream(self, manager, resource_name): - return io.BytesIO(self.get_resource_string(manager, resource_name)) - - def get_resource_string(self, manager, resource_name): - return self._get(self._fn(self.module_path, resource_name)) - - def has_resource(self, resource_name): - return self._has(self._fn(self.module_path, resource_name)) - - def _get_metadata_path(self, name): - return self._fn(self.egg_info, name) - - def has_metadata(self, name): - if not self.egg_info: - return self.egg_info - - path = self._get_metadata_path(name) - return self._has(path) - - def get_metadata(self, name): - if not self.egg_info: - return "" - path = self._get_metadata_path(name) - value = self._get(path) - try: - return value.decode('utf-8') - except UnicodeDecodeError as exc: - # Include the path in the error message to simplify - # troubleshooting, and without changing the exception type. - exc.reason += ' in {} file at path: {}'.format(name, path) - raise - - def get_metadata_lines(self, name): - return yield_lines(self.get_metadata(name)) - - def resource_isdir(self, resource_name): - return self._isdir(self._fn(self.module_path, resource_name)) - - def metadata_isdir(self, name): - return self.egg_info and self._isdir(self._fn(self.egg_info, name)) - - def resource_listdir(self, resource_name): - return self._listdir(self._fn(self.module_path, resource_name)) - - def metadata_listdir(self, name): - if self.egg_info: - return self._listdir(self._fn(self.egg_info, name)) - return [] - - def run_script(self, script_name, namespace): - script = 'scripts/' + script_name - if not self.has_metadata(script): - raise ResolutionError( - "Script {script!r} not found in metadata at {self.egg_info!r}" - .format(**locals()), - ) - script_text = self.get_metadata(script).replace('\r\n', '\n') - script_text = script_text.replace('\r', '\n') - script_filename = self._fn(self.egg_info, script) - namespace['__file__'] = script_filename - if os.path.exists(script_filename): - with open(script_filename) as fid: - source = fid.read() - code = compile(source, script_filename, 'exec') - exec(code, namespace, namespace) - else: - from linecache import cache - cache[script_filename] = ( - len(script_text), 0, script_text.split('\n'), script_filename - ) - script_code = compile(script_text, script_filename, 'exec') - exec(script_code, namespace, namespace) - - def _has(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _isdir(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _listdir(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _fn(self, base, resource_name): - self._validate_resource_path(resource_name) - if resource_name: - return os.path.join(base, *resource_name.split('/')) - return 
base - - @staticmethod - def _validate_resource_path(path): - """ - Validate the resource paths according to the docs. - https://setuptools.pypa.io/en/latest/pkg_resources.html#basic-resource-access - - >>> warned = getfixture('recwarn') - >>> warnings.simplefilter('always') - >>> vrp = NullProvider._validate_resource_path - >>> vrp('foo/bar.txt') - >>> bool(warned) - False - >>> vrp('../foo/bar.txt') - >>> bool(warned) - True - >>> warned.clear() - >>> vrp('/foo/bar.txt') - >>> bool(warned) - True - >>> vrp('foo/../../bar.txt') - >>> bool(warned) - True - >>> warned.clear() - >>> vrp('foo/f../bar.txt') - >>> bool(warned) - False - - Windows path separators are straight-up disallowed. - >>> vrp(r'\\foo/bar.txt') - Traceback (most recent call last): - ... - ValueError: Use of .. or absolute path in a resource path \ -is not allowed. - - >>> vrp(r'C:\\foo/bar.txt') - Traceback (most recent call last): - ... - ValueError: Use of .. or absolute path in a resource path \ -is not allowed. - - Blank values are allowed - - >>> vrp('') - >>> bool(warned) - False - - Non-string values are not. - - >>> vrp(None) - Traceback (most recent call last): - ... - AttributeError: ... - """ - invalid = ( - os.path.pardir in path.split(posixpath.sep) or - posixpath.isabs(path) or - ntpath.isabs(path) - ) - if not invalid: - return - - msg = "Use of .. or absolute path in a resource path is not allowed." - - # Aggressively disallow Windows absolute paths - if ntpath.isabs(path) and not posixpath.isabs(path): - raise ValueError(msg) - - # for compatibility, warn; in future - # raise ValueError(msg) - warnings.warn( - msg[:-1] + " and will raise exceptions in a future release.", - DeprecationWarning, - stacklevel=4, - ) - - def _get(self, path): - if hasattr(self.loader, 'get_data'): - return self.loader.get_data(path) - raise NotImplementedError( - "Can't perform this operation for loaders without 'get_data()'" - ) - - -register_loader_type(object, NullProvider) - - -def _parents(path): - """ - yield all parents of path including path - """ - last = None - while path != last: - yield path - last = path - path, _ = os.path.split(path) - - -class EggProvider(NullProvider): - """Provider based on a virtual filesystem""" - - def __init__(self, module): - super().__init__(module) - self._setup_prefix() - - def _setup_prefix(self): - # Assume that metadata may be nested inside a "basket" - # of multiple eggs and use module_path instead of .archive. 
- eggs = filter(_is_egg_path, _parents(self.module_path)) - egg = next(eggs, None) - egg and self._set_egg(egg) - - def _set_egg(self, path): - self.egg_name = os.path.basename(path) - self.egg_info = os.path.join(path, 'EGG-INFO') - self.egg_root = path - - -class DefaultProvider(EggProvider): - """Provides access to package resources in the filesystem""" - - def _has(self, path): - return os.path.exists(path) - - def _isdir(self, path): - return os.path.isdir(path) - - def _listdir(self, path): - return os.listdir(path) - - def get_resource_stream(self, manager, resource_name): - return open(self._fn(self.module_path, resource_name), 'rb') - - def _get(self, path): - with open(path, 'rb') as stream: - return stream.read() - - @classmethod - def _register(cls): - loader_names = 'SourceFileLoader', 'SourcelessFileLoader', - for name in loader_names: - loader_cls = getattr(importlib_machinery, name, type(None)) - register_loader_type(loader_cls, cls) - - -DefaultProvider._register() - - -class EmptyProvider(NullProvider): - """Provider that returns nothing for all requests""" - - module_path = None - - _isdir = _has = lambda self, path: False - - def _get(self, path): - return '' - - def _listdir(self, path): - return [] - - def __init__(self): - pass - - -empty_provider = EmptyProvider() - - -class ZipManifests(dict): - """ - zip manifest builder - """ - - @classmethod - def build(cls, path): - """ - Build a dictionary similar to the zipimport directory - caches, except instead of tuples, store ZipInfo objects. - - Use a platform-specific path separator (os.sep) for the path keys - for compatibility with pypy on Windows. - """ - with zipfile.ZipFile(path) as zfile: - items = ( - ( - name.replace('/', os.sep), - zfile.getinfo(name), - ) - for name in zfile.namelist() - ) - return dict(items) - - load = build - - -class MemoizedZipManifests(ZipManifests): - """ - Memoized zipfile manifests. - """ - manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') - - def load(self, path): - """ - Load a manifest at path or return a suitable manifest already loaded. - """ - path = os.path.normpath(path) - mtime = os.stat(path).st_mtime - - if path not in self or self[path].mtime != mtime: - manifest = self.build(path) - self[path] = self.manifest_mod(manifest, mtime) - - return self[path].manifest - - -class ZipProvider(EggProvider): - """Resource support for zips and eggs""" - - eagers = None - _zip_manifests = MemoizedZipManifests() - - def __init__(self, module): - super().__init__(module) - self.zip_pre = self.loader.archive + os.sep - - def _zipinfo_name(self, fspath): - # Convert a virtual filename (full path to file) into a zipfile subpath - # usable with the zipimport directory cache for our target archive - fspath = fspath.rstrip(os.sep) - if fspath == self.loader.archive: - return '' - if fspath.startswith(self.zip_pre): - return fspath[len(self.zip_pre):] - raise AssertionError( - "%s is not a subpath of %s" % (fspath, self.zip_pre) - ) - - def _parts(self, zip_path): - # Convert a zipfile subpath into an egg-relative path part list. 
- # pseudo-fs path - fspath = self.zip_pre + zip_path - if fspath.startswith(self.egg_root + os.sep): - return fspath[len(self.egg_root) + 1:].split(os.sep) - raise AssertionError( - "%s is not a subpath of %s" % (fspath, self.egg_root) - ) - - @property - def zipinfo(self): - return self._zip_manifests.load(self.loader.archive) - - def get_resource_filename(self, manager, resource_name): - if not self.egg_name: - raise NotImplementedError( - "resource_filename() only supported for .egg, not .zip" - ) - # no need to lock for extraction, since we use temp names - zip_path = self._resource_to_zip(resource_name) - eagers = self._get_eager_resources() - if '/'.join(self._parts(zip_path)) in eagers: - for name in eagers: - self._extract_resource(manager, self._eager_to_zip(name)) - return self._extract_resource(manager, zip_path) - - @staticmethod - def _get_date_and_size(zip_stat): - size = zip_stat.file_size - # ymdhms+wday, yday, dst - date_time = zip_stat.date_time + (0, 0, -1) - # 1980 offset already done - timestamp = time.mktime(date_time) - return timestamp, size - - # FIXME: 'ZipProvider._extract_resource' is too complex (12) - def _extract_resource(self, manager, zip_path): # noqa: C901 - - if zip_path in self._index(): - for name in self._index()[zip_path]: - last = self._extract_resource( - manager, os.path.join(zip_path, name) - ) - # return the extracted directory name - return os.path.dirname(last) - - timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) - - if not WRITE_SUPPORT: - raise IOError('"os.rename" and "os.unlink" are not supported ' - 'on this platform') - try: - - real_path = manager.get_cache_path( - self.egg_name, self._parts(zip_path) - ) - - if self._is_current(real_path, zip_path): - return real_path - - outf, tmpnam = _mkstemp( - ".$extract", - dir=os.path.dirname(real_path), - ) - os.write(outf, self.loader.get_data(zip_path)) - os.close(outf) - utime(tmpnam, (timestamp, timestamp)) - manager.postprocess(tmpnam, real_path) - - try: - rename(tmpnam, real_path) - - except os.error: - if os.path.isfile(real_path): - if self._is_current(real_path, zip_path): - # the file became current since it was checked above, - # so proceed. 
- return real_path - # Windows, del old file and retry - elif os.name == 'nt': - unlink(real_path) - rename(tmpnam, real_path) - return real_path - raise - - except os.error: - # report a user-friendly error - manager.extraction_error() - - return real_path - - def _is_current(self, file_path, zip_path): - """ - Return True if the file_path is current for this zip_path - """ - timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) - if not os.path.isfile(file_path): - return False - stat = os.stat(file_path) - if stat.st_size != size or stat.st_mtime != timestamp: - return False - # check that the contents match - zip_contents = self.loader.get_data(zip_path) - with open(file_path, 'rb') as f: - file_contents = f.read() - return zip_contents == file_contents - - def _get_eager_resources(self): - if self.eagers is None: - eagers = [] - for name in ('native_libs.txt', 'eager_resources.txt'): - if self.has_metadata(name): - eagers.extend(self.get_metadata_lines(name)) - self.eagers = eagers - return self.eagers - - def _index(self): - try: - return self._dirindex - except AttributeError: - ind = {} - for path in self.zipinfo: - parts = path.split(os.sep) - while parts: - parent = os.sep.join(parts[:-1]) - if parent in ind: - ind[parent].append(parts[-1]) - break - else: - ind[parent] = [parts.pop()] - self._dirindex = ind - return ind - - def _has(self, fspath): - zip_path = self._zipinfo_name(fspath) - return zip_path in self.zipinfo or zip_path in self._index() - - def _isdir(self, fspath): - return self._zipinfo_name(fspath) in self._index() - - def _listdir(self, fspath): - return list(self._index().get(self._zipinfo_name(fspath), ())) - - def _eager_to_zip(self, resource_name): - return self._zipinfo_name(self._fn(self.egg_root, resource_name)) - - def _resource_to_zip(self, resource_name): - return self._zipinfo_name(self._fn(self.module_path, resource_name)) - - -register_loader_type(zipimport.zipimporter, ZipProvider) - - -class FileMetadata(EmptyProvider): - """Metadata handler for standalone PKG-INFO files - - Usage:: - - metadata = FileMetadata("/path/to/PKG-INFO") - - This provider rejects all data and metadata requests except for PKG-INFO, - which is treated as existing, and will be the contents of the file at - the provided location. 
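A sketch of the `FileMetadata` usage pattern from its docstring above; both paths are hypothetical placeholders:

```python
import pkg_resources

metadata = pkg_resources.FileMetadata("/path/to/PKG-INFO")  # hypothetical path
dist = pkg_resources.Distribution.from_filename(
    "/path/to/Example-1.0.egg", metadata=metadata  # hypothetical filename
)
print(dist)  # project name and version are parsed from the filename
```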
- """ - - def __init__(self, path): - self.path = path - - def _get_metadata_path(self, name): - return self.path - - def has_metadata(self, name): - return name == 'PKG-INFO' and os.path.isfile(self.path) - - def get_metadata(self, name): - if name != 'PKG-INFO': - raise KeyError("No metadata except PKG-INFO is available") - - with io.open(self.path, encoding='utf-8', errors="replace") as f: - metadata = f.read() - self._warn_on_replacement(metadata) - return metadata - - def _warn_on_replacement(self, metadata): - replacement_char = '�' - if replacement_char in metadata: - tmpl = "{self.path} could not be properly decoded in UTF-8" - msg = tmpl.format(**locals()) - warnings.warn(msg) - - def get_metadata_lines(self, name): - return yield_lines(self.get_metadata(name)) - - -class PathMetadata(DefaultProvider): - """Metadata provider for egg directories - - Usage:: - - # Development eggs: - - egg_info = "/path/to/PackageName.egg-info" - base_dir = os.path.dirname(egg_info) - metadata = PathMetadata(base_dir, egg_info) - dist_name = os.path.splitext(os.path.basename(egg_info))[0] - dist = Distribution(basedir, project_name=dist_name, metadata=metadata) - - # Unpacked egg directories: - - egg_path = "/path/to/PackageName-ver-pyver-etc.egg" - metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) - dist = Distribution.from_filename(egg_path, metadata=metadata) - """ - - def __init__(self, path, egg_info): - self.module_path = path - self.egg_info = egg_info - - -class EggMetadata(ZipProvider): - """Metadata provider for .egg files""" - - def __init__(self, importer): - """Create a metadata provider from a zipimporter""" - - self.zip_pre = importer.archive + os.sep - self.loader = importer - if importer.prefix: - self.module_path = os.path.join(importer.archive, importer.prefix) - else: - self.module_path = importer.archive - self._setup_prefix() - - -_declare_state('dict', _distribution_finders={}) - - -def register_finder(importer_type, distribution_finder): - """Register `distribution_finder` to find distributions in sys.path items - - `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item - handler), and `distribution_finder` is a callable that, passed a path - item and the importer instance, yields ``Distribution`` instances found on - that path item. See ``pkg_resources.find_on_path`` for an example.""" - _distribution_finders[importer_type] = distribution_finder - - -def find_distributions(path_item, only=False): - """Yield distributions accessible via `path_item`""" - importer = get_importer(path_item) - finder = _find_adapter(_distribution_finders, importer) - return finder(importer, path_item, only) - - -def find_eggs_in_zip(importer, path_item, only=False): - """ - Find eggs in zip files; possibly multiple nested eggs. 
- """ - if importer.archive.endswith('.whl'): - # wheels are not supported with this finder - # they don't have PKG-INFO metadata, and won't ever contain eggs - return - metadata = EggMetadata(importer) - if metadata.has_metadata('PKG-INFO'): - yield Distribution.from_filename(path_item, metadata=metadata) - if only: - # don't yield nested distros - return - for subitem in metadata.resource_listdir(''): - if _is_egg_path(subitem): - subpath = os.path.join(path_item, subitem) - dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath) - for dist in dists: - yield dist - elif subitem.lower().endswith(('.dist-info', '.egg-info')): - subpath = os.path.join(path_item, subitem) - submeta = EggMetadata(zipimport.zipimporter(subpath)) - submeta.egg_info = subpath - yield Distribution.from_location(path_item, subitem, submeta) - - -register_finder(zipimport.zipimporter, find_eggs_in_zip) - - -def find_nothing(importer, path_item, only=False): - return () - - -register_finder(object, find_nothing) - - -def _by_version_descending(names): - """ - Given a list of filenames, return them in descending order - by version number. - - >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg' - >>> _by_version_descending(names) - ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'bar', 'foo'] - >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg' - >>> _by_version_descending(names) - ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg'] - >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg' - >>> _by_version_descending(names) - ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg'] - """ - def try_parse(name): - """ - Attempt to parse as a version or return a null version. - """ - try: - return packaging.version.Version(name) - except Exception: - return packaging.version.Version('0') - - def _by_version(name): - """ - Parse each component of the filename - """ - name, ext = os.path.splitext(name) - parts = itertools.chain(name.split('-'), [ext]) - return [try_parse(part) for part in parts] - - return sorted(names, key=_by_version, reverse=True) - - -def find_on_path(importer, path_item, only=False): - """Yield distributions accessible on a sys.path directory""" - path_item = _normalize_cached(path_item) - - if _is_unpacked_egg(path_item): - yield Distribution.from_filename( - path_item, metadata=PathMetadata( - path_item, os.path.join(path_item, 'EGG-INFO') - ) - ) - return - - entries = ( - os.path.join(path_item, child) - for child in safe_listdir(path_item) - ) - - # for performance, before sorting by version, - # screen entries for only those that will yield - # distributions - filtered = ( - entry - for entry in entries - if dist_factory(path_item, entry, only) - ) - - # scan for .egg and .egg-info in directory - path_item_entries = _by_version_descending(filtered) - for entry in path_item_entries: - fullpath = os.path.join(path_item, entry) - factory = dist_factory(path_item, entry, only) - for dist in factory(fullpath): - yield dist - - -def dist_factory(path_item, entry, only): - """Return a dist_factory for the given entry.""" - lower = entry.lower() - is_egg_info = lower.endswith('.egg-info') - is_dist_info = ( - lower.endswith('.dist-info') and - os.path.isdir(os.path.join(path_item, entry)) - ) - is_meta = is_egg_info or is_dist_info - return ( - distributions_from_metadata - if is_meta else - find_distributions - if not only and _is_egg_path(entry) else - resolve_egg_link - if not only and lower.endswith('.egg-link') else - NoDists() - ) - - -class NoDists: - """ - 
>>> bool(NoDists()) - False - - >>> list(NoDists()('anything')) - [] - """ - def __bool__(self): - return False - - def __call__(self, fullpath): - return iter(()) - - -def safe_listdir(path): - """ - Attempt to list contents of path, but suppress some exceptions. - """ - try: - return os.listdir(path) - except (PermissionError, NotADirectoryError): - pass - except OSError as e: - # Ignore the directory if does not exist, not a directory or - # permission denied - if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT): - raise - return () - - -def distributions_from_metadata(path): - root = os.path.dirname(path) - if os.path.isdir(path): - if len(os.listdir(path)) == 0: - # empty metadata dir; skip - return - metadata = PathMetadata(root, path) - else: - metadata = FileMetadata(path) - entry = os.path.basename(path) - yield Distribution.from_location( - root, entry, metadata, precedence=DEVELOP_DIST, - ) - - -def non_empty_lines(path): - """ - Yield non-empty lines from file at path - """ - with open(path) as f: - for line in f: - line = line.strip() - if line: - yield line - - -def resolve_egg_link(path): - """ - Given a path to an .egg-link, resolve distributions - present in the referenced path. - """ - referenced_paths = non_empty_lines(path) - resolved_paths = ( - os.path.join(os.path.dirname(path), ref) - for ref in referenced_paths - ) - dist_groups = map(find_distributions, resolved_paths) - return next(dist_groups, ()) - - -register_finder(pkgutil.ImpImporter, find_on_path) - -if hasattr(importlib_machinery, 'FileFinder'): - register_finder(importlib_machinery.FileFinder, find_on_path) - -_declare_state('dict', _namespace_handlers={}) -_declare_state('dict', _namespace_packages={}) - - -def register_namespace_handler(importer_type, namespace_handler): - """Register `namespace_handler` to declare namespace packages - - `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item - handler), and `namespace_handler` is a callable like this:: - - def namespace_handler(importer, path_entry, moduleName, module): - # return a path_entry to use for child packages - - Namespace handlers are only called if the importer object has already - agreed that it can handle the relevant path item, and they should only - return a subpath if the module __path__ does not already contain an - equivalent subpath. For an example namespace handler, see - ``pkg_resources.file_ns_handler``. 
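The classic (now legacy) consumer of this namespace-package machinery is a one-line `__init__.py`, as in this sketch:

```python
# Contents of a legacy namespace package's __init__.py:
__import__('pkg_resources').declare_namespace(__name__)
```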
- """ - _namespace_handlers[importer_type] = namespace_handler - - -def _handle_ns(packageName, path_item): - """Ensure that named package includes a subpath of path_item (if needed)""" - - importer = get_importer(path_item) - if importer is None: - return None - - # use find_spec (PEP 451) and fall-back to find_module (PEP 302) - try: - spec = importer.find_spec(packageName) - except AttributeError: - # capture warnings due to #1111 - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - loader = importer.find_module(packageName) - else: - loader = spec.loader if spec else None - - if loader is None: - return None - module = sys.modules.get(packageName) - if module is None: - module = sys.modules[packageName] = types.ModuleType(packageName) - module.__path__ = [] - _set_parent_ns(packageName) - elif not hasattr(module, '__path__'): - raise TypeError("Not a package:", packageName) - handler = _find_adapter(_namespace_handlers, importer) - subpath = handler(importer, path_item, packageName, module) - if subpath is not None: - path = module.__path__ - path.append(subpath) - importlib.import_module(packageName) - _rebuild_mod_path(path, packageName, module) - return subpath - - -def _rebuild_mod_path(orig_path, package_name, module): - """ - Rebuild module.__path__ ensuring that all entries are ordered - corresponding to their sys.path order - """ - sys_path = [_normalize_cached(p) for p in sys.path] - - def safe_sys_path_index(entry): - """ - Workaround for #520 and #513. - """ - try: - return sys_path.index(entry) - except ValueError: - return float('inf') - - def position_in_sys_path(path): - """ - Return the ordinal of the path based on its position in sys.path - """ - path_parts = path.split(os.sep) - module_parts = package_name.count('.') + 1 - parts = path_parts[:-module_parts] - return safe_sys_path_index(_normalize_cached(os.sep.join(parts))) - - new_path = sorted(orig_path, key=position_in_sys_path) - new_path = [_normalize_cached(p) for p in new_path] - - if isinstance(module.__path__, list): - module.__path__[:] = new_path - else: - module.__path__ = new_path - - -def declare_namespace(packageName): - """Declare that package 'packageName' is a namespace package""" - - _imp.acquire_lock() - try: - if packageName in _namespace_packages: - return - - path = sys.path - parent, _, _ = packageName.rpartition('.') - - if parent: - declare_namespace(parent) - if parent not in _namespace_packages: - __import__(parent) - try: - path = sys.modules[parent].__path__ - except AttributeError as e: - raise TypeError("Not a package:", parent) from e - - # Track what packages are namespaces, so when new path items are added, - # they can be updated - _namespace_packages.setdefault(parent or None, []).append(packageName) - _namespace_packages.setdefault(packageName, []) - - for path_item in path: - # Ensure all the parent's path items are reflected in the child, - # if they apply - _handle_ns(packageName, path_item) - - finally: - _imp.release_lock() - - -def fixup_namespace_packages(path_item, parent=None): - """Ensure that previously-declared namespace packages include path_item""" - _imp.acquire_lock() - try: - for package in _namespace_packages.get(parent, ()): - subpath = _handle_ns(package, path_item) - if subpath: - fixup_namespace_packages(subpath, package) - finally: - _imp.release_lock() - - -def file_ns_handler(importer, path_item, packageName, module): - """Compute an ns-package subpath for a filesystem or zipfile importer""" - - subpath = os.path.join(path_item, 
packageName.split('.')[-1])
-    normalized = _normalize_cached(subpath)
-    for item in module.__path__:
-        if _normalize_cached(item) == normalized:
-            break
-    else:
-        # Only return the path if it's not already there
-        return subpath
-
-
-register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
-register_namespace_handler(zipimport.zipimporter, file_ns_handler)
-
-if hasattr(importlib_machinery, 'FileFinder'):
-    register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
-
-
-def null_ns_handler(importer, path_item, packageName, module):
-    return None
-
-
-register_namespace_handler(object, null_ns_handler)
-
-
-def normalize_path(filename):
-    """Normalize a file/dir name for comparison purposes"""
-    return os.path.normcase(os.path.realpath(os.path.normpath(
-        _cygwin_patch(filename))))
-
-
-def _cygwin_patch(filename):  # pragma: nocover
-    """
-    Contrary to POSIX 2008, on Cygwin, getcwd (3) contains
-    symlink components. Using
-    os.path.abspath() works around this limitation. A fix in os.getcwd()
-    would probably better, in Cygwin even more so, except
-    that this seems to be by design...
-    """
-    return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
-
-
-def _normalize_cached(filename, _cache={}):
-    try:
-        return _cache[filename]
-    except KeyError:
-        _cache[filename] = result = normalize_path(filename)
-        return result
-
-
-def _is_egg_path(path):
-    """
-    Determine if given path appears to be an egg.
-    """
-    return _is_zip_egg(path) or _is_unpacked_egg(path)
-
-
-def _is_zip_egg(path):
-    return (
-        path.lower().endswith('.egg') and
-        os.path.isfile(path) and
-        zipfile.is_zipfile(path)
-    )
-
-
-def _is_unpacked_egg(path):
-    """
-    Determine if given path appears to be an unpacked egg.
-    """
-    return (
-        path.lower().endswith('.egg') and
-        os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
-    )
-
-
-def _set_parent_ns(packageName):
-    parts = packageName.split('.')
-    name = parts.pop()
-    if parts:
-        parent = '.'.join(parts)
-        setattr(sys.modules[parent], name, sys.modules[packageName])
-
-
-MODULE = re.compile(r"\w+(\.\w+)*$").match
-EGG_NAME = re.compile(
-    r"""
-    (?P<name>[^-]+) (
-        -(?P<ver>[^-]+) (
-            -py(?P<pyver>[^-]+) (
-                -(?P<plat>.+)
-            )?
-        )?
-    )?
-    """,
-    re.VERBOSE | re.IGNORECASE,
-).match
-
-
-class EntryPoint:
-    """Object representing an advertised importable object"""
-
-    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
-        if not MODULE(module_name):
-            raise ValueError("Invalid module name", module_name)
-        self.name = name
-        self.module_name = module_name
-        self.attrs = tuple(attrs)
-        self.extras = tuple(extras)
-        self.dist = dist
-
-    def __str__(self):
-        s = "%s = %s" % (self.name, self.module_name)
-        if self.attrs:
-            s += ':' + '.'.join(self.attrs)
-        if self.extras:
-            s += ' [%s]' % ','.join(self.extras)
-        return s
-
-    def __repr__(self):
-        return "EntryPoint.parse(%r)" % str(self)
-
-    def load(self, require=True, *args, **kwargs):
-        """
-        Require packages for this EntryPoint, then resolve it.
-        """
-        if not require or args or kwargs:
-            warnings.warn(
-                "Parameters to load are deprecated. Call .resolve and "
-                ".require separately.",
-                PkgResourcesDeprecationWarning,
-                stacklevel=2,
-            )
-        if require:
-            self.require(*args, **kwargs)
-        return self.resolve()
-
-    def resolve(self):
-        """
-        Resolve the entry point from its module and attrs.
- """ - module = __import__(self.module_name, fromlist=['__name__'], level=0) - try: - return functools.reduce(getattr, self.attrs, module) - except AttributeError as exc: - raise ImportError(str(exc)) from exc - - def require(self, env=None, installer=None): - if self.extras and not self.dist: - raise UnknownExtra("Can't require() without a distribution", self) - - # Get the requirements for this entry point with all its extras and - # then resolve them. We have to pass `extras` along when resolving so - # that the working set knows what extras we want. Otherwise, for - # dist-info distributions, the working set will assume that the - # requirements for that extra are purely optional and skip over them. - reqs = self.dist.requires(self.extras) - items = working_set.resolve(reqs, env, installer, extras=self.extras) - list(map(working_set.add, items)) - - pattern = re.compile( - r'\s*' - r'(?P.+?)\s*' - r'=\s*' - r'(?P[\w.]+)\s*' - r'(:\s*(?P[\w.]+))?\s*' - r'(?P\[.*\])?\s*$' - ) - - @classmethod - def parse(cls, src, dist=None): - """Parse a single entry point from string `src` - - Entry point syntax follows the form:: - - name = some.module:some.attr [extra1, extra2] - - The entry name and module name are required, but the ``:attrs`` and - ``[extras]`` parts are optional - """ - m = cls.pattern.match(src) - if not m: - msg = "EntryPoint must be in 'name=module:attrs [extras]' format" - raise ValueError(msg, src) - res = m.groupdict() - extras = cls._parse_extras(res['extras']) - attrs = res['attr'].split('.') if res['attr'] else () - return cls(res['name'], res['module'], attrs, extras, dist) - - @classmethod - def _parse_extras(cls, extras_spec): - if not extras_spec: - return () - req = Requirement.parse('x' + extras_spec) - if req.specs: - raise ValueError() - return req.extras - - @classmethod - def parse_group(cls, group, lines, dist=None): - """Parse an entry point group""" - if not MODULE(group): - raise ValueError("Invalid group name", group) - this = {} - for line in yield_lines(lines): - ep = cls.parse(line, dist) - if ep.name in this: - raise ValueError("Duplicate entry point", group, ep.name) - this[ep.name] = ep - return this - - @classmethod - def parse_map(cls, data, dist=None): - """Parse a map of entry point groups""" - if isinstance(data, dict): - data = data.items() - else: - data = split_sections(data) - maps = {} - for group, lines in data: - if group is None: - if not lines: - continue - raise ValueError("Entry points must be listed in groups") - group = group.strip() - if group in maps: - raise ValueError("Duplicate group name", group) - maps[group] = cls.parse_group(group, lines, dist) - return maps - - -def _version_from_file(lines): - """ - Given an iterable of lines from a Metadata file, return - the value of the Version field, if present, or None otherwise. 
- """ - def is_version_line(line): - return line.lower().startswith('version:') - version_lines = filter(is_version_line, lines) - line = next(iter(version_lines), '') - _, _, value = line.partition(':') - return safe_version(value.strip()) or None - - -class Distribution: - """Wrap an actual or potential sys.path entry w/metadata""" - PKG_INFO = 'PKG-INFO' - - def __init__( - self, location=None, metadata=None, project_name=None, - version=None, py_version=PY_MAJOR, platform=None, - precedence=EGG_DIST): - self.project_name = safe_name(project_name or 'Unknown') - if version is not None: - self._version = safe_version(version) - self.py_version = py_version - self.platform = platform - self.location = location - self.precedence = precedence - self._provider = metadata or empty_provider - - @classmethod - def from_location(cls, location, basename, metadata=None, **kw): - project_name, version, py_version, platform = [None] * 4 - basename, ext = os.path.splitext(basename) - if ext.lower() in _distributionImpl: - cls = _distributionImpl[ext.lower()] - - match = EGG_NAME(basename) - if match: - project_name, version, py_version, platform = match.group( - 'name', 'ver', 'pyver', 'plat' - ) - return cls( - location, metadata, project_name=project_name, version=version, - py_version=py_version, platform=platform, **kw - )._reload_version() - - def _reload_version(self): - return self - - @property - def hashcmp(self): - return ( - self.parsed_version, - self.precedence, - self.key, - self.location, - self.py_version or '', - self.platform or '', - ) - - def __hash__(self): - return hash(self.hashcmp) - - def __lt__(self, other): - return self.hashcmp < other.hashcmp - - def __le__(self, other): - return self.hashcmp <= other.hashcmp - - def __gt__(self, other): - return self.hashcmp > other.hashcmp - - def __ge__(self, other): - return self.hashcmp >= other.hashcmp - - def __eq__(self, other): - if not isinstance(other, self.__class__): - # It's not a Distribution, so they are not equal - return False - return self.hashcmp == other.hashcmp - - def __ne__(self, other): - return not self == other - - # These properties have to be lazy so that we don't have to load any - # metadata until/unless it's actually needed. (i.e., some distributions - # may not know their name or version without loading PKG-INFO) - - @property - def key(self): - try: - return self._key - except AttributeError: - self._key = key = self.project_name.lower() - return key - - @property - def parsed_version(self): - if not hasattr(self, "_parsed_version"): - self._parsed_version = parse_version(self.version) - - return self._parsed_version - - def _warn_legacy_version(self): - LV = packaging.version.LegacyVersion - is_legacy = isinstance(self._parsed_version, LV) - if not is_legacy: - return - - # While an empty version is technically a legacy version and - # is not a valid PEP 440 version, it's also unlikely to - # actually come from someone and instead it is more likely that - # it comes from setuptools attempting to parse a filename and - # including it in the list. So for that we'll gate this warning - # on if the version is anything at all or not. - if not self.version: - return - - tmpl = textwrap.dedent(""" - '{project_name} ({version})' is being parsed as a legacy, - non PEP 440, - version. You may find odd behavior and sort order. - In particular it will be sorted as less than 0.0. It - is recommended to migrate to PEP 440 compatible - versions. 
- """).strip().replace('\n', ' ') - - warnings.warn(tmpl.format(**vars(self)), PEP440Warning) - - @property - def version(self): - try: - return self._version - except AttributeError as e: - version = self._get_version() - if version is None: - path = self._get_metadata_path_for_display(self.PKG_INFO) - msg = ( - "Missing 'Version:' header and/or {} file at path: {}" - ).format(self.PKG_INFO, path) - raise ValueError(msg, self) from e - - return version - - @property - def _dep_map(self): - """ - A map of extra to its list of (direct) requirements - for this distribution, including the null extra. - """ - try: - return self.__dep_map - except AttributeError: - self.__dep_map = self._filter_extras(self._build_dep_map()) - return self.__dep_map - - @staticmethod - def _filter_extras(dm): - """ - Given a mapping of extras to dependencies, strip off - environment markers and filter out any dependencies - not matching the markers. - """ - for extra in list(filter(None, dm)): - new_extra = extra - reqs = dm.pop(extra) - new_extra, _, marker = extra.partition(':') - fails_marker = marker and ( - invalid_marker(marker) - or not evaluate_marker(marker) - ) - if fails_marker: - reqs = [] - new_extra = safe_extra(new_extra) or None - - dm.setdefault(new_extra, []).extend(reqs) - return dm - - def _build_dep_map(self): - dm = {} - for name in 'requires.txt', 'depends.txt': - for extra, reqs in split_sections(self._get_metadata(name)): - dm.setdefault(extra, []).extend(parse_requirements(reqs)) - return dm - - def requires(self, extras=()): - """List of Requirements needed for this distro if `extras` are used""" - dm = self._dep_map - deps = [] - deps.extend(dm.get(None, ())) - for ext in extras: - try: - deps.extend(dm[safe_extra(ext)]) - except KeyError as e: - raise UnknownExtra( - "%s has no such extra feature %r" % (self, ext) - ) from e - return deps - - def _get_metadata_path_for_display(self, name): - """ - Return the path to the given metadata file, if available. - """ - try: - # We need to access _get_metadata_path() on the provider object - # directly rather than through this class's __getattr__() - # since _get_metadata_path() is marked private. - path = self._provider._get_metadata_path(name) - - # Handle exceptions e.g. in case the distribution's metadata - # provider doesn't support _get_metadata_path(). 
- except Exception: - return '[could not detect]' - - return path - - def _get_metadata(self, name): - if self.has_metadata(name): - for line in self.get_metadata_lines(name): - yield line - - def _get_version(self): - lines = self._get_metadata(self.PKG_INFO) - version = _version_from_file(lines) - - return version - - def activate(self, path=None, replace=False): - """Ensure distribution is importable on `path` (default=sys.path)""" - if path is None: - path = sys.path - self.insert_on(path, replace=replace) - if path is sys.path: - fixup_namespace_packages(self.location) - for pkg in self._get_metadata('namespace_packages.txt'): - if pkg in sys.modules: - declare_namespace(pkg) - - def egg_name(self): - """Return what this distribution's standard .egg filename should be""" - filename = "%s-%s-py%s" % ( - to_filename(self.project_name), to_filename(self.version), - self.py_version or PY_MAJOR - ) - - if self.platform: - filename += '-' + self.platform - return filename - - def __repr__(self): - if self.location: - return "%s (%s)" % (self, self.location) - else: - return str(self) - - def __str__(self): - try: - version = getattr(self, 'version', None) - except ValueError: - version = None - version = version or "[unknown version]" - return "%s %s" % (self.project_name, version) - - def __getattr__(self, attr): - """Delegate all unrecognized public attributes to .metadata provider""" - if attr.startswith('_'): - raise AttributeError(attr) - return getattr(self._provider, attr) - - def __dir__(self): - return list( - set(super(Distribution, self).__dir__()) - | set( - attr for attr in self._provider.__dir__() - if not attr.startswith('_') - ) - ) - - @classmethod - def from_filename(cls, filename, metadata=None, **kw): - return cls.from_location( - _normalize_cached(filename), os.path.basename(filename), metadata, - **kw - ) - - def as_requirement(self): - """Return a ``Requirement`` that matches this distribution exactly""" - if isinstance(self.parsed_version, packaging.version.Version): - spec = "%s==%s" % (self.project_name, self.parsed_version) - else: - spec = "%s===%s" % (self.project_name, self.parsed_version) - - return Requirement.parse(spec) - - def load_entry_point(self, group, name): - """Return the `name` entry point of `group` or raise ImportError""" - ep = self.get_entry_info(group, name) - if ep is None: - raise ImportError("Entry point %r not found" % ((group, name),)) - return ep.load() - - def get_entry_map(self, group=None): - """Return the entry point map for `group`, or the full entry map""" - try: - ep_map = self._ep_map - except AttributeError: - ep_map = self._ep_map = EntryPoint.parse_map( - self._get_metadata('entry_points.txt'), self - ) - if group is not None: - return ep_map.get(group, {}) - return ep_map - - def get_entry_info(self, group, name): - """Return the EntryPoint object for `group`+`name`, or ``None``""" - return self.get_entry_map(group).get(name) - - # FIXME: 'Distribution.insert_on' is too complex (13) - def insert_on(self, path, loc=None, replace=False): # noqa: C901 - """Ensure self.location is on path - - If replace=False (default): - - If location is already in path anywhere, do nothing. - - Else: - - If it's an egg and its parent directory is on path, - insert just ahead of the parent. - - Else: add to the end of path. - If replace=True: - - If location is already on path anywhere (not eggs) - or higher priority than its parent (eggs) - do nothing. 
- - Else: - - If it's an egg and its parent directory is on path, - insert just ahead of the parent, - removing any lower-priority entries. - - Else: add it to the front of path. - """ - - loc = loc or self.location - if not loc: - return - - nloc = _normalize_cached(loc) - bdir = os.path.dirname(nloc) - npath = [(p and _normalize_cached(p) or p) for p in path] - - for p, item in enumerate(npath): - if item == nloc: - if replace: - break - else: - # don't modify path (even removing duplicates) if - # found and not replace - return - elif item == bdir and self.precedence == EGG_DIST: - # if it's an .egg, give it precedence over its directory - # UNLESS it's already been added to sys.path and replace=False - if (not replace) and nloc in npath[p:]: - return - if path is sys.path: - self.check_version_conflict() - path.insert(p, loc) - npath.insert(p, nloc) - break - else: - if path is sys.path: - self.check_version_conflict() - if replace: - path.insert(0, loc) - else: - path.append(loc) - return - - # p is the spot where we found or inserted loc; now remove duplicates - while True: - try: - np = npath.index(nloc, p + 1) - except ValueError: - break - else: - del npath[np], path[np] - # ha! - p = np - - return - - def check_version_conflict(self): - if self.key == 'setuptools': - # ignore the inevitable setuptools self-conflicts :( - return - - nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) - loc = normalize_path(self.location) - for modname in self._get_metadata('top_level.txt'): - if (modname not in sys.modules or modname in nsp - or modname in _namespace_packages): - continue - if modname in ('pkg_resources', 'setuptools', 'site'): - continue - fn = getattr(sys.modules[modname], '__file__', None) - if fn and (normalize_path(fn).startswith(loc) or - fn.startswith(self.location)): - continue - issue_warning( - "Module %s was already imported from %s, but %s is being added" - " to sys.path" % (modname, fn, self.location), - ) - - def has_version(self): - try: - self.version - except ValueError: - issue_warning("Unbuilt egg for " + repr(self)) - return False - return True - - def clone(self, **kw): - """Copy this distribution, substituting in any changed keyword args""" - names = 'project_name version py_version platform location precedence' - for attr in names.split(): - kw.setdefault(attr, getattr(self, attr, None)) - kw.setdefault('metadata', self._provider) - return self.__class__(**kw) - - @property - def extras(self): - return [dep for dep in self._dep_map if dep] - - -class EggInfoDistribution(Distribution): - def _reload_version(self): - """ - Packages installed by distutils (e.g. numpy or scipy), - which uses an old safe_version, and so - their version numbers can get mangled when - converted to filenames (e.g., 1.11.0.dev0+2329eae to - 1.11.0.dev0_2329eae). These distributions will not be - parsed properly - downstream by Distribution and safe_version, so - take an extra step and try to get the version number from - the metadata file itself instead of the filename. - """ - md_version = self._get_version() - if md_version: - self._version = md_version - return self - - -class DistInfoDistribution(Distribution): - """ - Wrap an actual or potential sys.path entry - w/metadata, .dist-info style. 
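A sketch of the `Distribution` API in practice, assuming `setuptools` is installed in the running environment:

```python
import pkg_resources

dist = pkg_resources.get_distribution("setuptools")
print(dist.as_requirement())              # exact pin for this installed version
print([str(r) for r in dist.requires()])  # direct dependencies
```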
- """ - PKG_INFO = 'METADATA' - EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") - - @property - def _parsed_pkg_info(self): - """Parse and cache metadata""" - try: - return self._pkg_info - except AttributeError: - metadata = self.get_metadata(self.PKG_INFO) - self._pkg_info = email.parser.Parser().parsestr(metadata) - return self._pkg_info - - @property - def _dep_map(self): - try: - return self.__dep_map - except AttributeError: - self.__dep_map = self._compute_dependencies() - return self.__dep_map - - def _compute_dependencies(self): - """Recompute this distribution's dependencies.""" - dm = self.__dep_map = {None: []} - - reqs = [] - # Including any condition expressions - for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: - reqs.extend(parse_requirements(req)) - - def reqs_for_extra(extra): - for req in reqs: - if not req.marker or req.marker.evaluate({'extra': extra}): - yield req - - common = types.MappingProxyType(dict.fromkeys(reqs_for_extra(None))) - dm[None].extend(common) - - for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: - s_extra = safe_extra(extra.strip()) - dm[s_extra] = [r for r in reqs_for_extra(extra) if r not in common] - - return dm - - -_distributionImpl = { - '.egg': Distribution, - '.egg-info': EggInfoDistribution, - '.dist-info': DistInfoDistribution, -} - - -def issue_warning(*args, **kw): - level = 1 - g = globals() - try: - # find the first stack frame that is *not* code in - # the pkg_resources module, to use for the warning - while sys._getframe(level).f_globals is g: - level += 1 - except ValueError: - pass - warnings.warn(stacklevel=level + 1, *args, **kw) - - -def parse_requirements(strs): - """ - Yield ``Requirement`` objects for each specification in `strs`. - - `strs` must be a string, or a (possibly-nested) iterable thereof. - """ - return map(Requirement, join_continuation(map(drop_comment, yield_lines(strs)))) - - -class RequirementParseError(packaging.requirements.InvalidRequirement): - "Compatibility wrapper for InvalidRequirement" - - -class Requirement(packaging.requirements.Requirement): - def __init__(self, requirement_string): - """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" - super(Requirement, self).__init__(requirement_string) - self.unsafe_name = self.name - project_name = safe_name(self.name) - self.project_name, self.key = project_name, project_name.lower() - self.specs = [ - (spec.operator, spec.version) for spec in self.specifier] - self.extras = tuple(map(safe_extra, self.extras)) - self.hashCmp = ( - self.key, - self.url, - self.specifier, - frozenset(self.extras), - str(self.marker) if self.marker else None, - ) - self.__hash = hash(self.hashCmp) - - def __eq__(self, other): - return ( - isinstance(other, Requirement) and - self.hashCmp == other.hashCmp - ) - - def __ne__(self, other): - return not self == other - - def __contains__(self, item): - if isinstance(item, Distribution): - if item.key != self.key: - return False - - item = item.version - - # Allow prereleases always in order to match the previous behavior of - # this method. In the future this should be smarter and follow PEP 440 - # more accurately. - return self.specifier.contains(item, prereleases=True) - - def __hash__(self): - return self.__hash - - def __repr__(self): - return "Requirement.parse(%r)" % str(self) - - @staticmethod - def parse(s): - req, = parse_requirements(s) - return req - - -def _always_object(classes): - """ - Ensure object appears in the mro even - for old-style classes. 
- """ - if object not in classes: - return classes + (object,) - return classes - - -def _find_adapter(registry, ob): - """Return an adapter factory for `ob` from `registry`""" - types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob)))) - for t in types: - if t in registry: - return registry[t] - - -def ensure_directory(path): - """Ensure that the parent directory of `path` exists""" - dirname = os.path.dirname(path) - os.makedirs(dirname, exist_ok=True) - - -def _bypass_ensure_directory(path): - """Sandbox-bypassing version of ensure_directory()""" - if not WRITE_SUPPORT: - raise IOError('"os.mkdir" not supported on this platform.') - dirname, filename = split(path) - if dirname and filename and not isdir(dirname): - _bypass_ensure_directory(dirname) - try: - mkdir(dirname, 0o755) - except FileExistsError: - pass - - -def split_sections(s): - """Split a string or iterable thereof into (section, content) pairs - - Each ``section`` is a stripped version of the section header ("[section]") - and each ``content`` is a list of stripped lines excluding blank lines and - comment-only lines. If there are any such lines before the first section - header, they're returned in a first ``section`` of ``None``. - """ - section = None - content = [] - for line in yield_lines(s): - if line.startswith("["): - if line.endswith("]"): - if section or content: - yield section, content - section = line[1:-1].strip() - content = [] - else: - raise ValueError("Invalid section heading", line) - else: - content.append(line) - - # wrap up last segment - yield section, content - - -def _mkstemp(*args, **kw): - old_open = os.open - try: - # temporarily bypass sandboxing - os.open = os_open - return tempfile.mkstemp(*args, **kw) - finally: - # and then put it back - os.open = old_open - - -# Silence the PEP440Warning by default, so that end users don't get hit by it -# randomly just because they use pkg_resources. We want to append the rule -# because we want earlier uses of filterwarnings to take precedence over this -# one. -warnings.filterwarnings("ignore", category=PEP440Warning, append=True) - - -# from jaraco.functools 1.3 -def _call_aside(f, *args, **kwargs): - f(*args, **kwargs) - return f - - -@_call_aside -def _initialize(g=globals()): - "Set up global resource manager (deliberately not state-saved)" - manager = ResourceManager() - g['_manager'] = manager - g.update( - (name, getattr(manager, name)) - for name in dir(manager) - if not name.startswith('_') - ) - - -class PkgResourcesDeprecationWarning(Warning): - """ - Base class for warning about deprecations in ``pkg_resources`` - - This class is not derived from ``DeprecationWarning``, and as such is - visible by default. - """ - - -@_call_aside -def _initialize_master_working_set(): - """ - Prepare the master working set and make the ``require()`` - API available. - - This function has explicit effects on the global state - of pkg_resources. It is intended to be invoked once at - the initialization of this module. - - Invocation by other packages is unsupported and done - at their own risk. 
- """ - working_set = WorkingSet._build_master() - _declare_state('object', working_set=working_set) - - require = working_set.require - iter_entry_points = working_set.iter_entry_points - add_activation_listener = working_set.subscribe - run_script = working_set.run_script - # backward compatibility - run_main = run_script - # Activate all distributions already on sys.path with replace=False and - # ensure that all distributions added to the working set in the future - # (e.g. by calling ``require()``) will get activated as well, - # with higher priority (replace=True). - tuple( - dist.activate(replace=False) - for dist in working_set - ) - add_activation_listener( - lambda dist: dist.activate(replace=True), - existing=False, - ) - working_set.entries = [] - # match order - list(map(working_set.add_entry, sys.path)) - globals().update(locals()) diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/notes/compatibility.md b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/notes/compatibility.md deleted file mode 100644 index 83d93f51c056c598c1209f9a21a4e04407b827f0..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/notes/compatibility.md +++ /dev/null @@ -1,84 +0,0 @@ -# Compatibility with Other Libraries - -## Compatibility with Detectron (and maskrcnn-benchmark) - -Detectron2 addresses some legacy issues left in Detectron. As a result, their models -are not compatible: -running inference with the same model weights will produce different results in the two code bases. - -The major differences regarding inference are: - -- The height and width of a box with corners (x1, y1) and (x2, y2) is now computed more naturally as - width = x2 - x1 and height = y2 - y1; - In Detectron, a "+ 1" was added both height and width. - - Note that the relevant ops in Caffe2 have [adopted this change of convention](https://github.com/pytorch/pytorch/pull/20550) - with an extra option. - So it is still possible to run inference with a Detectron2-trained model in Caffe2. - - The change in height/width calculations most notably changes: - - encoding/decoding in bounding box regression. - - non-maximum suppression. The effect here is very negligible, though. - -- RPN now uses simpler anchors with fewer quantization artifacts. - - In Detectron, the anchors were quantized and - [do not have accurate areas](https://github.com/facebookresearch/Detectron/issues/227). - In Detectron2, the anchors are center-aligned to feature grid points and not quantized. - -- Classification layers have a different ordering of class labels. - - This involves any trainable parameter with shape (..., num_categories + 1, ...). - In Detectron2, integer labels [0, K-1] correspond to the K = num_categories object categories - and the label "K" corresponds to the special "background" category. - In Detectron, label "0" means background, and labels [1, K] correspond to the K categories. - -- ROIAlign is implemented differently. The new implementation is [available in Caffe2](https://github.com/pytorch/pytorch/pull/23706). - - 1. All the ROIs are shifted by half a pixel compared to Detectron in order to create better image-feature-map alignment. - See `layers/roi_align.py` for details. - To enable the old behavior, use `ROIAlign(aligned=False)`, or `POOLER_TYPE=ROIAlign` instead of - `ROIAlignV2` (the default). - - 1. The ROIs are not required to have a minimum size of 1. 
- This will lead to tiny differences in the output, but should be negligible. - -- Mask inference function is different. - - In Detectron2, the "paste_mask" function is different and should be more accurate than in Detectron. This change - can improve mask AP on COCO by ~0.5% absolute. - -There are some other differences in training as well, but they won't affect -model-level compatibility. The major ones are: - -- We fixed a [bug](https://github.com/facebookresearch/Detectron/issues/459) in - Detectron, by making `RPN.POST_NMS_TOPK_TRAIN` per-image, rather than per-batch. - The fix may lead to a small accuracy drop for a few models (e.g. keypoint - detection) and will require some parameter tuning to match the Detectron results. -- For simplicity, we change the default loss in bounding box regression to L1 loss, instead of smooth L1 loss. - We have observed that this tends to slightly decrease box AP50 while improving box AP for higher - overlap thresholds (and leading to a slight overall improvement in box AP). -- We interpret the coordinates in COCO bounding box and segmentation annotations - as coordinates in range `[0, width]` or `[0, height]`. The coordinates in - COCO keypoint annotations are interpreted as pixel indices in range `[0, width - 1]` or `[0, height - 1]`. - Note that this affects how flip augmentation is implemented. - - -[This article](https://ppwwyyxx.com/blog/2021/Where-are-Pixels/) -explains more details on the above mentioned issues -about pixels, coordinates, and "+1"s. - - -## Compatibility with Caffe2 - -As mentioned above, despite the incompatibilities with Detectron, the relevant -ops have been implemented in Caffe2. -Therefore, models trained with detectron2 can be converted in Caffe2. -See [Deployment](../tutorials/deployment.md) for the tutorial. - -## Compatibility with TensorFlow - -Most ops are available in TensorFlow, although some tiny differences in -the implementation of resize / ROIAlign / padding need to be addressed. -A working conversion script is provided by [tensorpack Faster R-CNN](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN/convert_d2) -to run a standard detectron2 model in TensorFlow. 
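The two conventions above are the usual source of silent accuracy drops when porting weights between the code bases. A minimal sketch of what they look like in code (written for this note, not taken from either repository; `remap_detectron_labels` is a hypothetical helper name):

```python
# Written for illustration; not code from Detectron or Detectron2.
import numpy as np

def box_hw_detectron2(x1, y1, x2, y2):
    # Detectron2: size computed naturally, no "+ 1".
    return y2 - y1, x2 - x1

def box_hw_detectron(x1, y1, x2, y2):
    # Legacy Detectron: "+ 1" added to both height and width.
    return y2 - y1 + 1, x2 - x1 + 1

def remap_detectron_labels(labels, num_categories):
    # Detectron:  0 = background, 1..K = object categories.
    # Detectron2: 0..K-1 = object categories, K = background.
    labels = np.asarray(labels)
    return np.where(labels == 0, num_categories, labels - 1)

print(box_hw_detectron2(0, 0, 10, 20))  # (20, 10)
print(box_hw_detectron(0, 0, 10, 20))   # (21, 11)
print(remap_detectron_labels([0, 1, 80], num_categories=80))  # [80  0 79]
```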
diff --git a/spaces/Awiny/Image2Paragraph/models/segment_models/semantic_segment_anything_model.py b/spaces/Awiny/Image2Paragraph/models/segment_models/semantic_segment_anything_model.py deleted file mode 100644 index 2664ad717c105c988546299d22180129751363d1..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/segment_models/semantic_segment_anything_model.py +++ /dev/null @@ -1,165 +0,0 @@ -from transformers import (CLIPProcessor, CLIPModel, AutoProcessor, CLIPSegForImageSegmentation, - OneFormerProcessor, OneFormerForUniversalSegmentation, - BlipProcessor, BlipForConditionalGeneration) -import torch -import mmcv -import torch.nn.functional as F -import numpy as np -import spacy -from PIL import Image -import pycocotools.mask as maskUtils -from models.segment_models.configs.ade20k_id2label import CONFIG as CONFIG_ADE20K_ID2LABEL -from models.segment_models.configs.coco_id2label import CONFIG as CONFIG_COCO_ID2LABEL -from utils.util import resize_long_edge, resize_long_edge_cv2 -# from mmdet.core.visualization.image import imshow_det_bboxes # comment this line if you don't use mmdet - -nlp = spacy.load('en_core_web_sm') - -class SemanticSegment(): - def __init__(self, device): - self.device = device - self.model_init() - - def model_init(self): - self.init_clip() - self.init_oneformer_ade20k() - self.init_oneformer_coco() - self.init_blip() - self.init_clipseg() - - def init_clip(self): - # model_name = "openai/clip-vit-large-patch14" - model_name = "openai/clip-vit-base-patch32" - self.clip_processor = CLIPProcessor.from_pretrained(model_name) - self.clip_model = CLIPModel.from_pretrained(model_name).to(self.device) - - def init_oneformer_ade20k(self): - # model_name = "shi-labs/oneformer_ade20k_swin_large" - model_name = "shi-labs/oneformer_ade20k_swin_tiny" - self.oneformer_ade20k_processor = OneFormerProcessor.from_pretrained(model_name) - self.oneformer_ade20k_model = OneFormerForUniversalSegmentation.from_pretrained(model_name).to(self.device) - - def init_oneformer_coco(self): - model_name = "shi-labs/oneformer_coco_swin_large" - self.oneformer_coco_processor = OneFormerProcessor.from_pretrained(model_name) - self.oneformer_coco_model = OneFormerForUniversalSegmentation.from_pretrained(model_name).to(self.device) - - def init_blip(self): - model_name = "Salesforce/blip-image-captioning-base" - # model_name = "Salesforce/blip-image-captioning-large" - self.blip_processor = BlipProcessor.from_pretrained(model_name) - self.blip_model = BlipForConditionalGeneration.from_pretrained(model_name).to(self.device) - - def init_clipseg(self): - model_name = "CIDAS/clipseg-rd64-refined" - self.clipseg_processor = AutoProcessor.from_pretrained(model_name) - self.clipseg_model = CLIPSegForImageSegmentation.from_pretrained(model_name).to(self.device) - self.clipseg_processor.image_processor.do_resize = False - - @staticmethod - def get_noun_phrases(text): - doc = nlp(text) - return [chunk.text for chunk in doc.noun_chunks] - - def open_vocabulary_classification_blip(self, raw_image): - captioning_inputs = self.blip_processor(raw_image, return_tensors="pt").to(self.device) - out = self.blip_model.generate(**captioning_inputs) - caption = self.blip_processor.decode(out[0], skip_special_tokens=True) - return SemanticSegment.get_noun_phrases(caption) - - def oneformer_segmentation(self, image, processor, model): - inputs = processor(images=image, task_inputs=["semantic"], return_tensors="pt").to(self.device) - outputs = model(**inputs) - predicted_semantic_map = 
processor.post_process_semantic_segmentation( - outputs, target_sizes=[image.size[::-1]])[0] - return predicted_semantic_map - - def clip_classification(self, image, class_list, top_k): - inputs = self.clip_processor(text=class_list, images=image, return_tensors="pt", padding=True).to(self.device) - outputs = self.clip_model(**inputs) - logits_per_image = outputs.logits_per_image - probs = logits_per_image.softmax(dim=1) - if top_k == 1: - return class_list[probs.argmax().item()] - else: - top_k_indices = probs.topk(top_k, dim=1).indices[0] - return [class_list[index] for index in top_k_indices] - - def clipseg_segmentation(self, image, class_list): - inputs = self.clipseg_processor( - text=class_list, images=[image] * len(class_list), - padding=True, return_tensors="pt").to(self.device) - - h, w = inputs['pixel_values'].shape[-2:] - fixed_scale = (512, 512) - inputs['pixel_values'] = F.interpolate( - inputs['pixel_values'], - size=fixed_scale, - mode='bilinear', - align_corners=False) - - outputs = self.clipseg_model(**inputs) - logits = F.interpolate(outputs.logits[None], size=(h, w), mode='bilinear', align_corners=False)[0] - return logits - - - def semantic_class_w_mask(self, img_src, anns, out_file_name="output/test.json", scale_small=1.2, scale_large=1.6): - """ - generate class name for each mask - :param img_src: image path - :param anns: coco annotations, the same as return dict besides "class_name" and "class_proposals" - :param out_file_name: output file name - :param scale_small: scale small - :param scale_large: scale large - :return: dict('segmentation', 'area', 'bbox', 'predicted_iou', 'point_coords', 'stability_score', 'crop_box', "class_name", "class_proposals"}) - """ - img = mmcv.imread(img_src) - img = resize_long_edge_cv2(img, 384) - oneformer_coco_seg = self.oneformer_segmentation(Image.fromarray(img), self.oneformer_coco_processor, self.oneformer_coco_model) - oneformer_ade20k_seg = self.oneformer_segmentation(Image.fromarray(img), self.oneformer_ade20k_processor, self.oneformer_ade20k_model) - bitmasks, class_names = [], [] - for ann in anns: - # for ann in anns['annotations']: - valid_mask = torch.tensor((ann['segmentation'])).bool() - # valid_mask = torch.tensor(maskUtils.decode(ann['segmentation'])).bool() - coco_propose_classes_ids = oneformer_coco_seg[valid_mask] - ade20k_propose_classes_ids = oneformer_ade20k_seg[valid_mask] - - top_k_coco_propose_classes_ids = torch.bincount(coco_propose_classes_ids.flatten()).topk(1).indices - top_k_ade20k_propose_classes_ids = torch.bincount(ade20k_propose_classes_ids.flatten()).topk(1).indices - - local_class_names = {CONFIG_ADE20K_ID2LABEL['id2label'][str(class_id.item())] for class_id in top_k_ade20k_propose_classes_ids} - local_class_names.update({CONFIG_COCO_ID2LABEL['refined_id2label'][str(class_id.item())] for class_id in top_k_coco_propose_classes_ids}) - - bbox = ann['bbox'] - patch_small = mmcv.imcrop(img, np.array([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]), scale=scale_small) - patch_large = mmcv.imcrop(img, np.array([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]), scale=scale_large) - - op_class_list = self.open_vocabulary_classification_blip(patch_large) - local_class_list = list(local_class_names.union(op_class_list)) - - top_k = min(len(local_class_list), 3) - mask_categories = self.clip_classification(patch_small, local_class_list, top_k) - class_ids_patch_large = self.clipseg_segmentation(patch_large, mask_categories).argmax(0) - - valid_mask_large_crop = 
mmcv.imcrop(valid_mask.numpy(), np.array([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]), scale= - scale_large) - top_1_patch_large = torch.bincount(class_ids_patch_large[torch.tensor(valid_mask_large_crop)].flatten()).topk(1).indices - top_1_mask_category = mask_categories[top_1_patch_large.item()] - - ann['class_name'] = str(top_1_mask_category) - ann['class_proposals'] = mask_categories - class_names.append(ann['class_name']) - # bitmasks.append(maskUtils.decode(ann['segmentation'])) - bitmasks.append((ann['segmentation'])) - # mmcv.dump(anns, out_file_name) - return anns - # below for visualization - # imshow_det_bboxes(img, - # bboxes=None, - # labels=np.arange(len(bitmasks)), - # segms=np.stack(bitmasks), - # class_names=class_names, - # font_size=25, - # show=False, - # out_file='output/result2.png') \ No newline at end of file diff --git a/spaces/Ayemos/highlight_text_based_on_surprisals/app.py b/spaces/Ayemos/highlight_text_based_on_surprisals/app.py deleted file mode 100644 index 26ad917bb6768078c5cb6222009b34735db9bd67..0000000000000000000000000000000000000000 --- a/spaces/Ayemos/highlight_text_based_on_surprisals/app.py +++ /dev/null @@ -1,102 +0,0 @@ -from typing import List, Tuple - -import gradio as gr -import numpy as np -import torch -from transformers import AutoModelForCausalLM, T5Tokenizer - -device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu") -tokenizer = T5Tokenizer.from_pretrained("rinna/japanese-gpt2-medium") -tokenizer.do_lower_case = True - -model = AutoModelForCausalLM.from_pretrained("rinna/japanese-gpt2-medium") -model.to(device) - - -def calculate_surprisals( - input_text: str, normalize_surprisals: bool = True -) -> Tuple[float, List[Tuple[str, float]]]: - input_tokens = [ - token.replace("▁", "") - for token in tokenizer.tokenize(input_text) - if token != "▁" - ] - input_ids = tokenizer.encode( - "" + input_text, add_special_tokens=False, return_tensors="pt" - ).to(device) - - logits = model(input_ids)["logits"].squeeze(0) - - surprisals = [] - for i in range(logits.shape[0] - 1): - if input_ids[0][i + 1] == 9: - continue - logit = logits[i] - prob = torch.softmax(logit, dim=0) - neg_logprob = -torch.log(prob) - surprisals.append(neg_logprob[input_ids[0][i + 1]].item()) - mean_surprisal = np.mean(surprisals) - - if normalize_surprisals: - min_surprisal = np.min(surprisals) - max_surprisal = np.max(surprisals) - surprisals = [ - (surprisal - min_surprisal) / (max_surprisal - min_surprisal) - for surprisal in surprisals - ] - assert min(surprisals) >= 0 - assert max(surprisals) <= 1 - - tokens2surprisal: List[Tuple[str, float]] = [] - for token, surprisal in zip(input_tokens, surprisals): - tokens2surprisal.append((token, surprisal)) - - return mean_surprisal, tokens2surprisal - - -def highlight_token(token: str, score: float): - html_color = "#%02X%02X%02X" % (255, int(255 * (1 - score)), int(255 * (1 - score))) - return '{}'.format( - html_color, token - ) - - -def create_highlighted_text(tokens2scores: List[Tuple[str, float]]): - highlighted_text: str = "" - for token, score in tokens2scores: - highlighted_text += highlight_token(token, score) - highlighted_text += "
<br>
" - return highlighted_text - - -def main(input_text: str) -> Tuple[float, str]: - mean_surprisal, tokens2surprisal = calculate_surprisals( - input_text, normalize_surprisals=True - ) - highlighted_text = create_highlighted_text(tokens2surprisal) - return round(mean_surprisal, 2), highlighted_text - - -if __name__ == "__main__": - demo = gr.Interface( - fn=main, - title="読みにくい箇所を検出するAI(デモ)", - description="テキストを入力すると、読みにくさに応じてハイライトされて出力されます。", - inputs=gr.inputs.Textbox( - lines=5, - label="テキスト", - placeholder="ここにテキストを入力してください。", - ), - outputs=[ - gr.Number(label="文全体の読みにくさ(サプライザル)"), - gr.outputs.HTML(label="トークン毎サプライザル"), - ], - examples=[ - "太郎が二郎を殴った。", - "太郎が二郎に殴った。", - "サイエンスインパクトラボは、国立研究開発法人科学技術振興機構(JST)の「科学と社会」推進部が行う共創プログラムです。「先端の研究開発を行う研究者」と「社会課題解決に取り組むプレイヤー」が約3ヶ月に渡って共創活動を行います。", - "近年、ニューラル言語モデルが自然言語の統語知識をどれほど有しているかを、容認性判断課題を通して検証する研究が行われてきている。しかし、このような言語モデルの統語的評価を行うためのデータセットは、主に英語を中心とした欧米の諸言語を対象に構築されてきた。本研究では、既存のデータセットの問題点を克服しつつ、このようなデータセットが構築されてこなかった日本語を対象とした初めてのデータセットである JCoLA (JapaneseCorpus of Linguistic Acceptability) を構築した上で、それを用いた言語モデルの統語的評価を行った。", - ], - ) - - demo.launch() diff --git a/spaces/Banbri/zcvzcv/src/app/interface/progress/index.tsx b/spaces/Banbri/zcvzcv/src/app/interface/progress/index.tsx deleted file mode 100644 index ce24276a4b241d185fce5bd306a0c3e339835626..0000000000000000000000000000000000000000 --- a/spaces/Banbri/zcvzcv/src/app/interface/progress/index.tsx +++ /dev/null @@ -1,56 +0,0 @@ -import { useEffect, useRef, useState } from "react" - -import { ProgressBar } from "./progress-bar" -import { cn } from "@/lib/utils" - -export function Progress({ - isLoading, - resetKey = "", // when this key change, this will re-spawn the progress bar - className = "", -}: { - isLoading: boolean - resetKey?: string - className?: string -}) { - const timeoutRef = useRef() - const [progressPercent, setProcessPercent] = useState(0) - const progressRef = useRef(0) - const isLoadingRef = useRef(isLoading) - - const updateProgressBar = () => { - const duration = 1000 // 1 sec - const frequency = 200 // 200ms - const nbUpdatesPerSec = duration / frequency // 5x per second - - // normally it takes 45, and we will try to go below, - // but to be safe let's set the counter a 1 min - const nbSeconds = 80 // 1 min - const amountInPercent = 100 / (nbUpdatesPerSec * nbSeconds) // 0.333 - - progressRef.current = Math.min(100, progressRef.current + amountInPercent) - setProcessPercent(progressRef.current) - } - - useEffect(() => { - clearInterval(timeoutRef.current) - isLoadingRef.current = isLoading - progressRef.current = 0 - setProcessPercent(0) - if (isLoading) { - timeoutRef.current = setInterval(updateProgressBar, 200) - } - }, [isLoading, resetKey]) - - return ( -
    <div className={cn(isLoading ? "opacity-100" : "opacity-0", className)}>
-      <ProgressBar progressPercent={progressPercent} />
-    </div>
- ) -} \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/2vd Canciones Mp3 Descargar.md b/spaces/Benson/text-generation/Examples/2vd Canciones Mp3 Descargar.md deleted file mode 100644 index 737c2cd78ba8b345ec000c17588f116c62495f83..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/2vd Canciones Mp3 Descargar.md +++ /dev/null @@ -1,62 +0,0 @@ - -

What is 2vd mp3 songs download?
-
If you are looking for a simple and effective way to download mp3 songs from YouTube videos, you may want to try 2vd mp3 songs download. 2vd is a free online tool that lets you convert any YouTube video to mp3 format in just a few clicks. You can enjoy your favorite music offline on any device without any hassle.
-
Downloading mp3 songs from YouTube videos has many benefits, such as:
-
2vd mp3 songs download

DOWNLOAD ===== https://bltlly.com/2v6IGT

-
  • You can save your mobile data and bandwidth by downloading music once and listening to it offline.
  • You can access your music library anytime, anywhere, even without an internet connection.
  • You can create your own playlists and mixtapes with the songs you like.
  • You can discover new music and genres by exploring different YouTube channels and playlists.
  • You can support your favorite artists and creators by watching their videos and downloading their songs.
-
How to use 2vd mp3 songs download?
-
Using 2vd mp3 songs download is very easy and quick. Just follow these simple steps:
-
  1. Go to https://www.2vd.co/ in your browser.
  2. Copy the URL of the YouTube video you want to download as mp3.
  3. Paste the URL into the search box on 2vd and click "Convert".
  4. Wait a few seconds while 2vd analyzes the video and generates the mp3 file.
  5. Click "Download" to save the mp3 file to your device.
-
That's it! You have successfully downloaded an mp3 song from a YouTube video using 2vd. You can repeat the same process for as many videos as you like.
-
Features of 2vd mp3 songs download
-
2vd mp3 songs download is not just another mp3 downloader. It has some remarkable features that make it stand out from the crowd. Here are some of them:
-
  • Fast speed: 2vd is one of the fastest YouTube-to-mp3 converters available online. It can convert and download any video in a matter of seconds, without compromising quality.
  • Unlimited downloads: 2vd places no restrictions or limits on how many videos you can convert and download as mp3. You can download as much music as you want, for free.
  • No registration required: 2vd does not ask you to sign up or create an account to use its service. You can access it anonymously and safely, without providing any personal information or email address.
  • High compatibility: 2vd works well with all browsers and devices, including Windows, Mac, Android, iPhone, iPad, etc. You can use it on any platform and device that supports mp3 playback.
-
Comparison with other mp3 downloaders
-
There are many other mp3 downloaders available online, but not all of them are as good as 2vd. Here is a comparison table showing how 2vd mp3 songs download stacks up against some of the most popular ones:
-
| Mp3 downloader | Quality | Speed | Downloads | Registration | Compatibility |
| --- | --- | --- | --- | --- | --- |
| 2vd | Up to 320 kbps | Very fast | Unlimited | No | All browsers and devices |
| BestMP3Converter | Up to 320 kbps | Fast | Unlimited | No | All browsers and devices |
| OKmusi | Up to 320 kbps | Fast | Unlimited | No | All browsers and devices |
| JioSaavn | Up to 320 kbps | Slow | Limited | Yes | Android and iOS only |
-
As you can see, 2vd mp3 songs download is the best option for downloading mp3 songs from YouTube videos. It offers the highest quality, the fastest speed, the most downloads, the least hassle, and the widest compatibility.
-
Tips and tricks for 2vd mp3 songs download
-
Now that you know how to use 2vd mp3 songs download, here are some tips and tricks to help you get the most out of it:
-
  • Find the best YouTube videos: To get the best quality and variety of music, look for YouTube videos with high views, likes, comments, and ratings. You can also use filters and keywords to narrow down your search results.
  • Customize the output settings: Before clicking "Convert", you can adjust the output settings of your mp3 file, such as the bitrate, volume, length, and file name. You can also trim or crop the video to keep only the part you want.
  • Manage your downloaded files: After downloading your mp3 files, you can organize them into folders, rename them, delete them, or transfer them to other devices. You can also use a music player app to play them offline.
-
Conclusion
-
2vd mp3 songs download is a great tool for downloading mp3 songs from YouTube videos. It is free, easy, fast, and reliable. It has many features and advantages that make it superior to other mp3 downloaders. You can use it to enjoy your favorite music offline on any device.
-
If you are looking for a simple and effective way to download mp3 songs from YouTube videos, you should definitely give 2vd mp3 songs download a try. You won't regret it!
-
To start downloading mp3 songs from YouTube videos using 2vd, click this link:
-
  • Does 2vd mp3 songs download work with other websites?
-
No, 2vd mp3 songs download only works with YouTube videos. It does not support other websites or platforms that host or stream videos or music. If you want to download music from other sources, you will need to use a different tool.
-
  • How can I contact 2vd mp3 songs download?
-
If you have any questions, comments, suggestions, or problems regarding 2vd mp3 songs download, you can contact them at their email address: contact@2vd.co. They will be happy to help you and answer your queries.
-
I hope you found this article helpful and informative. If you did, please share it with your friends and family who might also be interested in downloading mp3 songs from YouTube videos using 2vd. Thank you for reading, and happy downloading!
-
64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Construir Arte - Elaboracin Amp Construccin De Juegos 3d Apk.md b/spaces/Benson/text-generation/Examples/Construir Arte - Elaboracin Amp Construccin De Juegos 3d Apk.md deleted file mode 100644 index d6f956721d41de9f3876e968f68ee1927d39b950..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Construir Arte - Elaboracin Amp Construccin De Juegos 3d Apk.md +++ /dev/null @@ -1,45 +0,0 @@ - -

Build Craft - Crafting & Building 3D Games APK: A free and fun game for the whole family
-
Do you love building or crafting games? Do you want to unleash your creativity and imagination? Do you want to have fun and relax with your friends and family? If you answered yes to any of these questions, then you should try Build Craft - Crafting & Building 3D Games APK, a free and fun game for the whole family.
-
What is Build Craft?
-
A game that lets you create your own 3D crafts
-
Build Craft is a game that aims to give users a way to build 3D crafts, such as houses, hotels, parks, lakes, animals, trees, clouds, airplanes, and more. You can use different blocks and materials to design and decorate your creations. You can also explore different worlds and biomes, such as forests, deserts, mountains, oceans, and more.
-
build craft - crafting & building 3d games apk

DOWNLOAD ===== https://bltlly.com/2v6MYn

-
A game suitable for all ages and interests
-
Build Craft is a game suitable for all ages and interests. Whether you are a kid or an adult, a boy or a girl, a beginner or an expert, you will find something to enjoy in this game. You can play alone or with others, create simple or complex crafts, follow tutorials or use your own ideas, and more. There is no limit to what you can do in this game.
-
A game compatible with Android devices
-
Build Craft is a game compatible with Android devices. You can download the APK file from a reliable source and install it on your device. You can play this game anytime, anywhere, as long as you have enough storage space and battery life. You can also update the game regularly to get new features and improvements.
-
Why should you play Build Craft?
-
It has interesting features and gameplay
-
It has a multiplayer mode and an online community
-
Build Craft has a multiplayer mode and an online community that will make your gaming experience more fun and social. You can play online and help your friends build their houses. You can also chat with players from around the world and share your creations. You can also join different servers and take part in various events and contests.
-
It has pixel graphics and sound effects
-
Build Craft has pixel graphics and sound effects that give it a nostalgic and immersive feel. You will love this game's retro style and colorful visuals. You will also enjoy the realistic sounds of blocks breaking, animals roaring, water flowing, fire burning, and so on. You will feel as if you were in a real 3D world.
-
How to download and install Build Craft?
-
Download the APK file from a trusted source
-
To download Build Craft - Crafting & Building 3D Games APK, you need to find a trusted source that offers the latest version of the file and has good reviews and ratings. You can use the link below to download the APK file. Make sure you have enough space on your device for the download, which is about 30 MB in size.
-
Enable unknown sources in your device settings
-
To install Build Craft - Crafting & Building 3D Games APK, you need to enable unknown sources in your device settings. This allows you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on. You may see a warning message, but you can ignore it if you trust the source of the APK file.
-
Install the APK file and launch the game
-
Conclusion
-
Summary of the main points and benefits of Build Craft
-
Build Craft - Crafting & Building 3D Games APK is a free and fun game for the whole family that lets you create your own 3D crafts, explore different worlds and biomes, craft different items and tools, fight monsters and enemies, play online with friends and other players, and enjoy pixel graphics and sound effects. It is a game suitable for all ages and interests, and compatible with Android devices.
-
Call to action and rating request
-
If you are looking for a game that will keep you entertained for hours, you should download Build Craft - Crafting & Building 3D Games APK today. You won't regret it. You can also share your feedback and suggestions with us by leaving a comment or rating on our website or app store. We would love to hear from you and improve our game. Thank you for playing Build Craft!
-
Frequently asked questions
-
Is Build Craft safe to download and install?
-
Yes, Build Craft is safe to download and install, as long as you use a trusted source that offers the original, unmodified APK file. You should also scan the file with antivirus software before installing it.
-
Is Build Craft free?
-
Yes, Build Craft is free and does not require any in-app purchases or subscriptions. However, it may contain ads that support the developers and help them maintain the game.
-
Can I play Build Craft offline?
-
Yes, you can play Build Craft offline in single-player mode. However, you will need an internet connection to play online in multiplayer mode or to join servers.
-
Can I customize my character in Build Craft?
-
Yes, you can customize your character in Build Craft by choosing from different skins, clothes, hairstyles, accessories, and more. You can also create your own skin using the skin editor feature.
-
Can I save my progress in Build Craft?
-
: [Build Craft - Crafting & Building 3D Games APK Download](https://apkpure.com/build-craft-craft-crafteing-building-3d-games/com.buildcraft.crafting.building)
-
64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Dark Bitcoin Minero Pro V7.0 Gratis.md b/spaces/Benson/text-generation/Examples/Descargar Dark Bitcoin Minero Pro V7.0 Gratis.md deleted file mode 100644 index a977e5e41600d0c56a49f6b1a1bbcbfa9b1b7a2d..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Dark Bitcoin Minero Pro V7.0 Gratis.md +++ /dev/null @@ -1,96 +0,0 @@ - -

Dark Bitcoin Miner Pro V7.0 Free Download: What You Need to Know
-
Bitcoin mining is the process of creating new bitcoins by solving complex mathematical problems using specialized hardware and software.
-
download dark bitcoin miner pro v7.0 free

Download ---> https://bltlly.com/2v6Mux

-
There are many kinds of bitcoin mining software available on the market, but not all of them can be trusted.
-
One of the most popular and controversial bitcoin mining programs is dark bitcoin miner pro v7.0, which claims to be the fastest and most efficient bitcoin miner ever created.
-
But what is dark bitcoin miner pro v7.0, why is it so popular, and what are the risks of downloading it?
-
In this article, we will answer these questions and more, and give you some alternatives to dark bitcoin miner pro v7.0 that are safer and more reliable.
-
What is Dark Bitcoin Miner Pro V7.0?
-
Dark bitcoin miner pro v7.0 is bitcoin mining software that claims to be able to mine bitcoins using any device, such as a CPU, GPU, ASIC, or FPGA.
-
It also claims to be compatible with several algorithms, such as SHA-256, Scrypt, X11, Ethash, and Equihash, and to support multiple cryptocurrencies, such as Bitcoin, Litecoin, Dash, Ethereum, and Zcash.
-
How does Dark Bitcoin Miner Pro V7.0 work?
-
Dark bitcoin miner pro v7.0 works by using your device's processing power to solve complex mathematical problems that verify transactions on the blockchain.
-
For every problem solved, the miner receives a reward in the form of newly created bitcoins or other cryptocurrencies.
-
The more processing power the device has, the faster and more efficient the mining process will be.
-
What are the features of Dark Bitcoin Miner Pro V7.0?
-
Some of the features of dark bitcoin miner pro v7.0 are:
-
  • High speed: dark bitcoin miner pro v7.0 claims to be able to mine bitcoins at a rate of up to 1 BTC per day, depending on the device and algorithm used.
  • Low power consumption: dark bitcoin miner pro v7.0 claims to mine while using very little energy.
  • Compatibility: dark bitcoin miner pro v7.0 claims to be compatible with any device that has a processor, such as laptops, desktops, smartphones, tablets, or even smart TVs.
  • Versatility: dark bitcoin miner pro v7.0 claims to be able to mine any cryptocurrency that uses any algorithm, such as Bitcoin, Litecoin, Dash, Ethereum, or Zcash.
  • Easy to use: dark bitcoin miner pro v7.0 claims to be easy to install and use, with a simple interface and automatic configuration.
-
Why is Dark Bitcoin Miner Pro V7.0 popular?
-
Dark bitcoin miner pro v7.0 is popular because it appeals to many people who want to mine bitcoins without investing in expensive, complicated hardware or software.
-
Many beginners and enthusiasts interested in bitcoin mining are drawn in by the promises of dark bitcoin miner pro v7.0, such as high speed, low power consumption, compatibility, versatility, and ease of use.
-
They also believe that dark bitcoin miner pro v7.0 is a free and easy way to earn bitcoins without any risk or effort.
-
How to download Dark Bitcoin Miner Pro V7.0?
-
Dark bitcoin miner pro v7.0 is not available on any official or reputable website or platform.
-
The only way to download dark bitcoin miner pro v7.0 is through unofficial, unverified sources, such as file-sharing websites, GitHub repositories, or Telegram channels.
-
These sources are often unreliable and unsafe, as they may contain viruses, malware, spyware, or other harmful programs that can infect your device or steal your data.
-
How to install and use Dark Bitcoin Miner Pro V7.0?
-
If you decide to download dark bitcoin miner pro v7.0 from one of these sources, you will need to follow these steps to install and use it:
-
  1. Extract the rar file: dark bitcoin miner pro v7.0 usually comes compressed in a rar file that you will need to extract using a program such as WinRAR or 7-Zip.
  2. Run the exe file: after extracting the rar file, you will find an exe file that you will need to run as administrator by right-clicking on it and selecting "Run as administrator".
  3. Configure the settings: after running the exe file, you will see a window that lets you configure the settings of dark bitcoin miner pro v7.0, such as the algorithm, the cryptocurrency, the wallet address, the mining pool, and the mining speed.
  4. Start mining: after configuring the settings, you will need to click the "Start" button to begin mining bitcoins or other cryptocurrencies with dark bitcoin miner pro v7.0.
-
What are the risks of downloading Dark Bitcoin Miner Pro V7.0?
-
Downloading dark bitcoin miner pro v7.0 is not only illegal but also very risky.
-
There are many dangers in downloading dark bitcoin miner pro v7.0, such as:
-
How to detect and remove malware from Dark Bitcoin Miner Pro V7.0?
-
One of the most common and serious dangers of downloading dark bitcoin miner pro v7.0 is malware infection.
-
Malware is malicious software that can damage your device or data in various ways, such as deleting or encrypting your files, stealing your passwords or personal information, spying on your online activities, or hijacking your resources.
-
Dark bitcoin miner pro v7.0 may contain malware that can infect your device when you download or run it, or even when you extract the rar file.
-
To detect and remove malware from dark bitcoin miner pro v7.0, you will need to follow these steps:
-
  1. Delete suspicious files: if you suspect that dark bitcoin miner pro v7.0 has infected your device with malware, you should delete any suspicious files related to it, such as the rar file, the exe file, or any other file it has created or modified.
  2. Restore your system: if deleting suspicious files does not solve the problem, you may need to restore your system to an earlier state from before you downloaded or ran dark bitcoin miner pro v7.0. You can use a system restore point or a backup to restore your system and undo any changes that dark bitcoin miner pro v7.0 may have made.
-
How to avoid legal trouble from using Dark Bitcoin Miner Pro V7.0?
-
Another danger of downloading dark bitcoin miner pro v7.0 is legal trouble.
-
Legal issues are the problems that can arise from breaking the law by using dark bitcoin miner pro v7.0, such as violating the intellectual property rights of the software's original developers, breaching the terms and conditions of the mining pools or platforms you use, or engaging in illegal or fraudulent activities with the cryptocurrencies you mine.
-
To avoid legal trouble from using dark bitcoin miner pro v7.0, you will need to take these precautions:
-
  • Check your local laws: before downloading or using dark bitcoin miner pro v7.0, you should check the local laws of your country or region regarding bitcoin mining and cryptocurrency transactions. Some countries or regions may have strict regulations or outright bans on these activities, and you may face legal consequences if you violate them.
  • Do not reveal personal information: when using dark bitcoin miner pro v7.0, you should not reveal any personal information that could identify you or link you to your activities, such as your name, email address, phone number, bank account number, or social media accounts. You should also avoid using the same wallet address for different transactions, and use a mixing service to anonymize your transactions.
-
What are the alternatives to Dark Bitcoin Miner Pro V7.0?
-
If you want to mine bitcoins or other cryptocurrencies without risking your device, data, or reputation, you should avoid downloading dark bitcoin miner pro v7.0 and look for alternatives that are safer and more reliable.
-
Some of the alternatives to dark bitcoin miner pro v7.0 are:
-
How to choose the best alternative to Dark Bitcoin Miner Pro V7.0?
-
To choose the best alternative to dark bitcoin miner pro v7.0, you should consider some criteria that can help you judge the quality and suitability of the software, such as:
-
  • Security: the software should be secure and free of any malware, spyware, or viruses that could damage your device or data.
  • Performance: the software should be fast and efficient, able to mine bitcoins or other cryptocurrencies at a reasonable rate with minimal power consumption.
  • Cost: the software should be affordable and transparent, and charge no hidden fees or commissions for its use.
  • Reputation: the software should be reputable and trustworthy, with positive reviews and feedback from other users and experts.
-
How to compare the alternatives to Dark Bitcoin Miner Pro V7.0?
-
To compare the alternatives to dark bitcoin miner pro v7.0 against the criteria above, you can use a table like this:
-
In conclusion, dark bitcoin miner pro v7.0 is bitcoin mining software that claims to be able to mine bitcoins using any device, algorithm, or cryptocurrency.
-
However, dark bitcoin miner pro v7.0 is also illegal, risky, and unreliable, as it may contain malware, steal your data, damage your device, or get you into legal trouble.
-
Therefore, you should avoid downloading dark bitcoin miner pro v7.0 and look for safer, more reliable alternatives, such as legitimate mining software, cloud mining services, or mining pools.
-
Frequently asked questions
-
Here are some frequently asked questions related to the topic of this article:
-
  1. Is dark bitcoin miner pro v7.0 a scam?
-
Yes, dark bitcoin miner pro v7.0 is a scam that tries to lure unsuspecting users into downloading malware or giving away their personal information.
-
  2. How much can I earn with dark bitcoin miner pro v7.0?
-
You cannot earn anything with dark bitcoin miner pro v7.0, because it does not actually mine bitcoins or other cryptocurrencies.
-
  3. Is dark bitcoin miner pro v7.0 safe to use?
-
No, dark bitcoin miner pro v7.0 is not safe to use, as it can infect your device with malware, steal your data, damage your device, or get you into legal trouble.
-
  4. What are the best devices for dark bitcoin miner pro v7.0?
-
There are no best devices for dark bitcoin miner pro v7.0, because it does not work on any device.
-
  5. How can I contact the developers of dark bitcoin miner pro v7.0?
-
You cannot contact the developers of dark bitcoin miner pro v7.0, because they are anonymous and untraceable.
-
64aa2da5cf
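The safety advice above boils down to verifying a download before running it. A standard complement to antivirus scanning, not mentioned in the article itself, is comparing the file's SHA-256 digest against one published by a source you trust. A minimal sketch, with a placeholder file name and digest:

```python
# Verify a downloaded file against a publisher-provided SHA-256 digest
# before extracting or running it. Both values below are placeholders.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "0000000000000000000000000000000000000000000000000000000000000000"
if sha256_of("miner_download.rar") != expected:
    raise SystemExit("Checksum mismatch: do not extract or run this file.")
```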
    -
    -
\ No newline at end of file
diff --git a/spaces/BetterAPI/BetterChat/src/lib/utils/share.ts b/spaces/BetterAPI/BetterChat/src/lib/utils/share.ts
deleted file mode 100644
index 4587669a10164aa7c961429fbddec9cf438c0eca..0000000000000000000000000000000000000000
--- a/spaces/BetterAPI/BetterChat/src/lib/utils/share.ts
+++ /dev/null
@@ -1,7 +0,0 @@
-export function share(url: string, title: string) {
-	if (navigator.share) {
-		navigator.share({ url, title });
-	} else {
-		prompt("Copy this public url to share:", url);
-	}
-}
diff --git a/spaces/BetterAPI/BetterChat_new/postcss.config.js b/spaces/BetterAPI/BetterChat_new/postcss.config.js
deleted file mode 100644
index 7b75c83aff1c05e0e0e315638e07a22314603d4d..0000000000000000000000000000000000000000
--- a/spaces/BetterAPI/BetterChat_new/postcss.config.js
+++ /dev/null
@@ -1,6 +0,0 @@
-export default {
-	plugins: {
-		tailwindcss: {},
-		autoprefixer: {},
-	},
-};
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/README.md b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/README.md
deleted file mode 100644
index 9fb3e4f7afec17137c95c78be6ef06d520ec8032..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-### Common Datasets
-
-The datasets implemented here do not need to load the data into the final format.
-They should provide the minimal data structure needed to use the dataset, so it can be very efficient.
-
-For example, for an image dataset, just provide the file names and labels, but don't read the images.
-Let the downstream decide how to read.
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/densepose_coco_evaluation.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/densepose_coco_evaluation.py
deleted file mode 100644
index 3b4d35c2ac1c9c48ddbb41c34b2280f37540220e..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/densepose_coco_evaluation.py
+++ /dev/null
@@ -1,1120 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-# This is a modified version of cocoeval.py where we also have the densepose evaluation.
- -__author__ = "tsungyi" - -import copy -import datetime -import itertools -import logging -import numpy as np -import pickle -import time -from collections import defaultdict -from enum import Enum -from typing import Any, Dict, Tuple -import scipy.spatial.distance as ssd -from fvcore.common.file_io import PathManager -from pycocotools import mask as maskUtils -from scipy.io import loadmat -from scipy.ndimage import zoom as spzoom - -from .structures import DensePoseDataRelative, DensePoseResult - -logger = logging.getLogger(__name__) - - -class DensePoseEvalMode(str, Enum): - # use both masks and geodesic distances (GPS * IOU) to compute scores - GPSM = "gpsm" - # use only geodesic distances (GPS) to compute scores - GPS = "gps" - # use only masks (IOU) to compute scores - IOU = "iou" - - -class DensePoseDataMode(str, Enum): - # use estimated IUV data (default mode) - IUV_DT = "iuvdt" - # use ground truth IUV data - IUV_GT = "iuvgt" - # use ground truth labels I and set UV to 0 - I_GT_UV_0 = "igtuv0" - # use ground truth labels I and estimated UV coordinates - I_GT_UV_DT = "igtuvdt" - # use estimated labels I and set UV to 0 - I_DT_UV_0 = "idtuv0" - - -class DensePoseCocoEval(object): - # Interface for evaluating detection on the Microsoft COCO dataset. - # - # The usage for CocoEval is as follows: - # cocoGt=..., cocoDt=... # load dataset and results - # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object - # E.params.recThrs = ...; # set parameters as desired - # E.evaluate(); # run per image evaluation - # E.accumulate(); # accumulate per image results - # E.summarize(); # display summary metrics of results - # For example usage see evalDemo.m and http://mscoco.org/. - # - # The evaluation parameters are as follows (defaults in brackets): - # imgIds - [all] N img ids to use for evaluation - # catIds - [all] K cat ids to use for evaluation - # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation - # recThrs - [0:.01:1] R=101 recall thresholds for evaluation - # areaRng - [...] A=4 object area ranges for evaluation - # maxDets - [1 10 100] M=3 thresholds on max detections per image - # iouType - ['segm'] set iouType to 'segm', 'bbox', 'keypoints' or 'densepose' - # iouType replaced the now DEPRECATED useSegm parameter. - # useCats - [1] if true use category labels for evaluation - # Note: if useCats=0 category labels are ignored as in proposal scoring. - # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified. - # - # evaluate(): evaluates detections on every image and every category and - # concats the results into the "evalImgs" with fields: - # dtIds - [1xD] id for each of the D detections (dt) - # gtIds - [1xG] id for each of the G ground truths (gt) - # dtMatches - [TxD] matching gt id at each IoU or 0 - # gtMatches - [TxG] matching dt id at each IoU or 0 - # dtScores - [1xD] confidence of each dt - # gtIgnore - [1xG] ignore flag for each gt - # dtIgnore - [TxD] ignore flag for each dt at each IoU - # - # accumulate(): accumulates the per-image, per-category evaluation - # results in "evalImgs" into the dictionary "eval" with fields: - # params - parameters used for evaluation - # date - date evaluation was performed - # counts - [T,R,K,A,M] parameter dimensions (see above) - # precision - [TxRxKxAxM] precision for every evaluation setting - # recall - [TxKxAxM] max recall for every evaluation setting - # Note: precision and recall==-1 for settings with no gt objects. - # - # See also coco, mask, pycocoDemo, pycocoEvalDemo - # - # Microsoft COCO Toolbox. 
version 2.0 - # Data, paper, and tutorials available at: http://mscoco.org/ - # Code written by Piotr Dollar and Tsung-Yi Lin, 2015. - # Licensed under the Simplified BSD License [see coco/license.txt] - def __init__( - self, - cocoGt=None, - cocoDt=None, - iouType: str = "densepose", - dpEvalMode: DensePoseEvalMode = DensePoseEvalMode.GPS, - dpDataMode: DensePoseDataMode = DensePoseDataMode.IUV_DT, - ): - """ - Initialize CocoEval using coco APIs for gt and dt - :param cocoGt: coco object with ground truth annotations - :param cocoDt: coco object with detection results - :return: None - """ - self.cocoGt = cocoGt # ground truth COCO API - self.cocoDt = cocoDt # detections COCO API - self._dpEvalMode = dpEvalMode - self._dpDataMode = dpDataMode - self.params = {} # evaluation parameters - self.evalImgs = defaultdict(list) # per-image per-category eval results [KxAxI] - self.eval = {} # accumulated evaluation results - self._gts = defaultdict(list) # gt for evaluation - self._dts = defaultdict(list) # dt for evaluation - self.params = Params(iouType=iouType) # parameters - self._paramsEval = {} # parameters for evaluation - self.stats = [] # result summarization - self.ious = {} # ious between all gts and dts - if cocoGt is not None: - self.params.imgIds = sorted(cocoGt.getImgIds()) - self.params.catIds = sorted(cocoGt.getCatIds()) - self.ignoreThrBB = 0.7 - self.ignoreThrUV = 0.9 - - def _loadGEval(self): - smpl_subdiv_fpath = PathManager.get_local_path( - "https://dl.fbaipublicfiles.com/densepose/data/SMPL_subdiv.mat" - ) - pdist_transform_fpath = PathManager.get_local_path( - "https://dl.fbaipublicfiles.com/densepose/data/SMPL_SUBDIV_TRANSFORM.mat" - ) - pdist_matrix_fpath = PathManager.get_local_path( - "https://dl.fbaipublicfiles.com/densepose/data/Pdist_matrix.pkl" - ) - SMPL_subdiv = loadmat(smpl_subdiv_fpath) - self.PDIST_transform = loadmat(pdist_transform_fpath) - self.PDIST_transform = self.PDIST_transform["index"].squeeze() - UV = np.array([SMPL_subdiv["U_subdiv"], SMPL_subdiv["V_subdiv"]]).squeeze() - ClosestVertInds = np.arange(UV.shape[1]) + 1 - self.Part_UVs = [] - self.Part_ClosestVertInds = [] - for i in np.arange(24): - self.Part_UVs.append(UV[:, SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)]) - self.Part_ClosestVertInds.append( - ClosestVertInds[SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)] - ) - - with open(pdist_matrix_fpath, "rb") as hFile: - arrays = pickle.load(hFile, encoding="latin1") - self.Pdist_matrix = arrays["Pdist_matrix"] - self.Part_ids = np.array(SMPL_subdiv["Part_ID_subdiv"].squeeze()) - # Mean geodesic distances for parts. - self.Mean_Distances = np.array([0, 0.351, 0.107, 0.126, 0.237, 0.173, 0.142, 0.128, 0.150]) - # Coarse Part labels. 
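The interface comment above spells out the cocoGt/cocoDt → evaluate → accumulate → summarize workflow. A minimal driver following that documented sequence might look like this; `coco_gt` and `coco_dt` are assumed to be COCO-style API objects loaded elsewhere:

```python
# Hypothetical driver for the workflow documented in the class comment above.
evaluator = DensePoseCocoEval(
    coco_gt, coco_dt, iouType="densepose", dpEvalMode=DensePoseEvalMode.GPS
)
evaluator.params.maxDets = [20]  # set parameters as desired
evaluator.evaluate()    # per-image, per-category evaluation -> evalImgs
evaluator.accumulate()  # aggregate into precision/recall arrays -> eval
evaluator.summarize()   # log AP/AR summary lines -> stats
```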
- self.CoarseParts = np.array( - [0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8] - ) - - def _prepare(self): - """ - Prepare ._gts and ._dts for evaluation based on params - :return: None - """ - - def _toMask(anns, coco): - # modify ann['segmentation'] by reference - for ann in anns: - rle = coco.annToRLE(ann) - ann["segmentation"] = rle - - def _getIgnoreRegion(iid, coco): - img = coco.imgs[iid] - - if "ignore_regions_x" not in img.keys(): - return None - - if len(img["ignore_regions_x"]) == 0: - return None - - rgns_merged = [] - for region_x, region_y in zip(img["ignore_regions_x"], img["ignore_regions_y"]): - rgns = [iter(region_x), iter(region_y)] - rgns_merged.append([next(it) for it in itertools.cycle(rgns)]) - rles = maskUtils.frPyObjects(rgns_merged, img["height"], img["width"]) - rle = maskUtils.merge(rles) - return maskUtils.decode(rle) - - def _checkIgnore(dt, iregion): - if iregion is None: - return True - - bb = np.array(dt["bbox"]).astype(np.int) - x1, y1, x2, y2 = bb[0], bb[1], bb[0] + bb[2], bb[1] + bb[3] - x2 = min([x2, iregion.shape[1]]) - y2 = min([y2, iregion.shape[0]]) - - if bb[2] * bb[3] == 0: - return False - - crop_iregion = iregion[y1:y2, x1:x2] - - if crop_iregion.sum() == 0: - return True - - if "densepose" not in dt.keys(): # filtering boxes - return crop_iregion.sum() / bb[2] / bb[3] < self.ignoreThrBB - - # filtering UVs - ignoremask = np.require(crop_iregion, requirements=["F"]) - mask = self._extract_mask(dt) - uvmask = np.require(np.asarray(mask > 0), dtype=np.uint8, requirements=["F"]) - uvmask_ = maskUtils.encode(uvmask) - ignoremask_ = maskUtils.encode(ignoremask) - uviou = maskUtils.iou([uvmask_], [ignoremask_], [1])[0] - return uviou < self.ignoreThrUV - - p = self.params - - if p.useCats: - gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds)) - dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds)) - else: - gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds)) - dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds)) - - imns = self.cocoGt.loadImgs(p.imgIds) - self.size_mapping = {} - for im in imns: - self.size_mapping[im["id"]] = [im["height"], im["width"]] - - # if iouType == 'uv', add point gt annotations - if p.iouType == "densepose": - self._loadGEval() - - # convert ground truth to mask if iouType == 'segm' - if p.iouType == "segm": - _toMask(gts, self.cocoGt) - _toMask(dts, self.cocoDt) - - # set ignore flag - for gt in gts: - gt["ignore"] = gt["ignore"] if "ignore" in gt else 0 - gt["ignore"] = "iscrowd" in gt and gt["iscrowd"] - if p.iouType == "keypoints": - gt["ignore"] = (gt["num_keypoints"] == 0) or gt["ignore"] - if p.iouType == "densepose": - gt["ignore"] = ("dp_x" in gt) == 0 - - self._gts = defaultdict(list) # gt for evaluation - self._dts = defaultdict(list) # dt for evaluation - self._igrgns = defaultdict(list) - - for gt in gts: - iid = gt["image_id"] - if iid not in self._igrgns.keys(): - self._igrgns[iid] = _getIgnoreRegion(iid, self.cocoGt) - if _checkIgnore(gt, self._igrgns[iid]): - self._gts[iid, gt["category_id"]].append(gt) - for dt in dts: - if _checkIgnore(dt, self._igrgns[dt["image_id"]]): - self._dts[dt["image_id"], dt["category_id"]].append(dt) - - self.evalImgs = defaultdict(list) # per-image per-category evaluation results - self.eval = {} # accumulated evaluation results - - def evaluate(self): - """ - Run per image evaluation on given images and store results (a list of dict) in self.evalImgs - 
:return: None - """ - tic = time.time() - logger.info("Running per image DensePose evaluation... {}".format(self.params.iouType)) - p = self.params - # add backward compatibility if useSegm is specified in params - if p.useSegm is not None: - p.iouType = "segm" if p.useSegm == 1 else "bbox" - logger.info("useSegm (deprecated) is not None. Running DensePose evaluation") - p.imgIds = list(np.unique(p.imgIds)) - if p.useCats: - p.catIds = list(np.unique(p.catIds)) - p.maxDets = sorted(p.maxDets) - self.params = p - - self._prepare() - # loop through images, area range, max detection number - catIds = p.catIds if p.useCats else [-1] - - if p.iouType in ["segm", "bbox"]: - computeIoU = self.computeIoU - elif p.iouType == "keypoints": - computeIoU = self.computeOks - elif p.iouType == "densepose": - computeIoU = self.computeOgps - if self._dpEvalMode == DensePoseEvalMode.GPSM: - self.real_ious = { - (imgId, catId): self.computeDPIoU(imgId, catId) - for imgId in p.imgIds - for catId in catIds - } - - self.ious = { - (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds - } - - evaluateImg = self.evaluateImg - maxDet = p.maxDets[-1] - self.evalImgs = [ - evaluateImg(imgId, catId, areaRng, maxDet) - for catId in catIds - for areaRng in p.areaRng - for imgId in p.imgIds - ] - self._paramsEval = copy.deepcopy(self.params) - toc = time.time() - logger.info("DensePose evaluation DONE (t={:0.2f}s).".format(toc - tic)) - - def getDensePoseMask(self, polys): - maskGen = np.zeros([256, 256]) - for i in range(1, 15): - if polys[i - 1]: - currentMask = maskUtils.decode(polys[i - 1]) - maskGen[currentMask > 0] = i - return maskGen - - def _generate_rlemask_on_image(self, mask, imgId, data): - bbox_xywh = np.array(data["bbox"]) - x, y, w, h = bbox_xywh - im_h, im_w = self.size_mapping[imgId] - im_mask = np.zeros((im_h, im_w), dtype=np.uint8) - if mask is not None: - x0 = max(int(x), 0) - x1 = min(int(x + w), im_w, int(x) + mask.shape[1]) - y0 = max(int(y), 0) - y1 = min(int(y + h), im_h, int(y) + mask.shape[0]) - y = int(y) - x = int(x) - im_mask[y0:y1, x0:x1] = mask[y0 - y : y1 - y, x0 - x : x1 - x] - im_mask = np.require(np.asarray(im_mask > 0), dtype=np.uint8, requirements=["F"]) - rle_mask = maskUtils.encode(np.array(im_mask[:, :, np.newaxis], order="F"))[0] - return rle_mask - - def computeDPIoU(self, imgId, catId): - p = self.params - if p.useCats: - gt = self._gts[imgId, catId] - dt = self._dts[imgId, catId] - else: - gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]] - dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]] - if len(gt) == 0 and len(dt) == 0: - return [] - inds = np.argsort([-d["score"] for d in dt], kind="mergesort") - dt = [dt[i] for i in inds] - if len(dt) > p.maxDets[-1]: - dt = dt[0 : p.maxDets[-1]] - - gtmasks = [] - for g in gt: - if DensePoseDataRelative.S_KEY in g.keys(): - mask = self.getDensePoseMask(g[DensePoseDataRelative.S_KEY]) - _, _, w, h = g["bbox"] - scale_x = float(max(w, 1)) / mask.shape[1] - scale_y = float(max(h, 1)) / mask.shape[0] - mask = spzoom(mask, (scale_y, scale_x), order=1, prefilter=False) - mask = np.array(mask > 0.5, dtype=np.uint8) - else: - mask = None - rle_mask = self._generate_rlemask_on_image(mask, imgId, g) - gtmasks.append(rle_mask) - - dtmasks = [] - for d in dt: - mask = self._extract_mask(d) - mask = np.require(np.asarray(mask > 0), dtype=np.uint8, requirements=["F"]) - rle_mask = self._generate_rlemask_on_image(mask, imgId, d) - dtmasks.append(rle_mask) - - # compute iou between each dt and gt 
region - iscrowd = [int(o["iscrowd"]) for o in gt] - iousDP = maskUtils.iou(dtmasks, gtmasks, iscrowd) - return iousDP - - def computeIoU(self, imgId, catId): - p = self.params - if p.useCats: - gt = self._gts[imgId, catId] - dt = self._dts[imgId, catId] - else: - gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]] - dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]] - if len(gt) == 0 and len(dt) == 0: - return [] - inds = np.argsort([-d["score"] for d in dt], kind="mergesort") - dt = [dt[i] for i in inds] - if len(dt) > p.maxDets[-1]: - dt = dt[0 : p.maxDets[-1]] - - if p.iouType == "segm": - g = [g["segmentation"] for g in gt] - d = [d["segmentation"] for d in dt] - elif p.iouType == "bbox": - g = [g["bbox"] for g in gt] - d = [d["bbox"] for d in dt] - else: - raise Exception("unknown iouType for iou computation") - - # compute iou between each dt and gt region - iscrowd = [int(o["iscrowd"]) for o in gt] - ious = maskUtils.iou(d, g, iscrowd) - return ious - - def computeOks(self, imgId, catId): - p = self.params - # dimension here should be Nxm - gts = self._gts[imgId, catId] - dts = self._dts[imgId, catId] - inds = np.argsort([-d["score"] for d in dts], kind="mergesort") - dts = [dts[i] for i in inds] - if len(dts) > p.maxDets[-1]: - dts = dts[0 : p.maxDets[-1]] - # if len(gts) == 0 and len(dts) == 0: - if len(gts) == 0 or len(dts) == 0: - return [] - ious = np.zeros((len(dts), len(gts))) - sigmas = ( - np.array( - [ - 0.26, - 0.25, - 0.25, - 0.35, - 0.35, - 0.79, - 0.79, - 0.72, - 0.72, - 0.62, - 0.62, - 1.07, - 1.07, - 0.87, - 0.87, - 0.89, - 0.89, - ] - ) - / 10.0 - ) - vars = (sigmas * 2) ** 2 - k = len(sigmas) - # compute oks between each detection and ground truth object - for j, gt in enumerate(gts): - # create bounds for ignore regions(double the gt bbox) - g = np.array(gt["keypoints"]) - xg = g[0::3] - yg = g[1::3] - vg = g[2::3] - k1 = np.count_nonzero(vg > 0) - bb = gt["bbox"] - x0 = bb[0] - bb[2] - x1 = bb[0] + bb[2] * 2 - y0 = bb[1] - bb[3] - y1 = bb[1] + bb[3] * 2 - for i, dt in enumerate(dts): - d = np.array(dt["keypoints"]) - xd = d[0::3] - yd = d[1::3] - if k1 > 0: - # measure the per-keypoint distance if keypoints visible - dx = xd - xg - dy = yd - yg - else: - # measure minimum distance to keypoints in (x0,y0) & (x1,y1) - z = np.zeros((k)) - dx = np.max((z, x0 - xd), axis=0) + np.max((z, xd - x1), axis=0) - dy = np.max((z, y0 - yd), axis=0) + np.max((z, yd - y1), axis=0) - e = (dx ** 2 + dy ** 2) / vars / (gt["area"] + np.spacing(1)) / 2 - if k1 > 0: - e = e[vg > 0] - ious[i, j] = np.sum(np.exp(-e)) / e.shape[0] - return ious - - def _extract_mask(self, dt: Dict[str, Any]) -> np.ndarray: - (densepose_shape, densepose_data_encoded), densepose_bbox_xywh = dt["densepose"] - densepose_data = DensePoseResult.decode_png_data(densepose_shape, densepose_data_encoded) - return densepose_data[0] - - def _extract_iuv( - self, densepose_data: np.ndarray, py: np.ndarray, px: np.ndarray, gt: Dict[str, Any] - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """ - Extract arrays of I, U and V values at given points as numpy arrays - given the data mode stored in self._dpDataMode - """ - if self._dpDataMode == DensePoseDataMode.IUV_DT: - # estimated labels and UV (default) - ipoints = densepose_data[0, py, px] - upoints = densepose_data[1, py, px] / 255.0 # convert from uint8 by /255. 
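computeOks above implements the standard COCO keypoint similarity: squared keypoint distances are normalized by per-keypoint variances and object area, then passed through a Gaussian. A self-contained sketch of the visible-keypoint case, with toy inputs (the fallback for ground truths with no visible keypoints is omitted):

```python
import numpy as np

def oks(xg, yg, vg, xd, yd, area, sigmas):
    """Object Keypoint Similarity for one (gt, dt) pair, visible points only."""
    variances = (2 * sigmas) ** 2
    e = ((xd - xg) ** 2 + (yd - yg) ** 2) / variances / (area + np.spacing(1)) / 2
    e = e[vg > 0]  # only keypoints with visibility > 0 contribute
    return float(np.exp(-e).sum() / len(e)) if len(e) else 0.0
```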
- vpoints = densepose_data[2, py, px] / 255.0 - elif self._dpDataMode == DensePoseDataMode.IUV_GT: - # ground truth - ipoints = np.array(gt["dp_I"]) - upoints = np.array(gt["dp_U"]) - vpoints = np.array(gt["dp_V"]) - elif self._dpDataMode == DensePoseDataMode.I_GT_UV_0: - # ground truth labels, UV = 0 - ipoints = np.array(gt["dp_I"]) - upoints = upoints * 0.0 - vpoints = vpoints * 0.0 - elif self._dpDataMode == DensePoseDataMode.I_GT_UV_DT: - # ground truth labels, estimated UV - ipoints = np.array(gt["dp_I"]) - upoints = densepose_data[1, py, px] / 255.0 # convert from uint8 by /255. - vpoints = densepose_data[2, py, px] / 255.0 - elif self._dpDataMode == DensePoseDataMode.I_DT_UV_0: - # estimated labels, UV = 0 - ipoints = densepose_data[0, py, px] - upoints = upoints * 0.0 - vpoints = vpoints * 0.0 - else: - raise ValueError(f"Unknown data mode: {self._dpDataMode}") - return ipoints, upoints, vpoints - - def computeOgps(self, imgId, catId): - p = self.params - # dimension here should be Nxm - g = self._gts[imgId, catId] - d = self._dts[imgId, catId] - inds = np.argsort([-d_["score"] for d_ in d], kind="mergesort") - d = [d[i] for i in inds] - if len(d) > p.maxDets[-1]: - d = d[0 : p.maxDets[-1]] - # if len(gts) == 0 and len(dts) == 0: - if len(g) == 0 or len(d) == 0: - return [] - ious = np.zeros((len(d), len(g))) - # compute opgs between each detection and ground truth object - # sigma = self.sigma #0.255 # dist = 0.3m corresponds to ogps = 0.5 - # 1 # dist = 0.3m corresponds to ogps = 0.96 - # 1.45 # dist = 1.7m (person height) corresponds to ogps = 0.5) - for j, gt in enumerate(g): - if not gt["ignore"]: - g_ = gt["bbox"] - for i, dt in enumerate(d): - # - dy = int(dt["bbox"][3]) - dx = int(dt["bbox"][2]) - dp_x = np.array(gt["dp_x"]) * g_[2] / 255.0 - dp_y = np.array(gt["dp_y"]) * g_[3] / 255.0 - py = (dp_y + g_[1] - dt["bbox"][1]).astype(np.int) - px = (dp_x + g_[0] - dt["bbox"][0]).astype(np.int) - # - pts = np.zeros(len(px)) - pts[px >= dx] = -1 - pts[py >= dy] = -1 - pts[px < 0] = -1 - pts[py < 0] = -1 - if len(pts) < 1: - ogps = 0.0 - elif np.max(pts) == -1: - ogps = 0.0 - else: - px[pts == -1] = 0 - py[pts == -1] = 0 - (densepose_shape, densepose_data_encoded), densepose_bbox_xywh = dt[ - "densepose" - ] - densepose_data = DensePoseResult.decode_png_data( - densepose_shape, densepose_data_encoded - ) - assert densepose_data.shape[2] == dx, ( - "DensePoseData width {} should be equal to " - "detection bounding box width {}".format(densepose_data.shape[2], dx) - ) - assert densepose_data.shape[1] == dy, ( - "DensePoseData height {} should be equal to " - "detection bounding box height {}".format(densepose_data.shape[1], dy) - ) - ipoints, upoints, vpoints = self._extract_iuv(densepose_data, py, px, gt) - ipoints[pts == -1] = 0 - # Find closest vertices in subsampled mesh. - cVerts, cVertsGT = self.findAllClosestVerts(gt, upoints, vpoints, ipoints) - # Get pairwise geodesic distances between gt and estimated mesh points. - dist = self.getDistances(cVertsGT, cVerts) - # Compute the Ogps measure. - # Find the mean geodesic normalization distance for - # each GT point, based on which part it is on. 
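The GPS score assembled in the lines that follow is a Gaussian of the geodesic distance between matched mesh vertices, with a per-part normalization sigma drawn from Mean_Distances. Reduced to its core, with the distances and sigmas as plain arrays:

```python
import numpy as np

def gps_score(geodesic_dists, part_sigmas):
    """Mean Gaussian of per-point geodesic errors, as in computeOgps below."""
    vals = np.exp(-(geodesic_dists ** 2) / (2 * part_sigmas ** 2))
    return float(vals.sum() / len(geodesic_dists)) if len(geodesic_dists) else 0.0
```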
- Current_Mean_Distances = self.Mean_Distances[ - self.CoarseParts[self.Part_ids[cVertsGT[cVertsGT > 0].astype(int) - 1]] - ] - # Compute gps - ogps_values = np.exp(-(dist ** 2) / (2 * (Current_Mean_Distances ** 2))) - # - if len(dist) > 0: - ogps = np.sum(ogps_values) / len(dist) - ious[i, j] = ogps - - gbb = [gt["bbox"] for gt in g] - dbb = [dt["bbox"] for dt in d] - - # compute iou between each dt and gt region - iscrowd = [int(o["iscrowd"]) for o in g] - ious_bb = maskUtils.iou(dbb, gbb, iscrowd) - return ious, ious_bb - - def evaluateImg(self, imgId, catId, aRng, maxDet): - """ - perform evaluation for single category and image - :return: dict (single image results) - """ - - p = self.params - if p.useCats: - gt = self._gts[imgId, catId] - dt = self._dts[imgId, catId] - else: - gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]] - dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]] - if len(gt) == 0 and len(dt) == 0: - return None - - for g in gt: - # g['_ignore'] = g['ignore'] - if g["ignore"] or (g["area"] < aRng[0] or g["area"] > aRng[1]): - g["_ignore"] = True - else: - g["_ignore"] = False - - # sort dt highest score first, sort gt ignore last - gtind = np.argsort([g["_ignore"] for g in gt], kind="mergesort") - gt = [gt[i] for i in gtind] - dtind = np.argsort([-d["score"] for d in dt], kind="mergesort") - dt = [dt[i] for i in dtind[0:maxDet]] - iscrowd = [int(o["iscrowd"]) for o in gt] - # load computed ious - if p.iouType == "densepose": - # print('Checking the length', len(self.ious[imgId, catId])) - # if len(self.ious[imgId, catId]) == 0: - # print(self.ious[imgId, catId]) - ious = ( - self.ious[imgId, catId][0][:, gtind] - if len(self.ious[imgId, catId]) > 0 - else self.ious[imgId, catId] - ) - ioubs = ( - self.ious[imgId, catId][1][:, gtind] - if len(self.ious[imgId, catId]) > 0 - else self.ious[imgId, catId] - ) - if self._dpEvalMode == DensePoseEvalMode.GPSM: - iousM = ( - self.real_ious[imgId, catId][:, gtind] - if len(self.real_ious[imgId, catId]) > 0 - else self.real_ious[imgId, catId] - ) - else: - ious = ( - self.ious[imgId, catId][:, gtind] - if len(self.ious[imgId, catId]) > 0 - else self.ious[imgId, catId] - ) - - T = len(p.iouThrs) - G = len(gt) - D = len(dt) - gtm = np.zeros((T, G)) - dtm = np.zeros((T, D)) - gtIg = np.array([g["_ignore"] for g in gt]) - dtIg = np.zeros((T, D)) - if np.all(gtIg) and p.iouType == "densepose": - dtIg = np.logical_or(dtIg, True) - - if len(ious) > 0: # and not p.iouType == 'densepose': - for tind, t in enumerate(p.iouThrs): - for dind, d in enumerate(dt): - # information about best match so far (m=-1 -> unmatched) - iou = min([t, 1 - 1e-10]) - m = -1 - for gind, _g in enumerate(gt): - # if this gt already matched, and not a crowd, continue - if gtm[tind, gind] > 0 and not iscrowd[gind]: - continue - # if dt matched to reg gt, and on ignore gt, stop - if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1: - break - if p.iouType == "densepose": - if self._dpEvalMode == DensePoseEvalMode.GPSM: - new_iou = np.sqrt(iousM[dind, gind] * ious[dind, gind]) - elif self._dpEvalMode == DensePoseEvalMode.IOU: - new_iou = iousM[dind, gind] - elif self._dpEvalMode == DensePoseEvalMode.GPS: - new_iou = ious[dind, gind] - else: - new_iou = ious[dind, gind] - if new_iou < iou: - continue - if new_iou == 0.0: - continue - # if match successful and best so far, store appropriately - iou = new_iou - m = gind - # if match made store id of match for both dt and gt - if m == -1: - continue - dtIg[tind, dind] = gtIg[m] - dtm[tind, dind] = 
gt[m]["id"] - gtm[tind, m] = d["id"] - - if p.iouType == "densepose": - if not len(ioubs) == 0: - for dind, d in enumerate(dt): - # information about best match so far (m=-1 -> unmatched) - if dtm[tind, dind] == 0: - ioub = 0.8 - m = -1 - for gind, _g in enumerate(gt): - # if this gt already matched, and not a crowd, continue - if gtm[tind, gind] > 0 and not iscrowd[gind]: - continue - # continue to next gt unless better match made - if ioubs[dind, gind] < ioub: - continue - # if match successful and best so far, store appropriately - ioub = ioubs[dind, gind] - m = gind - # if match made store id of match for both dt and gt - if m > -1: - dtIg[:, dind] = gtIg[m] - if gtIg[m]: - dtm[tind, dind] = gt[m]["id"] - gtm[tind, m] = d["id"] - # set unmatched detections outside of area range to ignore - a = np.array([d["area"] < aRng[0] or d["area"] > aRng[1] for d in dt]).reshape((1, len(dt))) - dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T, 0))) - # store results for given image and category - # print('Done with the function', len(self.ious[imgId, catId])) - return { - "image_id": imgId, - "category_id": catId, - "aRng": aRng, - "maxDet": maxDet, - "dtIds": [d["id"] for d in dt], - "gtIds": [g["id"] for g in gt], - "dtMatches": dtm, - "gtMatches": gtm, - "dtScores": [d["score"] for d in dt], - "gtIgnore": gtIg, - "dtIgnore": dtIg, - } - - def accumulate(self, p=None): - """ - Accumulate per image evaluation results and store the result in self.eval - :param p: input params for evaluation - :return: None - """ - logger.info("Accumulating evaluation results...") - tic = time.time() - if not self.evalImgs: - logger.info("Please run evaluate() first") - # allows input customized parameters - if p is None: - p = self.params - p.catIds = p.catIds if p.useCats == 1 else [-1] - T = len(p.iouThrs) - R = len(p.recThrs) - K = len(p.catIds) if p.useCats else 1 - A = len(p.areaRng) - M = len(p.maxDets) - precision = -np.ones((T, R, K, A, M)) # -1 for the precision of absent categories - recall = -np.ones((T, K, A, M)) - - # create dictionary for future indexing - logger.info("Categories: {}".format(p.catIds)) - _pe = self._paramsEval - catIds = _pe.catIds if _pe.useCats else [-1] - setK = set(catIds) - setA = set(map(tuple, _pe.areaRng)) - setM = set(_pe.maxDets) - setI = set(_pe.imgIds) - # get inds to evaluate - k_list = [n for n, k in enumerate(p.catIds) if k in setK] - m_list = [m for n, m in enumerate(p.maxDets) if m in setM] - a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA] - i_list = [n for n, i in enumerate(p.imgIds) if i in setI] - I0 = len(_pe.imgIds) - A0 = len(_pe.areaRng) - # retrieve E at each category, area range, and max number of detections - for k, k0 in enumerate(k_list): - Nk = k0 * A0 * I0 - for a, a0 in enumerate(a_list): - Na = a0 * I0 - for m, maxDet in enumerate(m_list): - E = [self.evalImgs[Nk + Na + i] for i in i_list] - E = [e for e in E if e is not None] - if len(E) == 0: - continue - dtScores = np.concatenate([e["dtScores"][0:maxDet] for e in E]) - - # different sorting method generates slightly different results. - # mergesort is used to be consistent as Matlab implementation. 
- inds = np.argsort(-dtScores, kind="mergesort") - - dtm = np.concatenate([e["dtMatches"][:, 0:maxDet] for e in E], axis=1)[:, inds] - dtIg = np.concatenate([e["dtIgnore"][:, 0:maxDet] for e in E], axis=1)[:, inds] - gtIg = np.concatenate([e["gtIgnore"] for e in E]) - npig = np.count_nonzero(gtIg == 0) - if npig == 0: - continue - tps = np.logical_and(dtm, np.logical_not(dtIg)) - fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg)) - tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float) - fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float) - for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)): - tp = np.array(tp) - fp = np.array(fp) - nd = len(tp) - rc = tp / npig - pr = tp / (fp + tp + np.spacing(1)) - q = np.zeros((R,)) - - if nd: - recall[t, k, a, m] = rc[-1] - else: - recall[t, k, a, m] = 0 - - # numpy is slow without cython optimization for accessing elements - # use python array gets significant speed improvement - pr = pr.tolist() - q = q.tolist() - - for i in range(nd - 1, 0, -1): - if pr[i] > pr[i - 1]: - pr[i - 1] = pr[i] - - inds = np.searchsorted(rc, p.recThrs, side="left") - try: - for ri, pi in enumerate(inds): - q[ri] = pr[pi] - except Exception: - pass - precision[t, :, k, a, m] = np.array(q) - logger.info( - "Final: max precision {}, min precision {}".format(np.max(precision), np.min(precision)) - ) - self.eval = { - "params": p, - "counts": [T, R, K, A, M], - "date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), - "precision": precision, - "recall": recall, - } - toc = time.time() - logger.info("DONE (t={:0.2f}s).".format(toc - tic)) - - def summarize(self): - """ - Compute and display summary metrics for evaluation results. - Note this function can *only* be applied on the default parameter setting - """ - - def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100): - p = self.params - iStr = " {:<18} {} @[ {}={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}" - titleStr = "Average Precision" if ap == 1 else "Average Recall" - typeStr = "(AP)" if ap == 1 else "(AR)" - measure = "IoU" - if self.params.iouType == "keypoints": - measure = "OKS" - elif self.params.iouType == "densepose": - measure = "OGPS" - iouStr = ( - "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1]) - if iouThr is None - else "{:0.2f}".format(iouThr) - ) - - aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng] - mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets] - if ap == 1: - # dimension of precision: [TxRxKxAxM] - s = self.eval["precision"] - # IoU - if iouThr is not None: - t = np.where(np.abs(iouThr - p.iouThrs) < 0.001)[0] - s = s[t] - s = s[:, :, :, aind, mind] - else: - # dimension of recall: [TxKxAxM] - s = self.eval["recall"] - if iouThr is not None: - t = np.where(iouThr == p.iouThrs)[0] - s = s[t] - s = s[:, :, aind, mind] - if len(s[s > -1]) == 0: - mean_s = -1 - else: - mean_s = np.mean(s[s > -1]) - logger.info(iStr.format(titleStr, typeStr, measure, iouStr, areaRng, maxDets, mean_s)) - return mean_s - - def _summarizeDets(): - stats = np.zeros((12,)) - stats[0] = _summarize(1) - stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2]) - stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2]) - stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2]) - stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2]) - stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2]) - stats[6] = _summarize(0, maxDets=self.params.maxDets[0]) - stats[7] = _summarize(0, 
maxDets=self.params.maxDets[1]) - stats[8] = _summarize(0, maxDets=self.params.maxDets[2]) - stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2]) - stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2]) - stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2]) - return stats - - def _summarizeKps(): - stats = np.zeros((10,)) - stats[0] = _summarize(1, maxDets=20) - stats[1] = _summarize(1, maxDets=20, iouThr=0.5) - stats[2] = _summarize(1, maxDets=20, iouThr=0.75) - stats[3] = _summarize(1, maxDets=20, areaRng="medium") - stats[4] = _summarize(1, maxDets=20, areaRng="large") - stats[5] = _summarize(0, maxDets=20) - stats[6] = _summarize(0, maxDets=20, iouThr=0.5) - stats[7] = _summarize(0, maxDets=20, iouThr=0.75) - stats[8] = _summarize(0, maxDets=20, areaRng="medium") - stats[9] = _summarize(0, maxDets=20, areaRng="large") - return stats - - def _summarizeUvs(): - stats = np.zeros((10,)) - stats[0] = _summarize(1, maxDets=self.params.maxDets[0]) - stats[1] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.5) - stats[2] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.75) - stats[3] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="medium") - stats[4] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="large") - stats[5] = _summarize(0, maxDets=self.params.maxDets[0]) - stats[6] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.5) - stats[7] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.75) - stats[8] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="medium") - stats[9] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="large") - return stats - - def _summarizeUvsOld(): - stats = np.zeros((18,)) - stats[0] = _summarize(1, maxDets=self.params.maxDets[0]) - stats[1] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.5) - stats[2] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.55) - stats[3] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.60) - stats[4] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.65) - stats[5] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.70) - stats[6] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.75) - stats[7] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.80) - stats[8] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.85) - stats[9] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.90) - stats[10] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.95) - stats[11] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="medium") - stats[12] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="large") - stats[13] = _summarize(0, maxDets=self.params.maxDets[0]) - stats[14] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.5) - stats[15] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.75) - stats[16] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="medium") - stats[17] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="large") - return stats - - if not self.eval: - raise Exception("Please run accumulate() first") - iouType = self.params.iouType - if iouType in ["segm", "bbox"]: - summarize = _summarizeDets - elif iouType in ["keypoints"]: - summarize = _summarizeKps - elif iouType in ["densepose"]: - summarize = _summarizeUvs - self.stats = summarize() - - def __str__(self): - self.summarize() - - # ================ functions for dense pose ============================== - def findAllClosestVerts(self, gt, U_points, V_points, 
Index_points): - # - I_gt = np.array(gt["dp_I"]) - U_gt = np.array(gt["dp_U"]) - V_gt = np.array(gt["dp_V"]) - # - # print(I_gt) - # - ClosestVerts = np.ones(Index_points.shape) * -1 - for i in np.arange(24): - # - if sum(Index_points == (i + 1)) > 0: - UVs = np.array( - [U_points[Index_points == (i + 1)], V_points[Index_points == (i + 1)]] - ) - Current_Part_UVs = self.Part_UVs[i] - Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i] - D = ssd.cdist(Current_Part_UVs.transpose(), UVs.transpose()).squeeze() - ClosestVerts[Index_points == (i + 1)] = Current_Part_ClosestVertInds[ - np.argmin(D, axis=0) - ] - # - ClosestVertsGT = np.ones(Index_points.shape) * -1 - for i in np.arange(24): - if sum(I_gt == (i + 1)) > 0: - UVs = np.array([U_gt[I_gt == (i + 1)], V_gt[I_gt == (i + 1)]]) - Current_Part_UVs = self.Part_UVs[i] - Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i] - D = ssd.cdist(Current_Part_UVs.transpose(), UVs.transpose()).squeeze() - ClosestVertsGT[I_gt == (i + 1)] = Current_Part_ClosestVertInds[np.argmin(D, axis=0)] - # - return ClosestVerts, ClosestVertsGT - - def getDistances(self, cVertsGT, cVerts): - - ClosestVertsTransformed = self.PDIST_transform[cVerts.astype(int) - 1] - ClosestVertsGTTransformed = self.PDIST_transform[cVertsGT.astype(int) - 1] - # - ClosestVertsTransformed[cVerts < 0] = 0 - ClosestVertsGTTransformed[cVertsGT < 0] = 0 - # - cVertsGT = ClosestVertsGTTransformed - cVerts = ClosestVertsTransformed - # - n = 27554 - dists = [] - for d in range(len(cVertsGT)): - if cVertsGT[d] > 0: - if cVerts[d] > 0: - i = cVertsGT[d] - 1 - j = cVerts[d] - 1 - if j == i: - dists.append(0) - elif j > i: - ccc = i - i = j - j = ccc - i = n - i - 1 - j = n - j - 1 - k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1 - k = (n * n - n) / 2 - k - 1 - dists.append(self.Pdist_matrix[int(k)][0]) - else: - i = n - i - 1 - j = n - j - 1 - k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1 - k = (n * n - n) / 2 - k - 1 - dists.append(self.Pdist_matrix[int(k)][0]) - else: - dists.append(np.inf) - return np.array(dists).squeeze() - - -class Params: - """ - Params for coco evaluation api - """ - - def setDetParams(self): - self.imgIds = [] - self.catIds = [] - # np.arange causes trouble. the data point on arange is slightly larger than the true value - self.iouThrs = np.linspace(0.5, 0.95, np.round((0.95 - 0.5) / 0.05) + 1, endpoint=True) - self.recThrs = np.linspace(0.0, 1.00, np.round((1.00 - 0.0) / 0.01) + 1, endpoint=True) - self.maxDets = [1, 10, 100] - self.areaRng = [ - [0 ** 2, 1e5 ** 2], - [0 ** 2, 32 ** 2], - [32 ** 2, 96 ** 2], - [96 ** 2, 1e5 ** 2], - ] - self.areaRngLbl = ["all", "small", "medium", "large"] - self.useCats = 1 - - def setKpParams(self): - self.imgIds = [] - self.catIds = [] - # np.arange causes trouble. 
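The index arithmetic in getDistances above addresses a condensed (flattened upper-triangular) pairwise distance matrix rather than a full n×n array; the code additionally reverses the indices before applying the formula. The underlying layout is the same one scipy's squareform uses, shown here as a small sketch rather than the exact transform used above:

```python
import numpy as np
from scipy.spatial.distance import squareform

n = 5
condensed = np.arange(n * (n - 1) // 2, dtype=float)  # one value per pair i < j
full = squareform(condensed)                          # n x n symmetric matrix

def condensed_index(i, j, n):
    """Position of pair (i, j), i < j, in the condensed vector."""
    return n * i - i * (i + 1) // 2 + (j - i - 1)

assert full[1, 3] == condensed[condensed_index(1, 3, n)]
```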
the data point on arange is slightly larger than the true value - self.iouThrs = np.linspace(0.5, 0.95, np.round((0.95 - 0.5) / 0.05) + 1, endpoint=True) - self.recThrs = np.linspace(0.0, 1.00, np.round((1.00 - 0.0) / 0.01) + 1, endpoint=True) - self.maxDets = [20] - self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]] - self.areaRngLbl = ["all", "medium", "large"] - self.useCats = 1 - - def setUvParams(self): - self.imgIds = [] - self.catIds = [] - self.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True) - self.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True) - self.maxDets = [20] - self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]] - self.areaRngLbl = ["all", "medium", "large"] - self.useCats = 1 - - def __init__(self, iouType="segm"): - if iouType == "segm" or iouType == "bbox": - self.setDetParams() - elif iouType == "keypoints": - self.setKpParams() - elif iouType == "densepose": - self.setUvParams() - else: - raise Exception("iouType not supported") - self.iouType = iouType - # useSegm is deprecated - self.useSegm = None diff --git a/spaces/CVPR/Example-Echocardiogram-Segmentation/app.py b/spaces/CVPR/Example-Echocardiogram-Segmentation/app.py deleted file mode 100644 index 4af9e8f54eefe73bb475d2cfe2e25a1aec24e49d..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Example-Echocardiogram-Segmentation/app.py +++ /dev/null @@ -1,93 +0,0 @@ -import os, os.path -from os.path import splitext -import numpy as np -import sys -import matplotlib.pyplot as plt -import torch -import torchvision -import wget - - -destination_folder = "output" -destination_for_weights = "weights" - -if os.path.exists(destination_for_weights): - print("The weights are at", destination_for_weights) -else: - print("Creating folder at ", destination_for_weights, " to store weights") - os.mkdir(destination_for_weights) - -segmentationWeightsURL = 'https://github.com/douyang/EchoNetDynamic/releases/download/v1.0.0/deeplabv3_resnet50_random.pt' - -if not os.path.exists(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL))): - print("Downloading Segmentation Weights, ", segmentationWeightsURL," to ",os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL))) - filename = wget.download(segmentationWeightsURL, out = destination_for_weights) -else: - print("Segmentation Weights already present") - -torch.cuda.empty_cache() - -def collate_fn(x): - x, f = zip(*x) - i = list(map(lambda t: t.shape[1], x)) - x = torch.as_tensor(np.swapaxes(np.concatenate(x, 1), 0, 1)) - return x, f, i - -model = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=False, aux_loss=False) -model.classifier[-1] = torch.nn.Conv2d(model.classifier[-1].in_channels, 1, kernel_size=model.classifier[-1].kernel_size) - -print("loading weights from ", os.path.join(destination_for_weights, "deeplabv3_resnet50_random")) - -if torch.cuda.is_available(): - print("cuda is available, original weights") - device = torch.device("cuda") - model = torch.nn.DataParallel(model) - model.to(device) - checkpoint = torch.load(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL))) - model.load_state_dict(checkpoint['state_dict']) -else: - print("cuda is not available, cpu weights") - device = torch.device("cpu") - checkpoint = torch.load(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL)), map_location = "cpu") - state_dict_cpu = 
{k[7:]: v for (k, v) in checkpoint['state_dict'].items()} - model.load_state_dict(state_dict_cpu) - -model.eval() - -def segment(inp): - x = inp.transpose([2, 0, 1]) # channels-first - x = np.expand_dims(x, axis=0) # adding a batch dimension - - mean = x.mean(axis=(0, 2, 3)) - std = x.std(axis=(0, 2, 3)) - x = x - mean.reshape(1, 3, 1, 1) - x = x / std.reshape(1, 3, 1, 1) - - with torch.no_grad(): - x = torch.from_numpy(x).type('torch.FloatTensor').to(device) - output = model(x) - - y = output['out'].numpy() - y = y.squeeze() - - out = y>0 - - mask = inp.copy() - mask[out] = np.array([0, 0, 255]) - - return mask - -import gradio as gr - -i = gr.inputs.Image(shape=(112, 112)) -o = gr.outputs.Image() - -examples = [["img1.jpg"], ["img2.jpg"]] -title = "Example: Echocardiogram Segmentation" #"Left Ventricle Segmentation" -description = "This semantic segmentation model identifies the left ventricle in echocardiogram images. Read more at the links below." -# videos. Accurate evaluation of the motion and size of the left ventricle is crucial for the assessment of cardiac function and ejection fraction. In this interface, the user inputs apical-4-chamber images from echocardiography videos and the model will output a prediction of the localization of the left ventricle in blue. This model was trained on the publicly released EchoNet-Dynamic dataset of 10k echocardiogram videos with 20k expert annotations of the left ventricle and published as part of ‘Video-based AI for beat-to-beat assessment of cardiac function’ by Ouyang et al. in Nature, 2020." -thumbnail = "https://raw.githubusercontent.com/gradio-app/hub-echonet/master/thumbnail.png" - -article = "" -gr.Interface(segment, i, o, examples=examples, allow_flagging=False, analytics_enabled=False, - title=title, description=description, thumbnail=thumbnail,article=article).launch(enable_queue=True) diff --git a/spaces/CVPR/LIVE/thrust/dependencies/cub/cmake/CubCudaConfig.cmake b/spaces/CVPR/LIVE/thrust/dependencies/cub/cmake/CubCudaConfig.cmake deleted file mode 100644 index 74d3a13517ddab3b975ab84cb1b692b04a0db84a..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/dependencies/cub/cmake/CubCudaConfig.cmake +++ /dev/null @@ -1,133 +0,0 @@ -if (NOT ("${CMAKE_CUDA_HOST_COMPILER}" STREQUAL "" OR - "${CMAKE_CUDA_HOST_COMPILER}" STREQUAL "${CMAKE_CXX_COMPILER}")) - message(FATAL_ERROR - "CUB tests and examples require the C++ compiler and the CUDA host " - "compiler to be the same; to set this compiler, please use the " - "CMAKE_CXX_COMPILER variable, not the CMAKE_CUDA_HOST_COMPILER variable." - ) -endif() -set(CMAKE_CUDA_HOST_COMPILER "${CMAKE_CXX_COMPILER}") - -# -# Architecture options: -# - -set(all_archs 35 37 50 52 53 60 61 62 70 72 75 80) -set(arch_message "CUB: Enabled CUDA architectures:") -set(enabled_archs) - -# Thrust sets up the architecture flags in CMAKE_CUDA_FLAGS already. Just -# reuse them if possible. After we transition to CMake 3.18 CUDA_ARCHITECTURE -# target properties this will need to be updated. 
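In the CPU branch of the echocardiogram app above, `{k[7:]: v ...}` strips the `"module."` prefix (7 characters) that torch.nn.DataParallel adds to every checkpoint key. A slightly more defensive sketch of the same idea, not the app's own code:

```python
def strip_dataparallel_prefix(state_dict):
    """Remove the 'module.' prefix DataParallel adds to checkpoint keys."""
    prefix = "module."
    return {
        (k[len(prefix):] if k.startswith(prefix) else k): v
        for k, v in state_dict.items()
    }
```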
-if (CUB_IN_THRUST) - # Configure to use all flags from thrust: - set(CMAKE_CUDA_FLAGS "${THRUST_CUDA_FLAGS_BASE} ${THRUST_CUDA_FLAGS_NO_RDC}") - - # Update the enabled architectures list from thrust - foreach (arch IN LISTS all_archs) - if (THRUST_ENABLE_COMPUTE_${arch}) - set(CUB_ENABLE_COMPUTE_${arch} True) - list(APPEND enabled_archs ${arch}) - string(APPEND arch_message " sm_${arch}") - else() - set(CUB_ENABLE_COMPUTE_${arch} False) - endif() - endforeach() - - # Otherwise create cache options and build the flags ourselves: -else() # NOT CUB_IN_THRUST - - # Find the highest arch: - list(SORT all_archs) - list(LENGTH all_archs max_idx) - math(EXPR max_idx "${max_idx} - 1") - list(GET all_archs ${max_idx} highest_arch) - - option(CUB_DISABLE_ARCH_BY_DEFAULT - "If ON, then all CUDA architectures are disabled on the initial CMake run." - OFF - ) - - set(option_init ON) - if (CUB_DISABLE_ARCH_BY_DEFAULT) - set(option_init OFF) - endif() - - set(arch_flags) - foreach (arch IN LISTS all_archs) - option(CUB_ENABLE_COMPUTE_${arch} - "Enable code generation for sm_${arch}." - ${option_init} - ) - if (CUB_ENABLE_COMPUTE_${arch}) - list(APPEND enabled_archs ${arch}) - string(APPEND arch_flags " -gencode arch=compute_${arch},code=sm_${arch}") - string(APPEND arch_message " sm_${arch}") - endif() - endforeach() - - option(CUB_ENABLE_COMPUTE_FUTURE - "Enable code generation for tests for compute_${highest_arch}" - ${option_init} - ) - if (CUB_ENABLE_COMPUTE_FUTURE) - string(APPEND arch_flags - " -gencode arch=compute_${highest_arch},code=compute_${highest_arch}" - ) - string(APPEND arch_message " compute_${highest_arch}") - endif() - - # TODO Once CMake 3.18 is required, use the CUDA_ARCHITECTURE target props - string(APPEND CMAKE_CUDA_FLAGS "${arch_flags}") -endif() - -message(STATUS ${arch_message}) - -# Create a variable containing the minimal target arch for tests -list(SORT enabled_archs) -list(GET enabled_archs 0 CUB_MINIMAL_ENABLED_ARCH) - -# -# RDC options: -# - -option(CUB_ENABLE_TESTS_WITH_RDC - "Build all CUB tests with RDC; tests that require RDC are not affected by this option." - OFF -) - -option(CUB_ENABLE_EXAMPLES_WITH_RDC - "Build all CUB examples with RDC; examples which require RDC are not affected by this option." - OFF -) - -# Check for RDC/SM compatibility and error/warn if necessary -set(no_rdc_archs 53 62 72) -set(rdc_supported True) -foreach (arch IN LISTS no_rdc_archs) - if (CUB_ENABLE_COMPUTE_${arch}) - set(rdc_supported False) - break() - endif() -endforeach() - -set(rdc_opts - CUB_ENABLE_TESTS_WITH_RDC - CUB_ENABLE_EXAMPLES_WITH_RDC -) -set(rdc_requested False) -foreach (rdc_opt IN LISTS rdc_opts) - if (${rdc_opt}) - set(rdc_requested True) - break() - endif() -endforeach() - -if (rdc_requested AND NOT rdc_supported) - string(JOIN ", " no_rdc ${no_rdc_archs}) - string(JOIN "\n" opts ${rdc_opts}) - message(FATAL_ERROR - "Architectures {${no_rdc}} do not support RDC and are incompatible with " - "these options:\n${opts}" - ) -endif() diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/scatter.h b/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/scatter.h deleted file mode 100644 index c6ae90664ad9538e73febfde86c334011de417c8..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/scatter.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -// this system has no special version of this algorithm - diff --git a/spaces/CVPR/MonoScene/README.md b/spaces/CVPR/MonoScene/README.md deleted file mode 100644 index 93d3e18f6b1d1636b4b43bc21d2713d909cb51df..0000000000000000000000000000000000000000 --- a/spaces/CVPR/MonoScene/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: MonoScene -emoji: 🚘🏙️ -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.0.20 -app_file: app.py -pinned: true -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/CVPR/WALT/mmdet/models/utils/res_layer.py b/spaces/CVPR/WALT/mmdet/models/utils/res_layer.py deleted file mode 100644 index 4a4efd3dd30b30123ed5135eac080ad9f7f7b448..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/utils/res_layer.py +++ /dev/null @@ -1,187 +0,0 @@ -from mmcv.cnn import build_conv_layer, build_norm_layer -from torch import nn as nn - - -class ResLayer(nn.Sequential): - """ResLayer to build ResNet style backbone. - - Args: - block (nn.Module): block used to build ResLayer. - inplanes (int): inplanes of block. - planes (int): planes of block. - num_blocks (int): number of blocks. - stride (int): stride of the first block. Default: 1 - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. Default: False - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - downsample_first (bool): Downsample at the first block or last block. - False for Hourglass, True for ResNet. 
Default: True - """ - - def __init__(self, - block, - inplanes, - planes, - num_blocks, - stride=1, - avg_down=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - downsample_first=True, - **kwargs): - self.block = block - - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = [] - conv_stride = stride - if avg_down: - conv_stride = 1 - downsample.append( - nn.AvgPool2d( - kernel_size=stride, - stride=stride, - ceil_mode=True, - count_include_pad=False)) - downsample.extend([ - build_conv_layer( - conv_cfg, - inplanes, - planes * block.expansion, - kernel_size=1, - stride=conv_stride, - bias=False), - build_norm_layer(norm_cfg, planes * block.expansion)[1] - ]) - downsample = nn.Sequential(*downsample) - - layers = [] - if downsample_first: - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=stride, - downsample=downsample, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - inplanes = planes * block.expansion - for _ in range(1, num_blocks): - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - - else: # downsample_first=False is for HourglassModule - for _ in range(num_blocks - 1): - layers.append( - block( - inplanes=inplanes, - planes=inplanes, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=stride, - downsample=downsample, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - super(ResLayer, self).__init__(*layers) - - -class SimplifiedBasicBlock(nn.Module): - """Simplified version of original basic residual block. This is used in - `SCNet `_. - - - Norm layer is now optional - - Last ReLU in forward function is removed - """ - expansion = 1 - - def __init__(self, - inplanes, - planes, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - dcn=None, - plugins=None): - super(SimplifiedBasicBlock, self).__init__() - assert dcn is None, 'Not implemented yet.' - assert plugins is None, 'Not implemented yet.' - assert not with_cp, 'Not implemented yet.' 
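ResLayer's avg_down branch above replaces a strided 1x1 convolution with average pooling followed by a stride-1 projection, so downsampling aggregates activations instead of discarding three quarters of them. Stripped of the mmcv builder helpers, the downsample it constructs is roughly this plain-PyTorch sketch:

```python
import torch.nn as nn

def make_avg_down(inplanes, planes, expansion, stride):
    """AvgPool handles the spatial stride; the 1x1 projection keeps stride 1."""
    return nn.Sequential(
        nn.AvgPool2d(kernel_size=stride, stride=stride,
                     ceil_mode=True, count_include_pad=False),
        nn.Conv2d(inplanes, planes * expansion, kernel_size=1, bias=False),
        nn.BatchNorm2d(planes * expansion),
    )
```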
- self.with_norm = norm_cfg is not None - with_bias = True if norm_cfg is None else False - self.conv1 = build_conv_layer( - conv_cfg, - inplanes, - planes, - 3, - stride=stride, - padding=dilation, - dilation=dilation, - bias=with_bias) - if self.with_norm: - self.norm1_name, norm1 = build_norm_layer( - norm_cfg, planes, postfix=1) - self.add_module(self.norm1_name, norm1) - self.conv2 = build_conv_layer( - conv_cfg, planes, planes, 3, padding=1, bias=with_bias) - if self.with_norm: - self.norm2_name, norm2 = build_norm_layer( - norm_cfg, planes, postfix=2) - self.add_module(self.norm2_name, norm2) - - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - self.dilation = dilation - self.with_cp = with_cp - - @property - def norm1(self): - """nn.Module: normalization layer after the first convolution layer""" - return getattr(self, self.norm1_name) if self.with_norm else None - - @property - def norm2(self): - """nn.Module: normalization layer after the second convolution layer""" - return getattr(self, self.norm2_name) if self.with_norm else None - - def forward(self, x): - """Forward function.""" - - identity = x - - out = self.conv1(x) - if self.with_norm: - out = self.norm1(out) - out = self.relu(out) - - out = self.conv2(out) - if self.with_norm: - out = self.norm2(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out diff --git a/spaces/CoreyMorris/MMLU-by-task-Leaderboard/test_regression.py b/spaces/CoreyMorris/MMLU-by-task-Leaderboard/test_regression.py deleted file mode 100644 index 8b8f8a92c3e6e57c2434e44f3b91844ce3c6ef25..0000000000000000000000000000000000000000 --- a/spaces/CoreyMorris/MMLU-by-task-Leaderboard/test_regression.py +++ /dev/null @@ -1,31 +0,0 @@ -import pytest -import pandas as pd -import unittest -from result_data_processor import ResultDataProcessor -import os - -class TestRegression(unittest.TestCase): - def test_data_output_is_the_same(self): - - df_current = ResultDataProcessor().data - - # load the reference dataframe - last_commit = os.popen('git rev-parse HEAD').read().strip() - print(last_commit) - reference_file = f'dataframe_history/output_{last_commit}.parquet' - df_reference = pd.read_parquet(reference_file) - - #TODO - # if there are no untracked changes, the dataframes should be the same - # if there is no file saved for the current commit, save a file for the current commit - # instead check the last commit to the one previous to that one - # if there are untracked changes, the dataframes should be different - # either optionally take a parameter for this test or extract the comparison logic so that it can be used separately to - # compare given any two commit hashes - - # Compare DataFrames, allowing for some tolerance in floating-point comparisons - pd.testing.assert_frame_equal(df_current, df_reference, check_dtype=True, atol=1e-5) - - -if __name__ == '__main__': - unittest.main() \ No newline at end of file diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/_tkinter_finder.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/_tkinter_finder.py deleted file mode 100644 index 597c21b5e385b7fe09191c9f5dd89b6600c22967..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/_tkinter_finder.py +++ /dev/null @@ -1,17 +0,0 @@ -""" Find compiled module linking to Tcl / Tk libraries -""" -import sys -import tkinter -from tkinter import _tkinter as tk - -try: - if hasattr(sys, 
"pypy_find_executable"): - TKINTER_LIB = tk.tklib_cffi.__file__ - else: - TKINTER_LIB = tk.__file__ -except AttributeError: - # _tkinter may be compiled directly into Python, in which case __file__ is - # not available. load_tkinter_funcs will check the binary first in any case. - TKINTER_LIB = None - -tk_version = str(tkinter.TkVersion) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/sfnt.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/sfnt.py deleted file mode 100644 index 354fb85ea2fa33c93884ca5ef725ac99d9efcdb8..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/sfnt.py +++ /dev/null @@ -1,664 +0,0 @@ -"""ttLib/sfnt.py -- low-level module to deal with the sfnt file format. - -Defines two public classes: - SFNTReader - SFNTWriter - -(Normally you don't have to use these classes explicitly; they are -used automatically by ttLib.TTFont.) - -The reading and writing of sfnt files is separated in two distinct -classes, since whenever the number of tables changes or whenever -a table's length changes you need to rewrite the whole file anyway. -""" - -from io import BytesIO -from types import SimpleNamespace -from fontTools.misc.textTools import Tag -from fontTools.misc import sstruct -from fontTools.ttLib import TTLibError, TTLibFileIsCollectionError -import struct -from collections import OrderedDict -import logging - - -log = logging.getLogger(__name__) - - -class SFNTReader(object): - def __new__(cls, *args, **kwargs): - """Return an instance of the SFNTReader sub-class which is compatible - with the input file type. - """ - if args and cls is SFNTReader: - infile = args[0] - infile.seek(0) - sfntVersion = Tag(infile.read(4)) - infile.seek(0) - if sfntVersion == "wOF2": - # return new WOFF2Reader object - from fontTools.ttLib.woff2 import WOFF2Reader - - return object.__new__(WOFF2Reader) - # return default object - return object.__new__(cls) - - def __init__(self, file, checkChecksums=0, fontNumber=-1): - self.file = file - self.checkChecksums = checkChecksums - - self.flavor = None - self.flavorData = None - self.DirectoryEntry = SFNTDirectoryEntry - self.file.seek(0) - self.sfntVersion = self.file.read(4) - self.file.seek(0) - if self.sfntVersion == b"ttcf": - header = readTTCHeader(self.file) - numFonts = header.numFonts - if not 0 <= fontNumber < numFonts: - raise TTLibFileIsCollectionError( - "specify a font number between 0 and %d (inclusive)" - % (numFonts - 1) - ) - self.numFonts = numFonts - self.file.seek(header.offsetTable[fontNumber]) - data = self.file.read(sfntDirectorySize) - if len(data) != sfntDirectorySize: - raise TTLibError("Not a Font Collection (not enough data)") - sstruct.unpack(sfntDirectoryFormat, data, self) - elif self.sfntVersion == b"wOFF": - self.flavor = "woff" - self.DirectoryEntry = WOFFDirectoryEntry - data = self.file.read(woffDirectorySize) - if len(data) != woffDirectorySize: - raise TTLibError("Not a WOFF font (not enough data)") - sstruct.unpack(woffDirectoryFormat, data, self) - else: - data = self.file.read(sfntDirectorySize) - if len(data) != sfntDirectorySize: - raise TTLibError("Not a TrueType or OpenType font (not enough data)") - sstruct.unpack(sfntDirectoryFormat, data, self) - self.sfntVersion = Tag(self.sfntVersion) - - if self.sfntVersion not in ("\x00\x01\x00\x00", "OTTO", "true"): - raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)") - tables = {} - for i in range(self.numTables): - 
entry = self.DirectoryEntry() - entry.fromFile(self.file) - tag = Tag(entry.tag) - tables[tag] = entry - self.tables = OrderedDict(sorted(tables.items(), key=lambda i: i[1].offset)) - - # Load flavor data if any - if self.flavor == "woff": - self.flavorData = WOFFFlavorData(self) - - def has_key(self, tag): - return tag in self.tables - - __contains__ = has_key - - def keys(self): - return self.tables.keys() - - def __getitem__(self, tag): - """Fetch the raw table data.""" - entry = self.tables[Tag(tag)] - data = entry.loadData(self.file) - if self.checkChecksums: - if tag == "head": - # Beh: we have to special-case the 'head' table. - checksum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:]) - else: - checksum = calcChecksum(data) - if self.checkChecksums > 1: - # Be obnoxious, and barf when it's wrong - assert checksum == entry.checkSum, "bad checksum for '%s' table" % tag - elif checksum != entry.checkSum: - # Be friendly, and just log a warning. - log.warning("bad checksum for '%s' table", tag) - return data - - def __delitem__(self, tag): - del self.tables[Tag(tag)] - - def close(self): - self.file.close() - - # We define custom __getstate__ and __setstate__ to make SFNTReader pickle-able - # and deepcopy-able. When a TTFont is loaded as lazy=True, SFNTReader holds a - # reference to an external file object which is not pickleable. So in __getstate__ - # we store the file name and current position, and in __setstate__ we reopen the - # same named file after unpickling. - - def __getstate__(self): - if isinstance(self.file, BytesIO): - # BytesIO is already pickleable, return the state unmodified - return self.__dict__ - - # remove unpickleable file attribute, and only store its name and pos - state = self.__dict__.copy() - del state["file"] - state["_filename"] = self.file.name - state["_filepos"] = self.file.tell() - return state - - def __setstate__(self, state): - if "file" not in state: - self.file = open(state.pop("_filename"), "rb") - self.file.seek(state.pop("_filepos")) - self.__dict__.update(state) - - -# default compression level for WOFF 1.0 tables and metadata -ZLIB_COMPRESSION_LEVEL = 6 - -# if set to True, use zopfli instead of zlib for compressing WOFF 1.0. -# The Python bindings are available at https://pypi.python.org/pypi/zopfli -USE_ZOPFLI = False - -# mapping between zlib's compression levels and zopfli's 'numiterations'. -# Use lower values for files over several MB in size or it will be too slow -ZOPFLI_LEVELS = { - # 0: 0, # can't do 0 iterations... - 1: 1, - 2: 3, - 3: 5, - 4: 8, - 5: 10, - 6: 15, - 7: 25, - 8: 50, - 9: 100, -} - - -def compress(data, level=ZLIB_COMPRESSION_LEVEL): - """Compress 'data' to Zlib format. If 'USE_ZOPFLI' variable is True, - zopfli is used instead of the zlib module. - The compression 'level' must be between 0 and 9. 1 gives best speed, - 9 gives best compression (0 gives no compression at all). - The default value is a compromise between speed and compression (6). - """ - if not (0 <= level <= 9): - raise ValueError("Bad compression level: %s" % level) - if not USE_ZOPFLI or level == 0: - from zlib import compress - - return compress(data, level) - else: - from zopfli.zlib import compress - - return compress(data, numiterations=ZOPFLI_LEVELS[level]) - - -class SFNTWriter(object): - def __new__(cls, *args, **kwargs): - """Return an instance of the SFNTWriter sub-class which is compatible - with the specified 'flavor'. 
- """ - flavor = None - if kwargs and "flavor" in kwargs: - flavor = kwargs["flavor"] - elif args and len(args) > 3: - flavor = args[3] - if cls is SFNTWriter: - if flavor == "woff2": - # return new WOFF2Writer object - from fontTools.ttLib.woff2 import WOFF2Writer - - return object.__new__(WOFF2Writer) - # return default object - return object.__new__(cls) - - def __init__( - self, - file, - numTables, - sfntVersion="\000\001\000\000", - flavor=None, - flavorData=None, - ): - self.file = file - self.numTables = numTables - self.sfntVersion = Tag(sfntVersion) - self.flavor = flavor - self.flavorData = flavorData - - if self.flavor == "woff": - self.directoryFormat = woffDirectoryFormat - self.directorySize = woffDirectorySize - self.DirectoryEntry = WOFFDirectoryEntry - - self.signature = "wOFF" - - # to calculate WOFF checksum adjustment, we also need the original SFNT offsets - self.origNextTableOffset = ( - sfntDirectorySize + numTables * sfntDirectoryEntrySize - ) - else: - assert not self.flavor, "Unknown flavor '%s'" % self.flavor - self.directoryFormat = sfntDirectoryFormat - self.directorySize = sfntDirectorySize - self.DirectoryEntry = SFNTDirectoryEntry - - from fontTools.ttLib import getSearchRange - - self.searchRange, self.entrySelector, self.rangeShift = getSearchRange( - numTables, 16 - ) - - self.directoryOffset = self.file.tell() - self.nextTableOffset = ( - self.directoryOffset - + self.directorySize - + numTables * self.DirectoryEntry.formatSize - ) - # clear out directory area - self.file.seek(self.nextTableOffset) - # make sure we're actually where we want to be. (old cStringIO bug) - self.file.write(b"\0" * (self.nextTableOffset - self.file.tell())) - self.tables = OrderedDict() - - def setEntry(self, tag, entry): - if tag in self.tables: - raise TTLibError("cannot rewrite '%s' table" % tag) - - self.tables[tag] = entry - - def __setitem__(self, tag, data): - """Write raw table data to disk.""" - if tag in self.tables: - raise TTLibError("cannot rewrite '%s' table" % tag) - - entry = self.DirectoryEntry() - entry.tag = tag - entry.offset = self.nextTableOffset - if tag == "head": - entry.checkSum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:]) - self.headTable = data - entry.uncompressed = True - else: - entry.checkSum = calcChecksum(data) - entry.saveData(self.file, data) - - if self.flavor == "woff": - entry.origOffset = self.origNextTableOffset - self.origNextTableOffset += (entry.origLength + 3) & ~3 - - self.nextTableOffset = self.nextTableOffset + ((entry.length + 3) & ~3) - # Add NUL bytes to pad the table data to a 4-byte boundary. - # Don't depend on f.seek() as we need to add the padding even if no - # subsequent write follows (seek is lazy), ie. after the final table - # in the font. - self.file.write(b"\0" * (self.nextTableOffset - self.file.tell())) - assert self.nextTableOffset == self.file.tell() - - self.setEntry(tag, entry) - - def __getitem__(self, tag): - return self.tables[tag] - - def close(self): - """All tables must have been written to disk. Now write the - directory. 
- """ - tables = sorted(self.tables.items()) - if len(tables) != self.numTables: - raise TTLibError( - "wrong number of tables; expected %d, found %d" - % (self.numTables, len(tables)) - ) - - if self.flavor == "woff": - self.signature = b"wOFF" - self.reserved = 0 - - self.totalSfntSize = 12 - self.totalSfntSize += 16 * len(tables) - for tag, entry in tables: - self.totalSfntSize += (entry.origLength + 3) & ~3 - - data = self.flavorData if self.flavorData else WOFFFlavorData() - if data.majorVersion is not None and data.minorVersion is not None: - self.majorVersion = data.majorVersion - self.minorVersion = data.minorVersion - else: - if hasattr(self, "headTable"): - self.majorVersion, self.minorVersion = struct.unpack( - ">HH", self.headTable[4:8] - ) - else: - self.majorVersion = self.minorVersion = 0 - if data.metaData: - self.metaOrigLength = len(data.metaData) - self.file.seek(0, 2) - self.metaOffset = self.file.tell() - compressedMetaData = compress(data.metaData) - self.metaLength = len(compressedMetaData) - self.file.write(compressedMetaData) - else: - self.metaOffset = self.metaLength = self.metaOrigLength = 0 - if data.privData: - self.file.seek(0, 2) - off = self.file.tell() - paddedOff = (off + 3) & ~3 - self.file.write(b"\0" * (paddedOff - off)) - self.privOffset = self.file.tell() - self.privLength = len(data.privData) - self.file.write(data.privData) - else: - self.privOffset = self.privLength = 0 - - self.file.seek(0, 2) - self.length = self.file.tell() - - else: - assert not self.flavor, "Unknown flavor '%s'" % self.flavor - pass - - directory = sstruct.pack(self.directoryFormat, self) - - self.file.seek(self.directoryOffset + self.directorySize) - seenHead = 0 - for tag, entry in tables: - if tag == "head": - seenHead = 1 - directory = directory + entry.toString() - if seenHead: - self.writeMasterChecksum(directory) - self.file.seek(self.directoryOffset) - self.file.write(directory) - - def _calcMasterChecksum(self, directory): - # calculate checkSumAdjustment - tags = list(self.tables.keys()) - checksums = [] - for i in range(len(tags)): - checksums.append(self.tables[tags[i]].checkSum) - - if self.DirectoryEntry != SFNTDirectoryEntry: - # Create a SFNT directory for checksum calculation purposes - from fontTools.ttLib import getSearchRange - - self.searchRange, self.entrySelector, self.rangeShift = getSearchRange( - self.numTables, 16 - ) - directory = sstruct.pack(sfntDirectoryFormat, self) - tables = sorted(self.tables.items()) - for tag, entry in tables: - sfntEntry = SFNTDirectoryEntry() - sfntEntry.tag = entry.tag - sfntEntry.checkSum = entry.checkSum - sfntEntry.offset = entry.origOffset - sfntEntry.length = entry.origLength - directory = directory + sfntEntry.toString() - - directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize - assert directory_end == len(directory) - - checksums.append(calcChecksum(directory)) - checksum = sum(checksums) & 0xFFFFFFFF - # BiboAfba! 
- checksumadjustment = (0xB1B0AFBA - checksum) & 0xFFFFFFFF - return checksumadjustment - - def writeMasterChecksum(self, directory): - checksumadjustment = self._calcMasterChecksum(directory) - # write the checksum to the file - self.file.seek(self.tables["head"].offset + 8) - self.file.write(struct.pack(">L", checksumadjustment)) - - def reordersTables(self): - return False - - -# -- sfnt directory helpers and cruft - -ttcHeaderFormat = """ - > # big endian - TTCTag: 4s # "ttcf" - Version: L # 0x00010000 or 0x00020000 - numFonts: L # number of fonts - # OffsetTable[numFonts]: L # array with offsets from beginning of file - # ulDsigTag: L # version 2.0 only - # ulDsigLength: L # version 2.0 only - # ulDsigOffset: L # version 2.0 only -""" - -ttcHeaderSize = sstruct.calcsize(ttcHeaderFormat) - -sfntDirectoryFormat = """ - > # big endian - sfntVersion: 4s - numTables: H # number of tables - searchRange: H # (max2 <= numTables)*16 - entrySelector: H # log2(max2 <= numTables) - rangeShift: H # numTables*16-searchRange -""" - -sfntDirectorySize = sstruct.calcsize(sfntDirectoryFormat) - -sfntDirectoryEntryFormat = """ - > # big endian - tag: 4s - checkSum: L - offset: L - length: L -""" - -sfntDirectoryEntrySize = sstruct.calcsize(sfntDirectoryEntryFormat) - -woffDirectoryFormat = """ - > # big endian - signature: 4s # "wOFF" - sfntVersion: 4s - length: L # total woff file size - numTables: H # number of tables - reserved: H # set to 0 - totalSfntSize: L # uncompressed size - majorVersion: H # major version of WOFF file - minorVersion: H # minor version of WOFF file - metaOffset: L # offset to metadata block - metaLength: L # length of compressed metadata - metaOrigLength: L # length of uncompressed metadata - privOffset: L # offset to private data block - privLength: L # length of private data block -""" - -woffDirectorySize = sstruct.calcsize(woffDirectoryFormat) - -woffDirectoryEntryFormat = """ - > # big endian - tag: 4s - offset: L - length: L # compressed length - origLength: L # original length - checkSum: L # original checksum -""" - -woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat) - - -class DirectoryEntry(object): - def __init__(self): - self.uncompressed = False # if True, always embed entry raw - - def fromFile(self, file): - sstruct.unpack(self.format, file.read(self.formatSize), self) - - def fromString(self, str): - sstruct.unpack(self.format, str, self) - - def toString(self): - return sstruct.pack(self.format, self) - - def __repr__(self): - if hasattr(self, "tag"): - return "<%s '%s' at %x>" % (self.__class__.__name__, self.tag, id(self)) - else: - return "<%s at %x>" % (self.__class__.__name__, id(self)) - - def loadData(self, file): - file.seek(self.offset) - data = file.read(self.length) - assert len(data) == self.length - if hasattr(self.__class__, "decodeData"): - data = self.decodeData(data) - return data - - def saveData(self, file, data): - if hasattr(self.__class__, "encodeData"): - data = self.encodeData(data) - self.length = len(data) - file.seek(self.offset) - file.write(data) - - def decodeData(self, rawData): - return rawData - - def encodeData(self, data): - return data - - -class SFNTDirectoryEntry(DirectoryEntry): - - format = sfntDirectoryEntryFormat - formatSize = sfntDirectoryEntrySize - - -class WOFFDirectoryEntry(DirectoryEntry): - - format = woffDirectoryEntryFormat - formatSize = woffDirectoryEntrySize - - def __init__(self): - super(WOFFDirectoryEntry, self).__init__() - # With fonttools<=3.1.2, the only way to set a different zlib 
- # compression level for WOFF directory entries was to set the class - # attribute 'zlibCompressionLevel'. This is now replaced by a globally - # defined `ZLIB_COMPRESSION_LEVEL`, which is also applied when - # compressing the metadata. For backward compatibility, we still - # use the class attribute if it was already set. - if not hasattr(WOFFDirectoryEntry, "zlibCompressionLevel"): - self.zlibCompressionLevel = ZLIB_COMPRESSION_LEVEL - - def decodeData(self, rawData): - import zlib - - if self.length == self.origLength: - data = rawData - else: - assert self.length < self.origLength - data = zlib.decompress(rawData) - assert len(data) == self.origLength - return data - - def encodeData(self, data): - self.origLength = len(data) - if not self.uncompressed: - compressedData = compress(data, self.zlibCompressionLevel) - if self.uncompressed or len(compressedData) >= self.origLength: - # Encode uncompressed - rawData = data - self.length = self.origLength - else: - rawData = compressedData - self.length = len(rawData) - return rawData - - -class WOFFFlavorData: - - Flavor = "woff" - - def __init__(self, reader=None): - self.majorVersion = None - self.minorVersion = None - self.metaData = None - self.privData = None - if reader: - self.majorVersion = reader.majorVersion - self.minorVersion = reader.minorVersion - if reader.metaLength: - reader.file.seek(reader.metaOffset) - rawData = reader.file.read(reader.metaLength) - assert len(rawData) == reader.metaLength - data = self._decompress(rawData) - assert len(data) == reader.metaOrigLength - self.metaData = data - if reader.privLength: - reader.file.seek(reader.privOffset) - data = reader.file.read(reader.privLength) - assert len(data) == reader.privLength - self.privData = data - - def _decompress(self, rawData): - import zlib - - return zlib.decompress(rawData) - - -def calcChecksum(data): - """Calculate the checksum for an arbitrary block of data. - - If the data length is not a multiple of four, it assumes - it is to be padded with null byte. 
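-
-    The checksum is the sum of the data interpreted as big-endian
-    unsigned 32-bit longs, truncated to 32 bits: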
- - >>> print(calcChecksum(b"abcd")) - 1633837924 - >>> print(calcChecksum(b"abcdxyz")) - 3655064932 - """ - remainder = len(data) % 4 - if remainder: - data += b"\0" * (4 - remainder) - value = 0 - blockSize = 4096 - assert blockSize % 4 == 0 - for i in range(0, len(data), blockSize): - block = data[i : i + blockSize] - longs = struct.unpack(">%dL" % (len(block) // 4), block) - value = (value + sum(longs)) & 0xFFFFFFFF - return value - - -def readTTCHeader(file): - file.seek(0) - data = file.read(ttcHeaderSize) - if len(data) != ttcHeaderSize: - raise TTLibError("Not a Font Collection (not enough data)") - self = SimpleNamespace() - sstruct.unpack(ttcHeaderFormat, data, self) - if self.TTCTag != "ttcf": - raise TTLibError("Not a Font Collection") - assert self.Version == 0x00010000 or self.Version == 0x00020000, ( - "unrecognized TTC version 0x%08x" % self.Version - ) - self.offsetTable = struct.unpack( - ">%dL" % self.numFonts, file.read(self.numFonts * 4) - ) - if self.Version == 0x00020000: - pass # ignoring version 2.0 signatures - return self - - -def writeTTCHeader(file, numFonts): - self = SimpleNamespace() - self.TTCTag = "ttcf" - self.Version = 0x00010000 - self.numFonts = numFonts - file.seek(0) - file.write(sstruct.pack(ttcHeaderFormat, self)) - offset = file.tell() - file.write(struct.pack(">%dL" % self.numFonts, *([0] * self.numFonts))) - return offset - - -if __name__ == "__main__": - import sys - import doctest - - sys.exit(doctest.testmod().failed) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Button-f155035a.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Button-f155035a.js deleted file mode 100644 index a1620bdc99b2542290909cb58d6a94a19b6fa3e7..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Button-f155035a.js +++ /dev/null @@ -1,4 +0,0 @@ -import{i as Q,a8 as C,S as q,e as A,s as E,z as k,v as B,a9 as I,E as T,N as L,aa as U,U as c,L as o,p as V,ab as D,ac as F,ad as K,q as G,A as N,K as y,Q as H,F as J}from"./index-1d65707a.js";function M(t){const e=t-1;return e*e*e+1}function x(t,{delay:e=0,duration:i=400,easing:n=Q}={}){const u=+getComputedStyle(t).opacity;return{delay:e,duration:i,easing:n,css:d=>`opacity: ${d*u}`}}function p(t,{delay:e=0,duration:i=400,easing:n=M,x:u=0,y:d=0,opacity:m=0}={}){const f=getComputedStyle(t),l=+f.opacity,s=f.transform==="none"?"":f.transform,h=l*(1-m),[r,b]=C(u),[w,_]=C(d);return{delay:e,duration:i,easing:n,css:(g,v)=>` - transform: ${s} translate(${(1-g)*r}${b}, ${(1-g)*w}${_}); - opacity: ${l-h*v}`}}function P(t){let e,i,n;const u=t[17].default,d=I(u,t,t[16],null);let m=[{"data-testid":t[7]},{id:t[2]},{class:i="block "+t[3].join(" ")+" svelte-90oupt"}],f={};for(let l=0;l{"height"in a&&i(0,d=a.height),"width"in a&&i(1,m=a.width),"elem_id"in a&&i(2,f=a.elem_id),"elem_classes"in a&&i(3,l=a.elem_classes),"variant"in a&&i(4,s=a.variant),"border_mode"in a&&i(5,h=a.border_mode),"padding"in a&&i(6,r=a.padding),"type"in a&&i(15,b=a.type),"test_id"in a&&i(7,w=a.test_id),"explicit_call"in a&&i(8,_=a.explicit_call),"container"in a&&i(9,g=a.container),"visible"in a&&i(10,v=a.visible),"allow_overflow"in a&&i(11,z=a.allow_overflow),"scale"in a&&i(12,j=a.scale),"min_width"in a&&i(13,S=a.min_width),"$$scope"in a&&i(16,u=a.$$scope)},[d,m,f,l,s,h,r,w,_,g,v,z,j,S,O,b,u,n]}class $ extends 
q{constructor(e){super(),A(this,e,W,R,E,{height:0,width:1,elem_id:2,elem_classes:3,variant:4,border_mode:5,padding:6,type:15,test_id:7,explicit_call:8,container:9,visible:10,allow_overflow:11,scale:12,min_width:13})}}function X(t){let e,i,n,u,d;const m=t[9].default,f=I(m,t,t[8],null);return{c(){e=L("button"),f&&f.c(),y(e,"class",i=t[4]+" "+t[3]+" "+t[1].join(" ")+" svelte-1e89no8"),y(e,"id",t[0]),e.disabled=t[5],c(e,"hidden",!t[2]),o(e,"flex-grow",t[6]),o(e,"width",t[6]===0?"fit-content":null),o(e,"min-width",typeof t[7]=="number"?`calc(min(${t[7]}px, 100%))`:null)},m(l,s){V(l,e,s),f&&f.m(e,null),n=!0,u||(d=H(e,"click",t[10]),u=!0)},p(l,[s]){f&&f.p&&(!n||s&256)&&D(f,m,l,l[8],n?K(m,l[8],s,null):F(l[8]),null),(!n||s&26&&i!==(i=l[4]+" "+l[3]+" "+l[1].join(" ")+" svelte-1e89no8"))&&y(e,"class",i),(!n||s&1)&&y(e,"id",l[0]),(!n||s&32)&&(e.disabled=l[5]),(!n||s&30)&&c(e,"hidden",!l[2]),s&64&&o(e,"flex-grow",l[6]),s&64&&o(e,"width",l[6]===0?"fit-content":null),s&128&&o(e,"min-width",typeof l[7]=="number"?`calc(min(${l[7]}px, 100%))`:null)},i(l){n||(k(f,l),n=!0)},o(l){B(f,l),n=!1},d(l){l&&N(e),f&&f.d(l),u=!1,d()}}}function Y(t,e,i){let{$$slots:n={},$$scope:u}=e,{elem_id:d=""}=e,{elem_classes:m=[]}=e,{visible:f=!0}=e,{variant:l="secondary"}=e,{size:s="lg"}=e,{disabled:h=!1}=e,{scale:r=null}=e,{min_width:b=void 0}=e;function w(_){J.call(this,t,_)}return t.$$set=_=>{"elem_id"in _&&i(0,d=_.elem_id),"elem_classes"in _&&i(1,m=_.elem_classes),"visible"in _&&i(2,f=_.visible),"variant"in _&&i(3,l=_.variant),"size"in _&&i(4,s=_.size),"disabled"in _&&i(5,h=_.disabled),"scale"in _&&i(6,r=_.scale),"min_width"in _&&i(7,b=_.min_width),"$$scope"in _&&i(8,u=_.$$scope)},[d,m,f,l,s,h,r,b,u,n,w]}class ee extends q{constructor(e){super(),A(this,e,Y,X,E,{elem_id:0,elem_classes:1,visible:2,variant:3,size:4,disabled:5,scale:6,min_width:7})}}export{$ as B,ee as a,p as b,M as c,x as f}; -//# sourceMappingURL=Button-f155035a.js.map diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/__version__.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/__version__.py deleted file mode 100644 index 6a8e63c60262fc2650cb5c71514a4b23f949aa58..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/__version__.py +++ /dev/null @@ -1,3 +0,0 @@ -__title__ = "httpx" -__description__ = "A next generation HTTP client, for Python 3." 
-__version__ = "0.24.1" diff --git a/spaces/Datasculptor/StyleGAN-NADA/e4e/configs/transforms_config.py b/spaces/Datasculptor/StyleGAN-NADA/e4e/configs/transforms_config.py deleted file mode 100644 index ac12b5d5ba0571f21715e0f6b24b9c1ebe84bf72..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/StyleGAN-NADA/e4e/configs/transforms_config.py +++ /dev/null @@ -1,62 +0,0 @@ -from abc import abstractmethod -import torchvision.transforms as transforms - - -class TransformsConfig(object): - - def __init__(self, opts): - self.opts = opts - - @abstractmethod - def get_transforms(self): - pass - - -class EncodeTransforms(TransformsConfig): - - def __init__(self, opts): - super(EncodeTransforms, self).__init__(opts) - - def get_transforms(self): - transforms_dict = { - 'transform_gt_train': transforms.Compose([ - transforms.Resize((256, 256)), - transforms.RandomHorizontalFlip(0.5), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]), - 'transform_source': None, - 'transform_test': transforms.Compose([ - transforms.Resize((256, 256)), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]), - 'transform_inference': transforms.Compose([ - transforms.Resize((256, 256)), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]) - } - return transforms_dict - - -class CarsEncodeTransforms(TransformsConfig): - - def __init__(self, opts): - super(CarsEncodeTransforms, self).__init__(opts) - - def get_transforms(self): - transforms_dict = { - 'transform_gt_train': transforms.Compose([ - transforms.Resize((192, 256)), - transforms.RandomHorizontalFlip(0.5), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]), - 'transform_source': None, - 'transform_test': transforms.Compose([ - transforms.Resize((192, 256)), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]), - 'transform_inference': transforms.Compose([ - transforms.Resize((192, 256)), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]) - } - return transforms_dict diff --git a/spaces/DrSnowbird/clip-image-search/app.py b/spaces/DrSnowbird/clip-image-search/app.py deleted file mode 100644 index d67dd870f2103e77100f1839773217178b5f9e99..0000000000000000000000000000000000000000 --- a/spaces/DrSnowbird/clip-image-search/app.py +++ /dev/null @@ -1,171 +0,0 @@ -import os - -from sentence_transformers import SentenceTransformer, util -from PIL import Image -import glob - -import pickle -import zipfile -from tqdm.autonotebook import tqdm -from pathlib import Path - -from sentence_transformers import SentenceTransformer, util - -import torch -torch.cuda.is_available = lambda : False -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") -# device = torch.device('cpu') -print("Device:", device) - -import gradio as gr - -from loguru import logger - - -I2I = "Text->Text" -T2I = "Text->Image" -I2I = "Image->Image" - -model = SentenceTransformer("clip-ViT-B-32") -# load_model_path = 'xxxxxxx.pt' -# model = torch.load(load_model_path) -mode = model.to(device) -# mode = model.to('cpu') -print(next(model.parameters()).device) - - -img_folder = Path("./photos/") -# Next, we get about 25k images from Unsplash -if not os.path.exists(img_folder) or len(os.listdir(img_folder)) == 0: - os.makedirs(img_folder, exist_ok=True) - - photo_filename = 'unsplash-25k-photos.zip' - if not 
os.path.exists(photo_filename):   # Download dataset if does not exist
-        util.http_get('http://sbert.net/datasets/' + photo_filename, photo_filename)
-
-    # Extract all images
-    with zipfile.ZipFile(photo_filename, 'r') as zf:
-        for member in tqdm(zf.infolist(), desc='Extracting'):
-            zf.extract(member, img_folder)
-
-emb_filename = Path("unsplash-25k-photos-embeddings.pkl")
-with open(emb_filename, "rb") as fIn:
-    img_names, img_emb = pickle.load(fIn)
-
-use_precomputed_embeddings = True
-def compute_embeddings(use_precomputed_embeddings: bool = use_precomputed_embeddings):
-    if use_precomputed_embeddings:
-        emb_filename = 'unsplash-25k-photos-embeddings.pkl'
-        if not os.path.exists(emb_filename):   # Download dataset if does not exist
-            util.http_get('http://sbert.net/datasets/' + emb_filename, emb_filename)
-
-        with open(emb_filename, 'rb') as fIn:
-            img_names, img_emb = pickle.load(fIn)
-        print("Images:", len(img_names))
-    else:
-        # For embedding images, we need the non-multilingual CLIP model
-        img_model = SentenceTransformer('clip-ViT-B-32')
-        img_names = list(glob.glob('photos/*.jpg'))
-        print("Images:", len(img_names))
-        img_emb = img_model.encode([Image.open(filepath) for filepath in img_names], batch_size=128,
-                                   convert_to_tensor=True, show_progress_bar=True)
-    return img_names, img_emb
-
-img_names, img_emb = compute_embeddings()
-
-
-# Cat image downloaded from https://www.flickr.com/photos/blacktigersdream/23119711630
-cat_image = "./cat_example.jpg"
-# Dog example downloaded from https://upload.wikimedia.org/wikipedia/commons/1/18/Dog_Breeds.jpg
-dog_image = "./dog_example.jpg"
-
-blank_image = "./blank_image.jpg"
-
-# img = PIL.Image.open(img)
-# Load CLIP model
-model = SentenceTransformer('clip-ViT-B-32')
-
-TEXT_FILE_PATTERN=os.getenv("TEXT_FILE_PATTERN", "./data-text/*.txt")
-def embed_text_files(file_pattern="./data-text/*.txt"):
-    texts_files = glob.glob(file_pattern)
-    texts_=[]
-    for text_file in texts_files:
-        try:
-            # Encode a text file:
-            with open(text_file, 'r') as file:
-                data = file.read().replace('\n', '')
-
-            texts_.append(data)
-        except IOError:
-            print(f">>> ERROR: embed_text_files(): IOError: {text_file}")
-            return None
-    texts_emb = model.encode(texts_)
-    return texts_files, texts_emb
-
-
-IMAGE_FILE_PATTERN=os.getenv("IMAGE_FILE_PATTERN", "./photos/*.jpg")
-def embed_image_files(file_pattern=IMAGE_FILE_PATTERN):
-    imgs_files = glob.glob(file_pattern)
-    imgs_emb=[]
-    for img in imgs_files:
-        try:
-            # Encode an image:
-            img_emb = model.encode(Image.open(img))
-            imgs_emb.append(img_emb)
-        except UnidentifiedImageError:
-            print(f">>> ERROR: embed_image_files(): UnidentifiedImageError: {img}")
-            return None
-    return imgs_files, imgs_emb
-
-
-def search(image, mode, top_k, text):
-    logger.info(f"Mode {mode} selected")
-    if mode == I2I:
-        logger.info(f"Processing image in mode {mode}")
-        emb = model.encode([Image.fromarray(image)]) #, convert_to_tensor=True)
-    elif mode == T2I:
-        logger.info(f"Processing text in mode {mode}")
-        emb = model.encode([text]) #, convert_to_tensor=True)
-
-    print(f">>> emb Shape: {emb.shape}")
-    print(f">>> img_emb Shape: {img_emb.shape}")
-
-    cos_sim = util.cos_sim(img_emb, emb)
-    print(f">>> cos_sim Shape: {cos_sim.shape}")
-
-    logger.info(f"Best match: {img_names[torch.argmax(cos_sim)]}")
-    # return [Image.open(img_folder / img_names[top_k_best_image]) for top_k_best_image in torch.topk(cos_sim, 3, 0).indices]
-    return [Image.open(img_folder / img_names[top_k_best_image]) for top_k_best_image in torch.topk(cos_sim, top_k, 0).indices]
-
-
-if __name__ ==
"__main__": - - iface = gr.Interface( - fn=search, - inputs=[ - gr.inputs.Image(label="Image to search", optional=True), - gr.inputs.Radio([T2I, I2I]), - gr.inputs.Slider(1, 5, step=1, default=3), - gr.inputs.Textbox( - lines=1, label="Text query", placeholder="Introduce the search text...", - ), - ], - theme="grass", - outputs=gr.outputs.Carousel([gr.outputs.Image(type="pil")]), - # outputs=[gr.outputs.Image(type="auto", label="1st Best match"), - # gr.outputs.Image(type="auto", label="2nd Best match"), - # gr.outputs.Image(type="auto", label="3rd Best match")], - examples=[ - [cat_image, I2I, 3, ""], - [dog_image, I2I, 3, ""], - [blank_image, T2I, 5, "A white dog"], - [blank_image, T2I, 3, "A black dog"], - [blank_image, T2I, 3, "A cat"], - [blank_image, T2I, 3, "Hugging Face"], - ], - # enable_queue=True, - title="CLIP Image Search", - description="Select the mode to search for a match in Unsplash (thumbnail size) dataset. text2image mode needs a text as input and outputs the image with the most similar embedding (following cosine similarity). The Image to image mode is similar, but an input image is used instead of a text query", - ) - iface.launch() diff --git a/spaces/DragGan/DragGan-Inversion/PTI/torch_utils/ops/upfirdn2d.cpp b/spaces/DragGan/DragGan-Inversion/PTI/torch_utils/ops/upfirdn2d.cpp deleted file mode 100644 index 2d7177fc60040751d20e9a8da0301fa3ab64968a..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/PTI/torch_utils/ops/upfirdn2d.cpp +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -#include -#include -#include -#include "upfirdn2d.h" - -//------------------------------------------------------------------------ - -static torch::Tensor upfirdn2d(torch::Tensor x, torch::Tensor f, int upx, int upy, int downx, int downy, int padx0, int padx1, int pady0, int pady1, bool flip, float gain) -{ - // Validate arguments. - TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); - TORCH_CHECK(f.device() == x.device(), "f must reside on the same device as x"); - TORCH_CHECK(f.dtype() == torch::kFloat, "f must be float32"); - TORCH_CHECK(x.numel() <= INT_MAX, "x is too large"); - TORCH_CHECK(f.numel() <= INT_MAX, "f is too large"); - TORCH_CHECK(x.dim() == 4, "x must be rank 4"); - TORCH_CHECK(f.dim() == 2, "f must be rank 2"); - TORCH_CHECK(f.size(0) >= 1 && f.size(1) >= 1, "f must be at least 1x1"); - TORCH_CHECK(upx >= 1 && upy >= 1, "upsampling factor must be at least 1"); - TORCH_CHECK(downx >= 1 && downy >= 1, "downsampling factor must be at least 1"); - - // Create output tensor. - const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); - int outW = ((int)x.size(3) * upx + padx0 + padx1 - (int)f.size(1) + downx) / downx; - int outH = ((int)x.size(2) * upy + pady0 + pady1 - (int)f.size(0) + downy) / downy; - TORCH_CHECK(outW >= 1 && outH >= 1, "output must be at least 1x1"); - torch::Tensor y = torch::empty({x.size(0), x.size(1), outH, outW}, x.options(), x.suggest_memory_format()); - TORCH_CHECK(y.numel() <= INT_MAX, "output is too large"); - - // Initialize CUDA kernel parameters. 
- upfirdn2d_kernel_params p; - p.x = x.data_ptr(); - p.f = f.data_ptr(); - p.y = y.data_ptr(); - p.up = make_int2(upx, upy); - p.down = make_int2(downx, downy); - p.pad0 = make_int2(padx0, pady0); - p.flip = (flip) ? 1 : 0; - p.gain = gain; - p.inSize = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); - p.inStride = make_int4((int)x.stride(3), (int)x.stride(2), (int)x.stride(1), (int)x.stride(0)); - p.filterSize = make_int2((int)f.size(1), (int)f.size(0)); - p.filterStride = make_int2((int)f.stride(1), (int)f.stride(0)); - p.outSize = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0)); - p.outStride = make_int4((int)y.stride(3), (int)y.stride(2), (int)y.stride(1), (int)y.stride(0)); - p.sizeMajor = (p.inStride.z == 1) ? p.inSize.w : p.inSize.w * p.inSize.z; - p.sizeMinor = (p.inStride.z == 1) ? p.inSize.z : 1; - - // Choose CUDA kernel. - upfirdn2d_kernel_spec spec; - AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] - { - spec = choose_upfirdn2d_kernel(p); - }); - - // Set looping options. - p.loopMajor = (p.sizeMajor - 1) / 16384 + 1; - p.loopMinor = spec.loopMinor; - p.loopX = spec.loopX; - p.launchMinor = (p.sizeMinor - 1) / p.loopMinor + 1; - p.launchMajor = (p.sizeMajor - 1) / p.loopMajor + 1; - - // Compute grid size. - dim3 blockSize, gridSize; - if (spec.tileOutW < 0) // large - { - blockSize = dim3(4, 32, 1); - gridSize = dim3( - ((p.outSize.y - 1) / blockSize.x + 1) * p.launchMinor, - (p.outSize.x - 1) / (blockSize.y * p.loopX) + 1, - p.launchMajor); - } - else // small - { - blockSize = dim3(256, 1, 1); - gridSize = dim3( - ((p.outSize.y - 1) / spec.tileOutH + 1) * p.launchMinor, - (p.outSize.x - 1) / (spec.tileOutW * p.loopX) + 1, - p.launchMajor); - } - - // Launch CUDA kernel. 
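-    // (A single cudaLaunchKernel call covers the whole batch; the loop counts
-    // set above let each thread block iterate over the major/minor dimensions
-    // when the tensor is larger than one launch's grid.)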
- void* args[] = {&p}; - AT_CUDA_CHECK(cudaLaunchKernel(spec.kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream())); - return y; -} - -//------------------------------------------------------------------------ - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("upfirdn2d", &upfirdn2d); -} - -//------------------------------------------------------------------------ diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/pti_configs/global_config.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/pti_configs/global_config.py deleted file mode 100644 index bda8d2d08828aace7551db94847e2a1e039876df..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/pti_configs/global_config.py +++ /dev/null @@ -1,12 +0,0 @@ -# Device -cuda_visible_devices = '0' -device = 'cuda:0' - -# Logs -training_step = 1 -image_rec_result_log_snapshot = 100 -pivotal_training_steps = 0 -model_snapshot_interval = 400 - -# Run name to be updated during PTI -run_name = 'exp' diff --git a/spaces/ECCV2022/bytetrack/tutorials/ctracker/test.py b/spaces/ECCV2022/bytetrack/tutorials/ctracker/test.py deleted file mode 100644 index 772d9169975cd51f4aad5830fde363f776e97b4b..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/tutorials/ctracker/test.py +++ /dev/null @@ -1,337 +0,0 @@ -import numpy as np -import torchvision -import time -import math -import os -import copy -import pdb -import argparse -import sys -import cv2 -import skimage.io -import skimage.transform -import skimage.color -import skimage -import torch -import model - -from torch.utils.data import Dataset, DataLoader -from torchvision import datasets, models, transforms -from dataloader import CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, UnNormalizer, Normalizer, RGB_MEAN, RGB_STD -from scipy.optimize import linear_sum_assignment - -# assert torch.__version__.split('.')[1] == '4' - -print('CUDA available: {}'.format(torch.cuda.is_available())) - -color_list = [(0, 0, 255), (255, 0, 0), (0, 255, 0), (255, 0, 255), (0, 255, 255), (255, 255, 0), (128, 0, 255), -(0, 128, 255), (128, 255, 0), (0, 255, 128), (255, 128, 0), (255, 0, 128), (128, 128, 255), (128, 255, 128), (255, 128, 128), (128, 128, 0), (128, 0, 128)] - -class detect_rect: - def __init__(self): - self.curr_frame = 0 - self.curr_rect = np.array([0, 0, 1, 1]) - self.next_rect = np.array([0, 0, 1, 1]) - self.conf = 0 - self.id = 0 - - @property - def position(self): - x = (self.curr_rect[0] + self.curr_rect[2])/2 - y = (self.curr_rect[1] + self.curr_rect[3])/2 - return np.array([x, y]) - - @property - def size(self): - w = self.curr_rect[2] - self.curr_rect[0] - h = self.curr_rect[3] - self.curr_rect[1] - return np.array([w, h]) - -class tracklet: - def __init__(self, det_rect): - self.id = det_rect.id - self.rect_list = [det_rect] - self.rect_num = 1 - self.last_rect = det_rect - self.last_frame = det_rect.curr_frame - self.no_match_frame = 0 - - def add_rect(self, det_rect): - self.rect_list.append(det_rect) - self.rect_num = self.rect_num + 1 - self.last_rect = det_rect - self.last_frame = det_rect.curr_frame - - @property - def velocity(self): - if(self.rect_num < 2): - return (0, 0) - elif(self.rect_num < 6): - return (self.rect_list[self.rect_num - 1].position - self.rect_list[self.rect_num - 2].position) / (self.rect_list[self.rect_num - 1].curr_frame - self.rect_list[self.rect_num - 2].curr_frame) - else: - v1 = (self.rect_list[self.rect_num - 1].position - 
self.rect_list[self.rect_num - 4].position) / (self.rect_list[self.rect_num - 1].curr_frame - self.rect_list[self.rect_num - 4].curr_frame) - v2 = (self.rect_list[self.rect_num - 2].position - self.rect_list[self.rect_num - 5].position) / (self.rect_list[self.rect_num - 2].curr_frame - self.rect_list[self.rect_num - 5].curr_frame) - v3 = (self.rect_list[self.rect_num - 3].position - self.rect_list[self.rect_num - 6].position) / (self.rect_list[self.rect_num - 3].curr_frame - self.rect_list[self.rect_num - 6].curr_frame) - return (v1 + v2 + v3) / 3 - - -def cal_iou(rect1, rect2): - x1, y1, x2, y2 = rect1 - x3, y3, x4, y4 = rect2 - i_w = min(x2, x4) - max(x1, x3) - i_h = min(y2, y4) - max(y1, y3) - if(i_w <= 0 or i_h <= 0): - return 0 - i_s = i_w * i_h - s_1 = (x2 - x1) * (y2 - y1) - s_2 = (x4 - x3) * (y4 - y3) - return float(i_s) / (s_1 + s_2 - i_s) - -def cal_simi(det_rect1, det_rect2): - return cal_iou(det_rect1.next_rect, det_rect2.curr_rect) - -def cal_simi_track_det(track, det_rect): - if(det_rect.curr_frame <= track.last_frame): - print("cal_simi_track_det error") - return 0 - elif(det_rect.curr_frame - track.last_frame == 1): - return cal_iou(track.last_rect.next_rect, det_rect.curr_rect) - else: - pred_rect = track.last_rect.curr_rect + np.append(track.velocity, track.velocity) * (det_rect.curr_frame - track.last_frame) - return cal_iou(pred_rect, det_rect.curr_rect) - -def track_det_match(tracklet_list, det_rect_list, min_iou = 0.5): - num1 = len(tracklet_list) - num2 = len(det_rect_list) - cost_mat = np.zeros((num1, num2)) - for i in range(num1): - for j in range(num2): - cost_mat[i, j] = -cal_simi_track_det(tracklet_list[i], det_rect_list[j]) - - match_result = linear_sum_assignment(cost_mat) - match_result = np.asarray(match_result) - match_result = np.transpose(match_result) - - matches, unmatched1, unmatched2 = [], [], [] - for i in range(num1): - if i not in match_result[:, 0]: - unmatched1.append(i) - for j in range(num2): - if j not in match_result[:, 1]: - unmatched2.append(j) - for i, j in match_result: - if cost_mat[i, j] > -min_iou: - unmatched1.append(i) - unmatched2.append(j) - else: - matches.append((i, j)) - return matches, unmatched1, unmatched2 - -def draw_caption(image, box, caption, color): - b = np.array(box).astype(int) - cv2.putText(image, caption, (b[0], b[1] - 8), cv2.FONT_HERSHEY_PLAIN, 2, color, 2) - - -def run_each_dataset(model_dir, retinanet, dataset_path, subset, cur_dataset): - print(cur_dataset) - - img_list = os.listdir(os.path.join(dataset_path, subset, cur_dataset, 'img1')) - img_list = [os.path.join(dataset_path, subset, cur_dataset, 'img1', _) for _ in img_list if ('jpg' in _) or ('png' in _)] - img_list = sorted(img_list) - - img_len = len(img_list) - last_feat = None - - confidence_threshold = 0.4 - IOU_threshold = 0.5 - retention_threshold = 10 - - det_list_all = [] - tracklet_all = [] - max_id = 0 - max_draw_len = 100 - draw_interval = 5 - img_width = 1920 - img_height = 1080 - fps = 30 - - for i in range(img_len): - det_list_all.append([]) - - for idx in range((int(img_len / 2)), img_len + 1): - i = idx - 1 - print('tracking: ', i) - with torch.no_grad(): - data_path1 = img_list[min(idx, img_len - 1)] - img_origin1 = skimage.io.imread(data_path1) - img_h, img_w, _ = img_origin1.shape - img_height, img_width = img_h, img_w - resize_h, resize_w = math.ceil(img_h / 32) * 32, math.ceil(img_w / 32) * 32 - img1 = np.zeros((resize_h, resize_w, 3), dtype=img_origin1.dtype) - img1[:img_h, :img_w, :] = img_origin1 - img1 = (img1.astype(np.float32) / 
255.0 - np.array([[RGB_MEAN]])) / np.array([[RGB_STD]]) - img1 = torch.from_numpy(img1).permute(2, 0, 1).view(1, 3, resize_h, resize_w) - scores, transformed_anchors, last_feat = retinanet(img1.cuda().float(), last_feat=last_feat) -# if idx > 0: - if idx > (int(img_len / 2)): - idxs = np.where(scores>0.1) - - for j in range(idxs[0].shape[0]): - bbox = transformed_anchors[idxs[0][j], :] - x1 = int(bbox[0]) - y1 = int(bbox[1]) - x2 = int(bbox[2]) - y2 = int(bbox[3]) - - x3 = int(bbox[4]) - y3 = int(bbox[5]) - x4 = int(bbox[6]) - y4 = int(bbox[7]) - - det_conf = float(scores[idxs[0][j]]) - - det_rect = detect_rect() - det_rect.curr_frame = idx - det_rect.curr_rect = np.array([x1, y1, x2, y2]) - det_rect.next_rect = np.array([x3, y3, x4, y4]) - det_rect.conf = det_conf - - if det_rect.conf > confidence_threshold: - det_list_all[det_rect.curr_frame - 1].append(det_rect) -# if i == 0: - if i == int(img_len / 2): - for j in range(len(det_list_all[i])): - det_list_all[i][j].id = j + 1 - max_id = max(max_id, j + 1) - track = tracklet(det_list_all[i][j]) - tracklet_all.append(track) - continue - - matches, unmatched1, unmatched2 = track_det_match(tracklet_all, det_list_all[i], IOU_threshold) - - for j in range(len(matches)): - det_list_all[i][matches[j][1]].id = tracklet_all[matches[j][0]].id - det_list_all[i][matches[j][1]].id = tracklet_all[matches[j][0]].id - tracklet_all[matches[j][0]].add_rect(det_list_all[i][matches[j][1]]) - - delete_track_list = [] - for j in range(len(unmatched1)): - tracklet_all[unmatched1[j]].no_match_frame = tracklet_all[unmatched1[j]].no_match_frame + 1 - if(tracklet_all[unmatched1[j]].no_match_frame >= retention_threshold): - delete_track_list.append(unmatched1[j]) - - origin_index = set([k for k in range(len(tracklet_all))]) - delete_index = set(delete_track_list) - left_index = list(origin_index - delete_index) - tracklet_all = [tracklet_all[k] for k in left_index] - - - for j in range(len(unmatched2)): - det_list_all[i][unmatched2[j]].id = max_id + 1 - max_id = max_id + 1 - track = tracklet(det_list_all[i][unmatched2[j]]) - tracklet_all.append(track) - - - - #**************visualize tracking result and save evaluate file**************** - - fout_tracking = open(os.path.join(model_dir, 'results', cur_dataset + '.txt'), 'w') - - save_img_dir = os.path.join(model_dir, 'results', cur_dataset) - if not os.path.exists(save_img_dir): - os.makedirs(save_img_dir) - - out_video = os.path.join(model_dir, 'results', cur_dataset + '.mp4') - videoWriter = cv2.VideoWriter(out_video, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), fps, (img_width, img_height)) - - id_dict = {} - - - for i in range((int(img_len / 2)), img_len): - print('saving: ', i) - img = cv2.imread(img_list[i]) - - for j in range(len(det_list_all[i])): - - x1, y1, x2, y2 = det_list_all[i][j].curr_rect.astype(int) - trace_id = det_list_all[i][j].id - - id_dict.setdefault(str(trace_id),[]).append((int((x1+x2)/2), y2)) - draw_trace_id = str(trace_id) - draw_caption(img, (x1, y1, x2, y2), draw_trace_id, color=color_list[trace_id % len(color_list)]) - cv2.rectangle(img, (x1, y1), (x2, y2), color=color_list[trace_id % len(color_list)], thickness=2) - - trace_len = len(id_dict[str(trace_id)]) - trace_len_draw = min(max_draw_len, trace_len) - - for k in range(trace_len_draw - draw_interval): - if(k % draw_interval == 0): - draw_point1 = id_dict[str(trace_id)][trace_len - k - 1] - draw_point2 = id_dict[str(trace_id)][trace_len - k - 1 - draw_interval] - cv2.line(img, draw_point1, draw_point2, color=color_list[trace_id % 
len(color_list)], thickness=2) - - fout_tracking.write(str(i+1) + ',' + str(trace_id) + ',' + str(x1) + ',' + str(y1) + ',' + str(x2 - x1) + ',' + str(y2 - y1) + ',-1,-1,-1,-1\n') - - cv2.imwrite(os.path.join(save_img_dir, str(i + 1).zfill(6) + '.jpg'), img) - videoWriter.write(img) -# cv2.waitKey(0) - - fout_tracking.close() - videoWriter.release() - -def run_from_train(model_dir, root_path): - if not os.path.exists(os.path.join(model_dir, 'results')): - os.makedirs(os.path.join(model_dir, 'results')) - retinanet = torch.load(os.path.join(model_dir, 'model_final.pt')) - - use_gpu = True - - if use_gpu: retinanet = retinanet.cuda() - - retinanet.eval() - - for seq_num in [2, 4, 5, 9, 10, 11, 13]: - run_each_dataset(model_dir, retinanet, root_path, 'train', 'MOT17-{:02d}'.format(seq_num)) - for seq_num in [1, 3, 6, 7, 8, 12, 14]: - run_each_dataset(model_dir, retinanet, root_path, 'test', 'MOT17-{:02d}'.format(seq_num)) - -def main(args=None): - parser = argparse.ArgumentParser(description='Simple script for testing a CTracker network.') - parser.add_argument('--dataset_path', default='/dockerdata/home/jeromepeng/data/MOT/MOT17/', type=str, help='Dataset path, location of the images sequence.') - parser.add_argument('--model_dir', default='./trained_model/', help='Path to model (.pt) file.') - parser.add_argument('--model_path', default='./trained_model/model_final.pth', help='Path to model (.pt) file.') - parser = parser.parse_args(args) - - if not os.path.exists(os.path.join(parser.model_dir, 'results')): - os.makedirs(os.path.join(parser.model_dir, 'results')) - - retinanet = model.resnet50(num_classes=1, pretrained=True) -# retinanet_save = torch.load(os.path.join(parser.model_dir, 'model_final.pth')) - retinanet_save = torch.load(os.path.join(parser.model_path)) - - # rename moco pre-trained keys - state_dict = retinanet_save.state_dict() - for k in list(state_dict.keys()): - # retain only encoder up to before the embedding layer - if k.startswith('module.'): - # remove prefix - state_dict[k[len("module."):]] = state_dict[k] - # delete renamed or unused k - del state_dict[k] - - retinanet.load_state_dict(state_dict) - - use_gpu = True - - if use_gpu: retinanet = retinanet.cuda() - - retinanet.eval() - - for seq_num in [2, 4, 5, 9, 10, 11, 13]: - run_each_dataset(parser.model_dir, retinanet, parser.dataset_path, 'train', 'MOT17-{:02d}'.format(seq_num)) -# for seq_num in [1, 3, 6, 7, 8, 12, 14]: -# run_each_dataset(parser.model_dir, retinanet, parser.dataset_path, 'test', 'MOT17-{:02d}'.format(seq_num)) - -if __name__ == '__main__': - main() diff --git a/spaces/Enderfga/mtCNN_sysu/utils/vision.py b/spaces/Enderfga/mtCNN_sysu/utils/vision.py deleted file mode 100644 index 69b807709c758611384ab50c6b7d61b774a88dfe..0000000000000000000000000000000000000000 --- a/spaces/Enderfga/mtCNN_sysu/utils/vision.py +++ /dev/null @@ -1,58 +0,0 @@ -from matplotlib.patches import Circle -import os -import sys -import matplotlib.pyplot as plt -import pylab -sys.path.append(os.getcwd()) - - -def vis_face(im_array, dets, landmarks, face_size, save_name): - """Visualize detection results - - Parameters: - ---------- - im_array: numpy.ndarray, shape(1, c, h, w) - test image in rgb - dets1: numpy.ndarray([[x1 y1 x2 y2 score]]) - detection results before calibration - dets2: numpy.ndarray([[x1 y1 x2 y2 score]]) - detection results after calibration - thresh: float - boxes with scores > thresh will be drawn in red otherwise yellow - - Returns: - ------- - """ - - pylab.imshow(im_array) - - for i in 
range(dets.shape[0]): - bbox = dets[i, :5] - - rect = pylab.Rectangle((bbox[0], bbox[1]), - bbox[2] - bbox[0], - bbox[3] - bbox[1], fill=False, - edgecolor='red', linewidth=0.9) - score = bbox[4] - plt.gca().text(bbox[0], bbox[1] - 2, - '{:.5f}'.format(score), - bbox=dict(facecolor='red', alpha=0.5), fontsize=8, color='white') - - pylab.gca().add_patch(rect) - - if landmarks is not None: - for i in range(landmarks.shape[0]): - landmarks_one = landmarks[i, :] - landmarks_one = landmarks_one.reshape((5, 2)) - for j in range(5): - - cir1 = Circle(xy=(landmarks_one[j, 0], landmarks_one[j, 1]), radius=face_size/12, alpha=0.4, color="red") - pylab.gca().add_patch(cir1) - - #pylab.savefig(save_name) - #只保存图片内容,不保存坐标轴 - pylab.axis('off') - #pylab.savefig(save_name, bbox_inches='tight', pad_inches=0.0) - #pylab.show() - # 返回图片对象 - return pylab.gcf() \ No newline at end of file diff --git a/spaces/EuroPython2022/Scratchpad-w-BLOOM/README.md b/spaces/EuroPython2022/Scratchpad-w-BLOOM/README.md deleted file mode 100644 index fcfa56327015c838d0c7134a46ed81022efc52a5..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/Scratchpad-w-BLOOM/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Length generalization via BLOOM -emoji: 📝 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.0.24 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/EyanAn/vits-uma-genshin-honkai/app.py b/spaces/EyanAn/vits-uma-genshin-honkai/app.py deleted file mode 100644 index 92ddafdcd240434f58569b0e6964ef331a971dcf..0000000000000000000000000000000000000000 --- a/spaces/EyanAn/vits-uma-genshin-honkai/app.py +++ /dev/null @@ -1,124 +0,0 @@ -import time -import gradio as gr -import utils -import commons -from models import SynthesizerTrn -from text import text_to_sequence -from torch import no_grad, LongTensor -import torch - -hps_ms = utils.get_hparams_from_file(r'./model/config.json') -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -net_g_ms = SynthesizerTrn( - len(hps_ms.symbols), - hps_ms.data.filter_length // 2 + 1, - hps_ms.train.segment_size // hps_ms.data.hop_length, - n_speakers=hps_ms.data.n_speakers, - **hps_ms.model).to(device) -_ = net_g_ms.eval() -speakers = hps_ms.speakers -model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None) - -def get_text(text, hps): - text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm, clean_text - -def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale): - start = time.perf_counter() - if not len(text): - return "输入文本不能为空!", None, None - text = text.replace('\n', ' ').replace('\r', '').replace(" ", "") - if len(text) > 500: - return f"输入文字过长!{len(text)}>100", None, None - if language == 0: - text = f"[ZH]{text}[ZH]" - elif language == 1: - text = f"[JA]{text}[JA]" - else: - text = f"{text}" - stn_tst, clean_text = get_text(text, hps_ms) - with no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = LongTensor([stn_tst.size(0)]) - speaker_id = LongTensor([speaker_id]) - audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w, - length_scale=length_scale)[0][0, 0].data.cpu().float().numpy() - - return "生成成功!", (22050, audio), f"生成耗时 
{round(time.perf_counter()-start, 2)} s" - -def search_speaker(search_value): - for s in speakers: - if search_value == s: - return s - for s in speakers: - if search_value in s: - return s - -def change_lang(language): - if language == 0: - return 0.6, 0.668, 1.2 - else: - return 0.6, 0.668, 1.1 - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#tts-audio").querySelector("audio"); - let text = root.querySelector("#input-text").querySelector("textarea"); - if (audio == undefined) - return; - text = text.value; - if (text == undefined) - text = Math.floor(Math.random()*100000000); - audio = audio.src; - let oA = document.createElement("a"); - oA.download = text.substr(0, 20)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - with gr.Blocks() as app: - gr.Markdown( - "#
VITS online speech synthesis demo\n" - "
Mainly includes the voices of Uma Musume, Genshin Impact (Chinese), Genshin Impact (Japanese), and Honkai Impact 3
    " - '' - '' - ) - - with gr.Tabs(): - with gr.TabItem("vits"): - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="Text (100 words limitation)", lines=5, value="今天晚上吃啥好呢。", elem_id=f"input-text") - lang = gr.Dropdown(label="Language", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"], - type="index", value="中文") - btn = gr.Button(value="Submit") - with gr.Row(): - search = gr.Textbox(label="Search Speaker", lines=1) - btn2 = gr.Button(value="Search") - sid = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[228]) - with gr.Row(): - ns = gr.Slider(label="noise_scale(控制感情变化程度)", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True) - nsw = gr.Slider(label="noise_scale_w(控制音素发音长度)", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True) - ls = gr.Slider(label="length_scale(控制整体语速)", minimum=0.1, maximum=2.0, step=0.1, value=1.2, interactive=True) - with gr.Column(): - o1 = gr.Textbox(label="Output Message") - o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio") - o3 = gr.Textbox(label="Extra Info") - download = gr.Button("Download Audio") - btn.click(vits, inputs=[input_text, lang, sid, ns, nsw, ls], outputs=[o1, o2, o3], api_name="generate") - download.click(None, [], [], _js=download_audio_js.format()) - btn2.click(search_speaker, inputs=[search], outputs=[sid]) - lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls]) - with gr.TabItem("可用人物一览"): - gr.Radio(label="Speaker", choices=speakers, interactive=False, type="index") - app.queue(concurrency_count=1).launch() \ No newline at end of file diff --git a/spaces/Fazzie/Pokemon-GAI/README.md b/spaces/Fazzie/Pokemon-GAI/README.md deleted file mode 100644 index ad3146aab1f08078b218b3721866eccdb3b5610d..0000000000000000000000000000000000000000 --- a/spaces/Fazzie/Pokemon-GAI/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: This Pokémon Does Not Exist -emoji: 🧬 -colorFrom: gray -colorTo: green -sdk: gradio -python_version: 3.8.9 -app_file: start.py -models: -- minimaxir/ai-generated-pokemon-rudalle -pinned: true -duplicated_from: ronvolutional/ai-pokemon-card ---- diff --git a/spaces/Felix123456/bingo/next.config.js b/spaces/Felix123456/bingo/next.config.js deleted file mode 100644 index 0e6ccd7fbc91d0459eaaff3e968ce0556789c605..0000000000000000000000000000000000000000 --- a/spaces/Felix123456/bingo/next.config.js +++ /dev/null @@ -1,38 +0,0 @@ -/** @type {import('next').NextConfig} */ -const nextConfig = { - // output: 'export', - // assetPrefix: '.', - webpack: (config, { isServer }) => { - if (!isServer) { - config.resolve = { - ...config.resolve, - fallback: { - 'bufferutil': false, - 'utf-8-validate': false, - http: false, - https: false, - stream: false, - // fixes proxy-agent dependencies - net: false, - dns: false, - tls: false, - assert: false, - // fixes next-i18next dependencies - path: false, - fs: false, - // fixes mapbox dependencies - events: false, - // fixes sentry dependencies - process: false - } - }; - } - config.module.exprContextCritical = false; - - return config; - }, -} - -module.exports = (...args) => { - return nextConfig -} diff --git a/spaces/Felix123456/bingo/src/components/ui/sheet.tsx b/spaces/Felix123456/bingo/src/components/ui/sheet.tsx deleted file mode 100644 index c9f5ce0f81a91067bb013e988a07eb1e6bf6953b..0000000000000000000000000000000000000000 --- a/spaces/Felix123456/bingo/src/components/ui/sheet.tsx +++ /dev/null @@ -1,122 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SheetPrimitive 
from '@radix-ui/react-dialog' - -import { cn } from '@/lib/utils' -import { IconClose } from '@/components/ui/icons' - -const Sheet = SheetPrimitive.Root - -const SheetTrigger = SheetPrimitive.Trigger - -const SheetClose = SheetPrimitive.Close - -const SheetPortal = ({ - className, - children, - ...props -}: SheetPrimitive.DialogPortalProps) => ( - - {children} - -) -SheetPortal.displayName = SheetPrimitive.Portal.displayName - -const SheetOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - -)) -SheetOverlay.displayName = SheetPrimitive.Overlay.displayName - -const SheetContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - {children} - - - Close - - - -)) -SheetContent.displayName = SheetPrimitive.Content.displayName - -const SheetHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
    -) -SheetHeader.displayName = 'SheetHeader' - -const SheetFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
    -) -SheetFooter.displayName = 'SheetFooter' - -const SheetTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SheetTitle.displayName = SheetPrimitive.Title.displayName - -const SheetDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SheetDescription.displayName = SheetPrimitive.Description.displayName - -export { - Sheet, - SheetTrigger, - SheetClose, - SheetContent, - SheetHeader, - SheetFooter, - SheetTitle, - SheetDescription -} diff --git a/spaces/Felladrin/LaMini-Flan-T5-248M-Candle-Wasm/build/m-quantized_bg.wasm.d.ts b/spaces/Felladrin/LaMini-Flan-T5-248M-Candle-Wasm/build/m-quantized_bg.wasm.d.ts deleted file mode 100644 index 5a19e2874bd67afcbc35a34a54b78c0d8c01cc25..0000000000000000000000000000000000000000 --- a/spaces/Felladrin/LaMini-Flan-T5-248M-Candle-Wasm/build/m-quantized_bg.wasm.d.ts +++ /dev/null @@ -1,16 +0,0 @@ -/* tslint:disable */ -/* eslint-disable */ -export const memory: WebAssembly.Memory; -export function __wbg_modelencoder_free(a: number): void; -export function __wbg_modelconditionalgeneration_free(a: number): void; -export function modelconditionalgeneration_load(a: number, b: number, c: number, d: number, e: number, f: number, g: number): void; -export function modelconditionalgeneration_decode(a: number, b: number, c: number): void; -export function modelencoder_load(a: number, b: number, c: number, d: number, e: number, f: number, g: number): void; -export function modelencoder_decode(a: number, b: number, c: number): void; -export function main(a: number, b: number): number; -export function __wbindgen_malloc(a: number, b: number): number; -export function __wbindgen_realloc(a: number, b: number, c: number, d: number): number; -export function __wbindgen_add_to_stack_pointer(a: number): number; -export function __wbindgen_free(a: number, b: number, c: number): void; -export function __wbindgen_exn_store(a: number): void; -export function __wbindgen_start(): void; diff --git a/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Xiaor.py b/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Xiaor.py deleted file mode 100644 index 5757f9971157116cbbfabbe5420e3b7e88fed4e7..0000000000000000000000000000000000000000 --- a/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Xiaor.py +++ /dev/null @@ -1,39 +0,0 @@ -import requests -import os -import json -from ...typing import sha256, Dict, get_type_hints - -url = 'https://xiaor.eu.org' -model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', - 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613'] -supports_stream = True -needs_auth = False - - -def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs): - headers = { - 'Content-Type': 'application/json', - } - data = { - 'model': model, - 'temperature': 0.7, - 'presence_penalty': 0, - 'messages': messages, - } - response = requests.post(url + '/p1/v1/chat/completions', - json=data, stream=True) - - if stream: - for chunk in response.iter_content(chunk_size=None): - chunk = chunk.decode('utf-8') - if chunk.strip(): - message = json.loads(chunk)['choices'][0]['message']['content'] - yield message - else: - message = response.json()['choices'][0]['message']['content'] - yield message - - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join( - [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in 
_create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) diff --git a/spaces/FoxMeo/fire-detector/models/yolo.py b/spaces/FoxMeo/fire-detector/models/yolo.py deleted file mode 100644 index 95a019c6aeec8c3f1d582907d5fe7ff3ed6b9369..0000000000000000000000000000000000000000 --- a/spaces/FoxMeo/fire-detector/models/yolo.py +++ /dev/null @@ -1,843 +0,0 @@ -import argparse -import logging -import sys -from copy import deepcopy - -sys.path.append('./') # to run '$ python *.py' files in subdirectories -logger = logging.getLogger(__name__) -import torch -from models.common import * -from models.experimental import * -from utils.autoanchor import check_anchor_order -from utils.general import make_divisible, check_file, set_logging -from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ - select_device, copy_attr -from utils.loss import SigmoidBin - -try: - import thop # for FLOPS computation -except ImportError: - thop = None - - -class Detect(nn.Module): - stride = None # strides computed during build - export = False # onnx export - end2end = False - include_nms = False - concat = False - - def __init__(self, nc=80, anchors=(), ch=()): # detection layer - super(Detect, self).__init__() - self.nc = nc # number of classes - self.no = nc + 5 # number of outputs per anchor - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - - def forward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](x[i]) # conv - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - y = x[i].sigmoid() - if not torch.onnx.is_in_onnx_export(): - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: - xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 - xy = xy * (2. 
* self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy - wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh - y = torch.cat((xy, wh, conf), 4) - z.append(y.view(bs, -1, self.no)) - - if self.training: - out = x - elif self.end2end: - out = torch.cat(z, 1) - elif self.include_nms: - z = self.convert(z) - out = (z, ) - elif self.concat: - out = torch.cat(z, 1) - else: - out = (torch.cat(z, 1), x) - - return out - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - def convert(self, z): - z = torch.cat(z, 1) - box = z[:, :, :4] - conf = z[:, :, 4:5] - score = z[:, :, 5:] - score *= conf - convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], - dtype=torch.float32, - device=z.device) - box @= convert_matrix - return (box, score) - - -class IDetect(nn.Module): - stride = None # strides computed during build - export = False # onnx export - end2end = False - include_nms = False - concat = False - - def __init__(self, nc=80, anchors=(), ch=()): # detection layer - super(IDetect, self).__init__() - self.nc = nc # number of classes - self.no = nc + 5 # number of outputs per anchor - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - - self.ia = nn.ModuleList(ImplicitA(x) for x in ch) - self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch) - - def forward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](self.ia[i](x[i])) # conv - x[i] = self.im[i](x[i]) - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - z.append(y.view(bs, -1, self.no)) - - return x if self.training else (torch.cat(z, 1), x) - - def fuseforward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](x[i]) # conv - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - if not torch.onnx.is_in_onnx_export(): - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: - xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 - xy = xy * (2. 
* self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy - wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh - y = torch.cat((xy, wh, conf), 4) - z.append(y.view(bs, -1, self.no)) - - if self.training: - out = x - elif self.end2end: - out = torch.cat(z, 1) - elif self.include_nms: - z = self.convert(z) - out = (z, ) - elif self.concat: - out = torch.cat(z, 1) - else: - out = (torch.cat(z, 1), x) - - return out - - def fuse(self): - print("IDetect.fuse") - # fuse ImplicitA and Convolution - for i in range(len(self.m)): - c1,c2,_,_ = self.m[i].weight.shape - c1_,c2_, _,_ = self.ia[i].implicit.shape - self.m[i].bias += torch.matmul(self.m[i].weight.reshape(c1,c2),self.ia[i].implicit.reshape(c2_,c1_)).squeeze(1) - - # fuse ImplicitM and Convolution - for i in range(len(self.m)): - c1,c2, _,_ = self.im[i].implicit.shape - self.m[i].bias *= self.im[i].implicit.reshape(c2) - self.m[i].weight *= self.im[i].implicit.transpose(0,1) - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - def convert(self, z): - z = torch.cat(z, 1) - box = z[:, :, :4] - conf = z[:, :, 4:5] - score = z[:, :, 5:] - score *= conf - convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], - dtype=torch.float32, - device=z.device) - box @= convert_matrix - return (box, score) - - -class IKeypoint(nn.Module): - stride = None # strides computed during build - export = False # onnx export - - def __init__(self, nc=80, anchors=(), nkpt=17, ch=(), inplace=True, dw_conv_kpt=False): # detection layer - super(IKeypoint, self).__init__() - self.nc = nc # number of classes - self.nkpt = nkpt - self.dw_conv_kpt = dw_conv_kpt - self.no_det=(nc + 5) # number of outputs per anchor for box and class - self.no_kpt = 3*self.nkpt ## number of outputs per anchor for keypoints - self.no = self.no_det+self.no_kpt - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - self.flip_test = False - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no_det * self.na, 1) for x in ch) # output conv - - self.ia = nn.ModuleList(ImplicitA(x) for x in ch) - self.im = nn.ModuleList(ImplicitM(self.no_det * self.na) for _ in ch) - - if self.nkpt is not None: - if self.dw_conv_kpt: #keypoint head is slightly more complex - self.m_kpt = nn.ModuleList( - nn.Sequential(DWConv(x, x, k=3), Conv(x,x), - DWConv(x, x, k=3), Conv(x, x), - DWConv(x, x, k=3), Conv(x,x), - DWConv(x, x, k=3), Conv(x, x), - DWConv(x, x, k=3), Conv(x, x), - DWConv(x, x, k=3), nn.Conv2d(x, self.no_kpt * self.na, 1)) for x in ch) - else: #keypoint head is a single convolution - self.m_kpt = nn.ModuleList(nn.Conv2d(x, self.no_kpt * self.na, 1) for x in ch) - - self.inplace = inplace # use in-place ops (e.g. 
slice assignment) - - def forward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - if self.nkpt is None or self.nkpt==0: - x[i] = self.im[i](self.m[i](self.ia[i](x[i]))) # conv - else : - x[i] = torch.cat((self.im[i](self.m[i](self.ia[i](x[i]))), self.m_kpt[i](x[i])), axis=1) - - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - x_det = x[i][..., :6] - x_kpt = x[i][..., 6:] - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - kpt_grid_x = self.grid[i][..., 0:1] - kpt_grid_y = self.grid[i][..., 1:2] - - if self.nkpt == 0: - y = x[i].sigmoid() - else: - y = x_det.sigmoid() - - if self.inplace: - xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2) # wh - if self.nkpt != 0: - x_kpt[..., 0::3] = (x_kpt[..., ::3] * 2. - 0.5 + kpt_grid_x.repeat(1,1,1,1,17)) * self.stride[i] # xy - x_kpt[..., 1::3] = (x_kpt[..., 1::3] * 2. - 0.5 + kpt_grid_y.repeat(1,1,1,1,17)) * self.stride[i] # xy - #x_kpt[..., 0::3] = (x_kpt[..., ::3] + kpt_grid_x.repeat(1,1,1,1,17)) * self.stride[i] # xy - #x_kpt[..., 1::3] = (x_kpt[..., 1::3] + kpt_grid_y.repeat(1,1,1,1,17)) * self.stride[i] # xy - #print('=============') - #print(self.anchor_grid[i].shape) - #print(self.anchor_grid[i][...,0].unsqueeze(4).shape) - #print(x_kpt[..., 0::3].shape) - #x_kpt[..., 0::3] = ((x_kpt[..., 0::3].tanh() * 2.) ** 3 * self.anchor_grid[i][...,0].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_x.repeat(1,1,1,1,17) * self.stride[i] # xy - #x_kpt[..., 1::3] = ((x_kpt[..., 1::3].tanh() * 2.) ** 3 * self.anchor_grid[i][...,1].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_y.repeat(1,1,1,1,17) * self.stride[i] # xy - #x_kpt[..., 0::3] = (((x_kpt[..., 0::3].sigmoid() * 4.) ** 2 - 8.) * self.anchor_grid[i][...,0].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_x.repeat(1,1,1,1,17) * self.stride[i] # xy - #x_kpt[..., 1::3] = (((x_kpt[..., 1::3].sigmoid() * 4.) ** 2 - 8.) * self.anchor_grid[i][...,1].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_y.repeat(1,1,1,1,17) * self.stride[i] # xy - x_kpt[..., 2::3] = x_kpt[..., 2::3].sigmoid() - - y = torch.cat((xy, wh, y[..., 4:], x_kpt), dim = -1) - - else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - if self.nkpt != 0: - y[..., 6:] = (y[..., 6:] * 2. 
- 0.5 + self.grid[i].repeat((1,1,1,1,self.nkpt))) * self.stride[i] # xy - y = torch.cat((xy, wh, y[..., 4:]), -1) - - z.append(y.view(bs, -1, self.no)) - - return x if self.training else (torch.cat(z, 1), x) - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - -class IAuxDetect(nn.Module): - stride = None # strides computed during build - export = False # onnx export - end2end = False - include_nms = False - concat = False - - def __init__(self, nc=80, anchors=(), ch=()): # detection layer - super(IAuxDetect, self).__init__() - self.nc = nc # number of classes - self.no = nc + 5 # number of outputs per anchor - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[:self.nl]) # output conv - self.m2 = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[self.nl:]) # output conv - - self.ia = nn.ModuleList(ImplicitA(x) for x in ch[:self.nl]) - self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch[:self.nl]) - - def forward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](self.ia[i](x[i])) # conv - x[i] = self.im[i](x[i]) - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - x[i+self.nl] = self.m2[i](x[i+self.nl]) - x[i+self.nl] = x[i+self.nl].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - if not torch.onnx.is_in_onnx_export(): - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: - xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 - xy = xy * (2. * self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy - wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh - y = torch.cat((xy, wh, conf), 4) - z.append(y.view(bs, -1, self.no)) - - return x if self.training else (torch.cat(z, 1), x[:self.nl]) - - def fuseforward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](x[i]) # conv - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - if not torch.onnx.is_in_onnx_export(): - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: - xy = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].data # wh - y = torch.cat((xy, wh, y[..., 4:]), -1) - z.append(y.view(bs, -1, self.no)) - - if self.training: - out = x - elif self.end2end: - out = torch.cat(z, 1) - elif self.include_nms: - z = self.convert(z) - out = (z, ) - elif self.concat: - out = torch.cat(z, 1) - else: - out = (torch.cat(z, 1), x) - - return out - - def fuse(self): - print("IAuxDetect.fuse") - # fuse ImplicitA and Convolution - for i in range(len(self.m)): - c1,c2,_,_ = self.m[i].weight.shape - c1_,c2_, _,_ = self.ia[i].implicit.shape - self.m[i].bias += torch.matmul(self.m[i].weight.reshape(c1,c2),self.ia[i].implicit.reshape(c2_,c1_)).squeeze(1) - - # fuse ImplicitM and Convolution - for i in range(len(self.m)): - c1,c2, _,_ = self.im[i].implicit.shape - self.m[i].bias *= self.im[i].implicit.reshape(c2) - self.m[i].weight *= self.im[i].implicit.transpose(0,1) - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - def convert(self, z): - z = torch.cat(z, 1) - box = z[:, :, :4] - conf = z[:, :, 4:5] - score = z[:, :, 5:] - score *= conf - convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], - dtype=torch.float32, - device=z.device) - box @= convert_matrix - return (box, score) - - -class IBin(nn.Module): - stride = None # strides computed during build - export = False # onnx export - - def __init__(self, nc=80, anchors=(), ch=(), bin_count=21): # detection layer - super(IBin, self).__init__() - self.nc = nc # number of classes - self.bin_count = bin_count - - self.w_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0) - self.h_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0) - # classes, x,y,obj - self.no = nc + 3 + \ - self.w_bin_sigmoid.get_length() + self.h_bin_sigmoid.get_length() # w-bce, h-bce - # + self.x_bin_sigmoid.get_length() + self.y_bin_sigmoid.get_length() - - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - - self.ia = nn.ModuleList(ImplicitA(x) for x in ch) - self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch) - - def forward(self, x): - - #self.x_bin_sigmoid.use_fw_regression = True - #self.y_bin_sigmoid.use_fw_regression = True - self.w_bin_sigmoid.use_fw_regression = True - self.h_bin_sigmoid.use_fw_regression = True - - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](self.ia[i](x[i])) # conv - x[i] = self.im[i](x[i]) - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy - #y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - - - #px = (self.x_bin_sigmoid.forward(y[..., 0:12]) + self.grid[i][..., 0]) * self.stride[i] - #py = (self.y_bin_sigmoid.forward(y[..., 12:24]) + self.grid[i][..., 1]) * self.stride[i] - - pw = self.w_bin_sigmoid.forward(y[..., 2:24]) * self.anchor_grid[i][..., 0] - ph = self.h_bin_sigmoid.forward(y[..., 24:46]) * self.anchor_grid[i][..., 1] - - #y[..., 0] = px - #y[..., 1] = py - y[..., 2] = pw - y[..., 3] = ph - - y = torch.cat((y[..., 0:4], y[..., 46:]), dim=-1) - - z.append(y.view(bs, -1, y.shape[-1])) - - return x if self.training else (torch.cat(z, 1), x) - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - -class Model(nn.Module): - def __init__(self, cfg='yolor-csp-c.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes - super(Model, self).__init__() - self.traced = False - if isinstance(cfg, dict): - self.yaml = cfg # model dict - else: # is *.yaml - import yaml # for torch hub - self.yaml_file = Path(cfg).name - with open(cfg) as f: - self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict - - # Define model - ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels - if nc and nc != self.yaml['nc']: - logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") - self.yaml['nc'] = nc # override yaml value - if anchors: - logger.info(f'Overriding model.yaml anchors with anchors={anchors}') - self.yaml['anchors'] = round(anchors) # override yaml value - self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist - self.names = [str(i) for i in range(self.yaml['nc'])] # default names - # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) - - # Build strides, anchors - m = self.model[-1] # Detect() - if isinstance(m, Detect): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_biases() # only run once - # print('Strides: %s' % m.stride.tolist()) - if isinstance(m, IDetect): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_biases() # only run once - # print('Strides: %s' % m.stride.tolist()) - if isinstance(m, IAuxDetect): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))[:4]]) # forward - #print(m.stride) - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_aux_biases() # only run once - # print('Strides: %s' % m.stride.tolist()) - if isinstance(m, IBin): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_biases_bin() # only run once - # print('Strides: %s' % m.stride.tolist()) - if isinstance(m, IKeypoint): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 
1, 1) - self.stride = m.stride - self._initialize_biases_kpt() # only run once - # print('Strides: %s' % m.stride.tolist()) - - # Init weights, biases - initialize_weights(self) - self.info() - logger.info('') - - def forward(self, x, augment=False, profile=False): - if augment: - img_size = x.shape[-2:] # height, width - s = [1, 0.83, 0.67] # scales - f = [None, 3, None] # flips (2-ud, 3-lr) - y = [] # outputs - for si, fi in zip(s, f): - xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) - yi = self.forward_once(xi)[0] # forward - # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save - yi[..., :4] /= si # de-scale - if fi == 2: - yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud - elif fi == 3: - yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr - y.append(yi) - return torch.cat(y, 1), None # augmented inference, train - else: - return self.forward_once(x, profile) # single-scale inference, train - - def forward_once(self, x, profile=False): - y, dt = [], [] # outputs - for m in self.model: - if m.f != -1: # if not from previous layer - x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers - - if not hasattr(self, 'traced'): - self.traced=False - - if self.traced: - if isinstance(m, Detect) or isinstance(m, IDetect) or isinstance(m, IAuxDetect) or isinstance(m, IKeypoint): - break - - if profile: - c = isinstance(m, (Detect, IDetect, IAuxDetect, IBin)) - o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS - for _ in range(10): - m(x.copy() if c else x) - t = time_synchronized() - for _ in range(10): - m(x.copy() if c else x) - dt.append((time_synchronized() - t) * 100) - print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) - - x = m(x) # run - - y.append(x if m.i in self.save else None) # save output - - if profile: - print('%.1fms total' % sum(dt)) - return x - - def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. - m = self.model[-1] # Detect() module - for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - - def _initialize_aux_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
- m = self.model[-1] # Detect() module - for mi, mi2, s in zip(m.m, m.m2, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - b2 = mi2.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b2.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b2.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi2.bias = torch.nn.Parameter(b2.view(-1), requires_grad=True) - - def _initialize_biases_bin(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. - m = self.model[-1] # Bin() module - bc = m.bin_count - for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - old = b[:, (0,1,2,bc+3)].data - obj_idx = 2*bc+4 - b[:, :obj_idx].data += math.log(0.6 / (bc + 1 - 0.99)) - b[:, obj_idx].data += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b[:, (obj_idx+1):].data += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - b[:, (0,1,2,bc+3)].data = old - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - - def _initialize_biases_kpt(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. - m = self.model[-1] # Detect() module - for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - - def _print_biases(self): - m = self.model[-1] # Detect() module - for mi in m.m: # from - b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) - print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) - - # def _print_weights(self): - # for m in self.model.modules(): - # if type(m) is Bottleneck: - # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights - - def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers - print('Fusing layers... ') - for m in self.model.modules(): - if isinstance(m, RepConv): - #print(f" fuse_repvgg_block") - m.fuse_repvgg_block() - elif isinstance(m, RepConv_OREPA): - #print(f" switch_to_deploy") - m.switch_to_deploy() - elif type(m) is Conv and hasattr(m, 'bn'): - m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv - delattr(m, 'bn') # remove batchnorm - m.forward = m.fuseforward # update forward - elif isinstance(m, (IDetect, IAuxDetect)): - m.fuse() - m.forward = m.fuseforward - self.info() - return self - - def nms(self, mode=True): # add or remove NMS module - present = type(self.model[-1]) is NMS # last layer is NMS - if mode and not present: - print('Adding NMS... ') - m = NMS() # module - m.f = -1 # from - m.i = self.model[-1].i + 1 # index - self.model.add_module(name='%s' % m.i, module=m) # add - self.eval() - elif not mode and present: - print('Removing NMS... 
') - self.model = self.model[:-1] # remove - return self - - def autoshape(self): # add autoShape module - print('Adding autoShape... ') - m = autoShape(self) # wrap model - copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes - return m - - def info(self, verbose=False, img_size=640): # print model information - model_info(self, verbose, img_size) - - -def parse_model(d, ch): # model_dict, input_channels(3) - logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) - anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] - na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors - no = na * (nc + 5) # number of outputs = anchors * (classes + 5) - - layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out - for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args - m = eval(m) if isinstance(m, str) else m # eval strings - for j, a in enumerate(args): - try: - args[j] = eval(a) if isinstance(a, str) else a # eval strings - except: - pass - - n = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in [nn.Conv2d, Conv, RobustConv, RobustConv2, DWConv, GhostConv, RepConv, RepConv_OREPA, DownC, - SPP, SPPF, SPPCSPC, GhostSPPCSPC, MixConv2d, Focus, Stem, GhostStem, CrossConv, - Bottleneck, BottleneckCSPA, BottleneckCSPB, BottleneckCSPC, - RepBottleneck, RepBottleneckCSPA, RepBottleneckCSPB, RepBottleneckCSPC, - Res, ResCSPA, ResCSPB, ResCSPC, - RepRes, RepResCSPA, RepResCSPB, RepResCSPC, - ResX, ResXCSPA, ResXCSPB, ResXCSPC, - RepResX, RepResXCSPA, RepResXCSPB, RepResXCSPC, - Ghost, GhostCSPA, GhostCSPB, GhostCSPC, - SwinTransformerBlock, STCSPA, STCSPB, STCSPC, - SwinTransformer2Block, ST2CSPA, ST2CSPB, ST2CSPC]: - c1, c2 = ch[f], args[0] - if c2 != no: # if not output - c2 = make_divisible(c2 * gw, 8) - - args = [c1, c2, *args[1:]] - if m in [DownC, SPPCSPC, GhostSPPCSPC, - BottleneckCSPA, BottleneckCSPB, BottleneckCSPC, - RepBottleneckCSPA, RepBottleneckCSPB, RepBottleneckCSPC, - ResCSPA, ResCSPB, ResCSPC, - RepResCSPA, RepResCSPB, RepResCSPC, - ResXCSPA, ResXCSPB, ResXCSPC, - RepResXCSPA, RepResXCSPB, RepResXCSPC, - GhostCSPA, GhostCSPB, GhostCSPC, - STCSPA, STCSPB, STCSPC, - ST2CSPA, ST2CSPB, ST2CSPC]: - args.insert(2, n) # number of repeats - n = 1 - elif m is nn.BatchNorm2d: - args = [ch[f]] - elif m is Concat: - c2 = sum([ch[x] for x in f]) - elif m is Chuncat: - c2 = sum([ch[x] for x in f]) - elif m is Shortcut: - c2 = ch[f[0]] - elif m is Foldcut: - c2 = ch[f] // 2 - elif m in [Detect, IDetect, IAuxDetect, IBin, IKeypoint]: - args.append([ch[x] for x in f]) - if isinstance(args[1], int): # number of anchors - args[1] = [list(range(args[1] * 2))] * len(f) - elif m is ReOrg: - c2 = ch[f] * 4 - elif m is Contract: - c2 = ch[f] * args[0] ** 2 - elif m is Expand: - c2 = ch[f] // args[0] ** 2 - else: - c2 = ch[f] - - m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module - t = str(m)[8:-2].replace('__main__.', '') # module type - np = sum([x.numel() for x in m_.parameters()]) # number params - m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print - save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist - layers.append(m_) - if i == 0: - ch = [] - ch.append(c2) - return nn.Sequential(*layers), sorted(save) - - -if __name__ 
== '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--cfg', type=str, default='yolor-csp-c.yaml', help='model.yaml') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--profile', action='store_true', help='profile model speed') - opt = parser.parse_args() - opt.cfg = check_file(opt.cfg) # check file - set_logging() - device = select_device(opt.device) - - # Create model - model = Model(opt.cfg).to(device) - model.train() - - if opt.profile: - img = torch.rand(1, 3, 640, 640).to(device) - y = model(img, profile=True) - - # Profile - # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) - # y = model(img, profile=True) - - # Tensorboard - # from torch.utils.tensorboard import SummaryWriter - # tb_writer = SummaryWriter() - # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/") - # tb_writer.add_graph(model.model, img) # add model to tensorboard - # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard diff --git a/spaces/GAIR/Factool/factool/utils/__init_.py b/spaces/GAIR/Factool/factool/utils/__init_.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/GageWeike/GPT4i-FreeWilly2/app.py b/spaces/GageWeike/GPT4i-FreeWilly2/app.py deleted file mode 100644 index 8be47e7462d04255ee691ae31eeae8b73920f87b..0000000000000000000000000000000000000000 --- a/spaces/GageWeike/GPT4i-FreeWilly2/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stabilityai/FreeWilly2").launch() \ No newline at end of file diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/environments/environment_test.py b/spaces/Gen-Sim/Gen-Sim/cliport/environments/environment_test.py deleted file mode 100644 index 23f194905087414d1015e72d72734d5d587de9fd..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/environments/environment_test.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Tests for dvnets.environments.environment.""" - -from absl.testing import absltest - -from cliport import tasks -from cliport.environments import environment - -ASSETS_PATH = 'dvnets/environments/assets/' - - -class EnvironmentTest(absltest.TestCase): - - def test_environment_action(self): - env = environment.Environment(ASSETS_PATH) - task = tasks.BlockInsertion() - env.set_task(task) - env.seed(0) - agent = task.oracle(env) - obs = env.reset() - info = None - done = False - for _ in range(10): - act = agent.act(obs, info) - self.assertTrue(env.action_space.contains(act)) - obs, _, done, info = env.step(act) - if done: - break - - -if __name__ == '__main__': - absltest.main() diff --git a/spaces/Gen-Sim/Gen-Sim/scripts/generate_gpt_datasets.sh b/spaces/Gen-Sim/Gen-Sim/scripts/generate_gpt_datasets.sh deleted file mode 100644 index 008219df8a9bfe1e02305f00d1e34f61ecccfac2..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/scripts/generate_gpt_datasets.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -DATA_DIR=/home/yzc/shared/project/GPT-CLIPort/data -TASK='put-block-in-bowl align-box-corner stack-block-pyramid-seq align-pair-colored-blocks-along-line vertical-insertion-blocks stack-blocks-in-container' -DISP=False - -echo "Generating dataset... 
Folder: $DATA_DIR" - -# sh scripts/generate_gpt_datasets.sh data "align-rope assembling-kits-seq-seen-colors assembling-kits-seq-unseen-colors packing-shapes packing-boxes-pairs-seen-colors packing-boxes-pairs-unseen-colors packing-seen-google-objects-seq packing-unseen-google-objects-seq packing-seen-google-objects-group packing-unseen-google-objects-group put-block-in-bowl-seen-colors put-block-in-bowl-unseen-colors stack-block-pyramid-seq-seen-colors stack-block-pyramid-seq-unseen-colors separating-piles-seen-colors separating-piles-unseen-colors towers-of-hanoi-seq-seen-colors towers-of-hanoi-seq-unseen-colors -# sh scripts/generate_gpt_datasets.sh data "assemble-single-car stack-color-coordinated-blocks color-structured-block-tower insert-blocks-into-fixture construct-corner-building colored-cylinder-in-square color-coordinated-block-tower build-house align-pair-colored-blocks-along-line insert-sphere-into-container build-wheel build-two-circles build-car build-bridge manipulating-two-ropes rainbow-stack mix-piles stack-blocks-in-container" -# You can parallelize these depending on how much resources you have - -############################# -## Language-Conditioned Tasks - -# LANG_TASKS='align-rope assembling-kits-seq-seen-colors' -# trap "kill 0" SIGINT - -# LANG_TASKS='place_red_in_green' -LANG_TASKS='rainbow-stack' - - -for task in $LANG_TASKS - do - python cliport/demos.py n=200 task=$task mode=train data_dir=$DATA_DIR disp=$DISP & - python cliport/demos.py n=50 task=$task mode=val data_dir=$DATA_DIR disp=$DISP & - python cliport/demos.py n=100 task=$task mode=test data_dir=$DATA_DIR disp=$DISP & - done -wait - -echo "Finished Language Tasks." - - diff --git a/spaces/GloryGranger80888/Gradio/README.md b/spaces/GloryGranger80888/Gradio/README.md deleted file mode 100644 index 69a6249d03104d41d211457dd05c22bf4d452653..0000000000000000000000000000000000000000 --- a/spaces/GloryGranger80888/Gradio/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Gradio -emoji: 👁 -colorFrom: yellow -colorTo: green -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/_base_/models/cascade_rcnn_r50_fpn.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/_base_/models/cascade_rcnn_r50_fpn.py deleted file mode 100644 index cde2a96c41b49d463a480e505ff5abe04a68db32..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/_base_/models/cascade_rcnn_r50_fpn.py +++ /dev/null @@ -1,179 +0,0 @@ -# model settings -model = dict( - type='CascadeRCNN', - pretrained='torchvision://resnet50', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch'), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), - roi_head=dict( - 
type='CascadeRoIHead', - num_stages=3, - stage_loss_weights=[1, 0.5, 0.25], - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=[ - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) - ]), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=[ - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.6, - neg_iou_thr=0.6, - min_pos_iou=0.6, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.7, - min_pos_iou=0.7, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False) - ]), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100))) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py deleted file mode 100644 index 5ca2a67cde62bff078b7c4c0d696a585265e4c3a..0000000000000000000000000000000000000000 --- 
a/spaces/Gradio-Blocks/uniformer_image_detection/configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/visualization/__init__.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/visualization/__init__.py deleted file mode 100644 index 4ff995c0861490941f8cfc19ebbd41a2ee7e2d65..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/visualization/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .image import (color_val_matplotlib, imshow_det_bboxes, - imshow_gt_det_bboxes) - -__all__ = ['imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib'] diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/atss.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/atss.py deleted file mode 100644 index db7139c6b4fcd7e83007cdb785520743ddae7066..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/atss.py +++ /dev/null @@ -1,17 +0,0 @@ -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class ATSS(SingleStageDetector): - """Implementation of `ATSS `_.""" - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None): - super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes.py deleted file mode 100644 index 1f21c6578bb8f820448f773fb6651b02e64b6123..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './fcn_d6_r50-d16_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_769x769_40k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 08a6031f20234b1cc1d792ea5d4891613503a185..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './gcnet_r50-d8_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 1a3c43495bbf9d302216d7ddf62df75446907a36..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './psanet_r50-d8_512x512_20k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git 
a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes.py deleted file mode 100644 index b6087dcf9f7cc04e12a2b9bcbde7abc4a56e972e..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './pspnet_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/spaces/GurudattaBS/GenDiseasePrediction/code/__init__.py b/spaces/GurudattaBS/GenDiseasePrediction/code/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Haleyok/stablelm-tuned-alpha-chat/app.py b/spaces/Haleyok/stablelm-tuned-alpha-chat/app.py deleted file mode 100644 index 939da979406734fcd2060acd9f62f99bd5ead85d..0000000000000000000000000000000000000000 --- a/spaces/Haleyok/stablelm-tuned-alpha-chat/app.py +++ /dev/null @@ -1,111 +0,0 @@ -import gradio as gr -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, StoppingCriteria, StoppingCriteriaList -import time -import numpy as np -from torch.nn import functional as F -import os -# auth_key = os.environ["HF_ACCESS_TOKEN"] -print(f"Starting to load the model to memory") -m = AutoModelForCausalLM.from_pretrained( - "stabilityai/stablelm-tuned-alpha-7b", torch_dtype=torch.float16).cuda() -tok = AutoTokenizer.from_pretrained("stabilityai/stablelm-tuned-alpha-7b") -generator = pipeline('text-generation', model=m, tokenizer=tok, device=0) -print(f"Successfully loaded the model to the memory") - -start_message = """<|SYSTEM|># StableAssistant -- StableAssistant is a helpful and harmless Open Source AI Language Model developed by Stability and CarperAI. -- StableAssistant is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user. -- StableAssistant is more than just an information source, StableAssistant is also able to write poetry, short stories, and make jokes. 
-- StableAssistant will refuse to participate in anything that could harm a human.""" - - -class StopOnTokens(StoppingCriteria): - def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: - stop_ids = [50278, 50279, 50277, 1, 0] - for stop_id in stop_ids: - if input_ids[0][-1] == stop_id: - return True - return False - - -def contrastive_generate(text, bad_text): - with torch.no_grad(): - tokens = tok(text, return_tensors="pt")[ - 'input_ids'].cuda()[:, :4096-1024] - bad_tokens = tok(bad_text, return_tensors="pt")[ - 'input_ids'].cuda()[:, :4096-1024] - history = None - bad_history = None - curr_output = list() - for i in range(1024): - out = m(tokens, past_key_values=history, use_cache=True) - logits = out.logits - history = out.past_key_values - bad_out = m(bad_tokens, past_key_values=bad_history, - use_cache=True) - bad_logits = bad_out.logits - bad_history = bad_out.past_key_values - probs = F.softmax(logits.float(), dim=-1)[0][-1].cpu() - bad_probs = F.softmax(bad_logits.float(), dim=-1)[0][-1].cpu() - logits = torch.log(probs) - bad_logits = torch.log(bad_probs) - logits[probs > 0.1] = logits[probs > 0.1] - bad_logits[probs > 0.1] - probs = F.softmax(logits) - out = int(torch.multinomial(probs, 1)) - if out in [50278, 50279, 50277, 1, 0]: - break - else: - curr_output.append(out) - out = np.array([out]) - tokens = torch.from_numpy(np.array([out])).to( - tokens.device) - bad_tokens = torch.from_numpy(np.array([out])).to( - tokens.device) - return tok.decode(curr_output) - - -def generate(text, bad_text=None): - stop = StopOnTokens() - result = generator(text, max_new_tokens=1024, num_return_sequences=1, num_beams=1, do_sample=True, - temperature=1.0, top_p=0.95, top_k=1000, stopping_criteria=StoppingCriteriaList([stop])) - return result[0]["generated_text"].replace(text, "") - - -def user(user_message, history): - history = history + [[user_message, ""]] - return "", history, history - - -def bot(history, curr_system_message): - messages = curr_system_message + \ - "".join(["".join(["<|USER|>"+item[0], "<|ASSISTANT|>"+item[1]]) - for item in history]) - output = generate(messages) - history[-1][1] = output - time.sleep(1) - return history, history - - -with gr.Blocks() as demo: - history = gr.State([]) - gr.Markdown("## StableLM-Tuned-Alpha-7b Chat") - gr.HTML('''
    Duplicate SpaceDuplicate the Space to skip the queue and run in a private space
    ''') - chatbot = gr.Chatbot().style(height=500) - with gr.Row(): - with gr.Column(scale=0.70): - msg = gr.Textbox(label="", placeholder="Chat Message Box") - with gr.Column(scale=0.30, min_width=0): - with gr.Row(): - submit = gr.Button("Submit") - clear = gr.Button("Clear") - system_msg = gr.Textbox( - start_message, label="System Message", interactive=False, visible=False) - - msg.submit(fn=user, inputs=[msg, history], outputs=[msg, chatbot, history], queue=False).then( - fn=bot, inputs=[chatbot, system_msg], outputs=[chatbot, history], queue=True) - submit.click(fn=user, inputs=[msg, history], outputs=[msg, chatbot, history], queue=False).then( - fn=bot, inputs=[chatbot, system_msg], outputs=[chatbot, history], queue=True) - clear.click(lambda: [None, []], None, [chatbot, history], queue=False) -demo.queue(concurrency_count=5) -demo.launch() diff --git a/spaces/HaloMaster/chinesesummary/fengshen/data/megatron_dataloader/blendable_dataset.py b/spaces/HaloMaster/chinesesummary/fengshen/data/megatron_dataloader/blendable_dataset.py deleted file mode 100644 index ee24d4056b86333a13d4926e79283a0bc96bbea3..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/data/megatron_dataloader/blendable_dataset.py +++ /dev/null @@ -1,64 +0,0 @@ -# coding=utf-8 -# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Blendable dataset.""" - -import time - -import numpy as np -import torch - -from fengshen.data.megatron_dataloader.utils import print_rank_0 - - -class BlendableDataset(torch.utils.data.Dataset): - - def __init__(self, datasets, weights): - - self.datasets = datasets - num_datasets = len(datasets) - assert num_datasets == len(weights) - - self.size = 0 - for dataset in self.datasets: - self.size += len(dataset) - - # Normalize weights. - weights = np.array(weights, dtype=np.float64) - sum_weights = np.sum(weights) - assert sum_weights > 0.0 - weights /= sum_weights - - # Build indices. 
- start_time = time.time() - assert num_datasets < 255 - self.dataset_index = np.zeros(self.size, dtype=np.uint8) - self.dataset_sample_index = np.zeros(self.size, dtype=np.int64) - - from fengshen.data.megatron_dataloader import helpers - helpers.build_blending_indices(self.dataset_index, - self.dataset_sample_index, - weights, num_datasets, self.size, - torch.distributed.get_rank() == 0) - print_rank_0('> elapsed time for building blendable dataset indices: ' - '{:.2f} (sec)'.format(time.time() - start_time)) - - def __len__(self): - return self.size - - def __getitem__(self, idx): - dataset_idx = self.dataset_index[idx] - sample_idx = self.dataset_sample_index[idx] - return self.datasets[dataset_idx][sample_idx] diff --git a/spaces/HarlanHong/DaGAN/modules/util.py b/spaces/HarlanHong/DaGAN/modules/util.py deleted file mode 100644 index 765c4f1568e245a8c43fef7f9e43e588bf2f4e2a..0000000000000000000000000000000000000000 --- a/spaces/HarlanHong/DaGAN/modules/util.py +++ /dev/null @@ -1,399 +0,0 @@ -from torch import nn - -import torch.nn.functional as F -import torch - -from sync_batchnorm import SynchronizedBatchNorm2d as BatchNorm2d -import pdb -import torch.nn.utils.spectral_norm as spectral_norm -def kp2gaussian(kp, spatial_size, kp_variance): - """ - Transform a keypoint into gaussian like representation - """ - mean = kp['value'] - - coordinate_grid = make_coordinate_grid(spatial_size, mean.type()) - number_of_leading_dimensions = len(mean.shape) - 1 - shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape - coordinate_grid = coordinate_grid.view(*shape) - repeats = mean.shape[:number_of_leading_dimensions] + (1, 1, 1) - coordinate_grid = coordinate_grid.repeat(*repeats) - - # Preprocess kp shape - shape = mean.shape[:number_of_leading_dimensions] + (1, 1, 2) - mean = mean.view(*shape) - - mean_sub = (coordinate_grid - mean) - - out = torch.exp(-0.5 * (mean_sub ** 2).sum(-1) / kp_variance) - - return out - - -def make_coordinate_grid(spatial_size, type): - """ - Create a meshgrid [-1,1] x [-1,1] of given spatial_size. - """ - h, w = spatial_size - x = torch.arange(w).type(type) - y = torch.arange(h).type(type) - - x = (2 * (x / (w - 1)) - 1) - y = (2 * (y / (h - 1)) - 1) - - yy = y.view(-1, 1).repeat(1, w) - xx = x.view(1, -1).repeat(h, 1) - - meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2) - - return meshed - - -class ResBlock2d(nn.Module): - """ - Res block, preserve spatial resolution. - """ - - def __init__(self, in_features, kernel_size, padding): - super(ResBlock2d, self).__init__() - self.conv1 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size, - padding=padding) - self.conv2 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size, - padding=padding) - self.norm1 = BatchNorm2d(in_features, affine=True) - self.norm2 = BatchNorm2d(in_features, affine=True) - - def forward(self, x): - out = self.norm1(x) - out = F.relu(out) - out = self.conv1(out) - out = self.norm2(out) - out = F.relu(out) - out = self.conv2(out) - out += x - return out - - -class UpBlock2d(nn.Module): - """ - Upsampling block for use in decoder. 
- """ - - def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1): - super(UpBlock2d, self).__init__() - - self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, - padding=padding, groups=groups) - self.norm = BatchNorm2d(out_features, affine=True) - - def forward(self, x): - out = F.interpolate(x, scale_factor=2) - out = self.conv(out) - out = self.norm(out) - out = F.relu(out) - return out - - -class DownBlock2d(nn.Module): - """ - Downsampling block for use in encoder. - """ - - def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1): - super(DownBlock2d, self).__init__() - self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, - padding=padding, groups=groups) - self.norm = BatchNorm2d(out_features, affine=True) - self.pool = nn.AvgPool2d(kernel_size=(2, 2)) - - def forward(self, x): - out = self.conv(x) - out = self.norm(out) - out = F.relu(out) - out = self.pool(out) - return out - - -class SameBlock2d(nn.Module): - """ - Simple block, preserve spatial resolution. - """ - - def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1): - super(SameBlock2d, self).__init__() - self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, - kernel_size=kernel_size, padding=padding, groups=groups) - self.norm = BatchNorm2d(out_features, affine=True) - - def forward(self, x): - out = self.conv(x) - out = self.norm(out) - out = F.relu(out) - return out - - -class Encoder(nn.Module): - """ - Hourglass Encoder - """ - - def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256): - super(Encoder, self).__init__() - - down_blocks = [] - for i in range(num_blocks): - down_blocks.append(DownBlock2d(in_features if i == 0 else min(max_features, block_expansion * (2 ** i)), - min(max_features, block_expansion * (2 ** (i + 1))), - kernel_size=3, padding=1)) - self.down_blocks = nn.ModuleList(down_blocks) - - def forward(self, x): - outs = [x] - for down_block in self.down_blocks: - outs.append(down_block(outs[-1])) - return outs - - -class Decoder(nn.Module): - """ - Hourglass Decoder - """ - - def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256): - super(Decoder, self).__init__() - - up_blocks = [] - - for i in range(num_blocks)[::-1]: - in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1))) - out_filters = min(max_features, block_expansion * (2 ** i)) - up_blocks.append(UpBlock2d(in_filters, out_filters, kernel_size=3, padding=1)) - - self.up_blocks = nn.ModuleList(up_blocks) - self.out_filters = block_expansion + in_features - - def forward(self, x): - out = x.pop() - for up_block in self.up_blocks: - out = up_block(out) - skip = x.pop() - out = torch.cat([out, skip], dim=1) - return out - - -class Decoder_w_emb(nn.Module): - """ - Hourglass Decoder - """ - - def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256): - super(Decoder_w_emb, self).__init__() - - up_blocks = [] - - for i in range(num_blocks)[::-1]: - in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1))) - out_filters = min(max_features, block_expansion * (2 ** i)) - up_blocks.append(UpBlock2d(in_filters, out_filters, kernel_size=3, padding=1)) - - self.up_blocks = nn.ModuleList(up_blocks) - self.out_filters = block_expansion + in_features - - def forward(self, x): - feats = [] - out = x.pop() - 
feats.append(out) - for ind,up_block in enumerate(self.up_blocks): - out = up_block(out) - skip = x.pop() - feats.append(skip) - out = torch.cat([out, skip], dim=1) - return out,feats - -class Decoder_2branch(nn.Module): - """ - Hourglass Decoder - """ - - def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256): - super(Decoder_2branch, self).__init__() - up_blocks = [] - for i in range(num_blocks)[::-1]: - in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1))) - out_filters = min(max_features, block_expansion * (2 ** i)) - up_blocks.append(UpBlock2d(in_filters, out_filters, kernel_size=3, padding=1)) - - self.up_blocks = nn.ModuleList(up_blocks) - self.out_filters = block_expansion + in_features - - def forward(self, x): - # out = x.pop() - num_feat = len(x) - out=x[-1] - for i in range(len(self.up_blocks)): - out = self.up_blocks[i](out) - skip = x[-(i+1+1)] - out = torch.cat([out, skip], dim=1) - return out - - - -class Hourglass(nn.Module): - """ - Hourglass architecture. - """ - - def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256): - super(Hourglass, self).__init__() - self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features) - self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features) - self.out_filters = self.decoder.out_filters - def forward(self, x): - return self.decoder(self.encoder(x)) - -class Hourglass_2branch(nn.Module): - """ - Hourglass architecture. - """ - - def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256): - super(Hourglass_2branch, self).__init__() - self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features) - self.decoder_kp = Decoder_2branch(block_expansion, in_features, num_blocks, max_features) - self.decoder_mask = Decoder_2branch(block_expansion, in_features, num_blocks, max_features) - - self.out_filters = self.decoder_kp.out_filters - def forward(self, x): - embd= self.encoder(x) - kp_feat = self.decoder_kp(embd) - mask_feat = self.decoder_mask(embd) - return kp_feat,mask_feat - - -class Hourglass_w_emb(nn.Module): - """ - Hourglass architecture. - """ - - def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256): - super(Hourglass_w_emb, self).__init__() - self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features) - self.decoder = Decoder_w_emb(block_expansion, in_features, num_blocks, max_features) - self.out_filters = self.decoder.out_filters - - def forward(self, x): - embs = self.encoder(x) - result,feats = self.decoder(embs) - return feats,result -class AntiAliasInterpolation2d(nn.Module): - """ - Band-limited downsampling, for better preservation of the input signal. - """ - def __init__(self, channels, scale): - super(AntiAliasInterpolation2d, self).__init__() - sigma = (1 / scale - 1) / 2 - kernel_size = 2 * round(sigma * 4) + 1 - self.ka = kernel_size // 2 - self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka - - kernel_size = [kernel_size, kernel_size] - sigma = [sigma, sigma] - # The gaussian kernel is the product of the - # gaussian function of each dimension. - kernel = 1 - meshgrids = torch.meshgrid( - [ - torch.arange(size, dtype=torch.float32) - for size in kernel_size - ] - ) - for size, std, mgrid in zip(kernel_size, sigma, meshgrids): - mean = (size - 1) / 2 - kernel *= torch.exp(-(mgrid - mean) ** 2 / (2 * std ** 2)) - - # Make sure sum of values in gaussian kernel equals 1. 
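
# Aside, to make the formulas above concrete: scale = 0.25 gives
# sigma = (1 / 0.25 - 1) / 2 = 1.5 and kernel_size = 2 * round(4 * 1.5) + 1 = 13,
# so the Gaussian gets wider as the downsampling factor grows; scale = 1.0
# is handled as a no-op in forward() below.
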
- kernel = kernel / torch.sum(kernel) - # Reshape to depthwise convolutional weight - kernel = kernel.view(1, 1, *kernel.size()) - kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1)) - - self.register_buffer('weight', kernel) - self.groups = channels - self.scale = scale - inv_scale = 1 / scale - self.int_inv_scale = int(inv_scale) - - def forward(self, input): - if self.scale == 1.0: - return input - - out = F.pad(input, (self.ka, self.kb, self.ka, self.kb)) - out = F.conv2d(out, weight=self.weight, groups=self.groups) - out = out[:, :, ::self.int_inv_scale, ::self.int_inv_scale] - - return out - - -class SPADE(nn.Module): - def __init__(self, norm_nc, label_nc): - super().__init__() - - self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False) - nhidden = 128 - - self.mlp_shared = nn.Sequential( - nn.Conv2d(label_nc, nhidden, kernel_size=3, padding=1), - nn.ReLU()) - self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=3, padding=1) - self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=3, padding=1) - - def forward(self, x, segmap): - normalized = self.param_free_norm(x) - segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest') - actv = self.mlp_shared(segmap) - gamma = self.mlp_gamma(actv) - beta = self.mlp_beta(actv) - out = normalized * (1 + gamma) + beta - return out - - -class SPADEResnetBlock(nn.Module): - def __init__(self, fin, fout, norm_G, label_nc, use_se=False, dilation=1): - super().__init__() - # Attributes - self.learned_shortcut = (fin != fout) - fmiddle = min(fin, fout) - self.use_se = use_se - # create conv layers - self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=dilation, dilation=dilation) - self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=dilation, dilation=dilation) - if self.learned_shortcut: - self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False) - # apply spectral norm if specified - if 'spectral' in norm_G: - self.conv_0 = spectral_norm(self.conv_0) - self.conv_1 = spectral_norm(self.conv_1) - if self.learned_shortcut: - self.conv_s = spectral_norm(self.conv_s) - # define normalization layers - self.norm_0 = SPADE(fin, label_nc) - self.norm_1 = SPADE(fmiddle, label_nc) - if self.learned_shortcut: - self.norm_s = SPADE(fin, label_nc) - - def forward(self, x, seg1): - x_s = self.shortcut(x, seg1) - dx = self.conv_0(self.actvn(self.norm_0(x, seg1))) - dx = self.conv_1(self.actvn(self.norm_1(dx, seg1))) - out = x_s + dx - return out - - def shortcut(self, x, seg1): - if self.learned_shortcut: - x_s = self.conv_s(self.norm_s(x, seg1)) - else: - x_s = x - return x_s - - def actvn(self, x): - return F.leaky_relu(x, 2e-1) \ No newline at end of file diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_recognition/utils/wer_utils.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_recognition/utils/wer_utils.py deleted file mode 100644 index cf6f3d09ba41a46ad4d7968fb3c286dd53d15c38..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_recognition/utils/wer_utils.py +++ /dev/null @@ -1,381 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
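
For reference, the metric implemented in this (deleted) module is WER = (S + D + I) / N: substitutions, deletions and insertions against the N reference words. A small usage sketch of its public helpers; the import path assumes fairseq's examples package is on sys.path, and the numbers follow the non-time-mediated costs defined below.

from examples.speech_recognition.utils.wer_utils import calc_wer, calc_wer_stats

# one inserted word against a three-word reference -> WER = 1/3
print(calc_wer("the cat sat down", "the cat sat"))  # ~33.33 (args: hyp, ref)
stats = calc_wer_stats("the cat sat down", "the cat sat")
print(stats["ins"], stats["dels"], stats["subs"])   # 1 0 0
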
-
-from __future__ import absolute_import, division, print_function, unicode_literals
-
-import re
-from collections import deque
-from enum import Enum
-
-import numpy as np
-
-
-"""
-    Utility modules for computation of Word Error Rate,
-    Alignments, as well as more granular metrics like
-    deletion, insertion and substitutions.
-"""
-
-
-class Code(Enum):
-    match = 1
-    substitution = 2
-    insertion = 3
-    deletion = 4
-
-
-class Token(object):
-    def __init__(self, lbl="", st=np.nan, en=np.nan):
-        if np.isnan(st):
-            self.label, self.start, self.end = "", 0.0, 0.0
-        else:
-            self.label, self.start, self.end = lbl, st, en
-
-
-class AlignmentResult(object):
-    def __init__(self, refs, hyps, codes, score):
-        self.refs = refs  # std::deque<int>
-        self.hyps = hyps  # std::deque<int>
-        self.codes = codes  # std::deque<Code>
-        self.score = score  # float
-
-
-def coordinate_to_offset(row, col, ncols):
-    return int(row * ncols + col)
-
-
-def offset_to_row(offset, ncols):
-    return int(offset / ncols)
-
-
-def offset_to_col(offset, ncols):
-    return int(offset % ncols)
-
-
-def trimWhitespace(str):
-    return re.sub(" +", " ", re.sub(" *$", "", re.sub("^ *", "", str)))
-
-
-def str2toks(str):
-    pieces = trimWhitespace(str).split(" ")
-    toks = []
-    for p in pieces:
-        toks.append(Token(p, 0.0, 0.0))
-    return toks
-
-
-class EditDistance(object):
-    def __init__(self, time_mediated):
-        self.time_mediated_ = time_mediated
-        self.scores_ = np.nan  # Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>
-        self.backtraces_ = (
-            np.nan
-        )  # Eigen::Matrix<size_t, Eigen::Dynamic, Eigen::Dynamic> backtraces_;
-        self.confusion_pairs_ = {}
-
-    def cost(self, ref, hyp, code):
-        if self.time_mediated_:
-            if code == Code.match:
-                return abs(ref.start - hyp.start) + abs(ref.end - hyp.end)
-            elif code == Code.insertion:
-                return hyp.end - hyp.start
-            elif code == Code.deletion:
-                return ref.end - ref.start
-            else:  # substitution
-                return abs(ref.start - hyp.start) + abs(ref.end - hyp.end) + 0.1
-        else:
-            if code == Code.match:
-                return 0
-            elif code == Code.insertion or code == Code.deletion:
-                return 3
-            else:  # substitution
-                return 4
-
-    def get_result(self, refs, hyps):
-        res = AlignmentResult(refs=deque(), hyps=deque(), codes=deque(), score=np.nan)
-
-        num_rows, num_cols = self.scores_.shape
-        res.score = self.scores_[num_rows - 1, num_cols - 1]
-
-        curr_offset = coordinate_to_offset(num_rows - 1, num_cols - 1, num_cols)
-
-        while curr_offset != 0:
-            curr_row = offset_to_row(curr_offset, num_cols)
-            curr_col = offset_to_col(curr_offset, num_cols)
-
-            prev_offset = self.backtraces_[curr_row, curr_col]
-
-            prev_row = offset_to_row(prev_offset, num_cols)
-            prev_col = offset_to_col(prev_offset, num_cols)
-
-            res.refs.appendleft(curr_row - 1)  # Note: this was .push_front() in C++
-            res.hyps.appendleft(curr_col - 1)
-            if curr_row - 1 == prev_row and curr_col == prev_col:
-                res.codes.appendleft(Code.deletion)
-            elif curr_row == prev_row and curr_col - 1 == prev_col:
-                res.codes.appendleft(Code.insertion)
-            else:
-                # assert(curr_row - 1 == prev_row and curr_col - 1 == prev_col)
-                ref_str = refs[res.refs[0]].label
-                hyp_str = hyps[res.hyps[0]].label
-
-                if ref_str == hyp_str:
-                    res.codes.appendleft(Code.match)
-                else:
-                    res.codes.appendleft(Code.substitution)
-
-                    confusion_pair = "%s -> %s" % (ref_str, hyp_str)
-                    if confusion_pair not in self.confusion_pairs_:
-                        self.confusion_pairs_[confusion_pair] = 1
-                    else:
-                        self.confusion_pairs_[confusion_pair] += 1
-
-            curr_offset = prev_offset
-
-        return res
-
-    def align(self, refs, hyps):
-        if len(refs) == 0 and len(hyps) == 0:
-            return np.nan
-
-        # NOTE: we're not resetting the values in these matrices because every value
-        # will be overridden in the loop below. If this assumption doesn't hold,
-        # be sure to set all entries in self.scores_ and self.backtraces_ to 0.
-        self.scores_ = np.zeros((len(refs) + 1, len(hyps) + 1))
-        self.backtraces_ = np.zeros((len(refs) + 1, len(hyps) + 1))
-
-        num_rows, num_cols = self.scores_.shape
-
-        for i in range(num_rows):
-            for j in range(num_cols):
-                if i == 0 and j == 0:
-                    self.scores_[i, j] = 0.0
-                    self.backtraces_[i, j] = 0
-                    continue
-
-                if i == 0:
-                    self.scores_[i, j] = self.scores_[i, j - 1] + self.cost(
-                        None, hyps[j - 1], Code.insertion
-                    )
-                    self.backtraces_[i, j] = coordinate_to_offset(i, j - 1, num_cols)
-                    continue
-
-                if j == 0:
-                    self.scores_[i, j] = self.scores_[i - 1, j] + self.cost(
-                        refs[i - 1], None, Code.deletion
-                    )
-                    self.backtraces_[i, j] = coordinate_to_offset(i - 1, j, num_cols)
-                    continue
-
-                # Below here both i and j are greater than 0
-                ref = refs[i - 1]
-                hyp = hyps[j - 1]
-                best_score = self.scores_[i - 1, j - 1] + (
-                    self.cost(ref, hyp, Code.match)
-                    if (ref.label == hyp.label)
-                    else self.cost(ref, hyp, Code.substitution)
-                )
-
-                prev_row = i - 1
-                prev_col = j - 1
-                ins = self.scores_[i, j - 1] + self.cost(None, hyp, Code.insertion)
-                if ins < best_score:
-                    best_score = ins
-                    prev_row = i
-                    prev_col = j - 1
-
-                delt = self.scores_[i - 1, j] + self.cost(ref, None, Code.deletion)
-                if delt < best_score:
-                    best_score = delt
-                    prev_row = i - 1
-                    prev_col = j
-
-                self.scores_[i, j] = best_score
-                self.backtraces_[i, j] = coordinate_to_offset(
-                    prev_row, prev_col, num_cols
-                )
-
-        return self.get_result(refs, hyps)
-
-
-class WERTransformer(object):
-    def __init__(self, hyp_str, ref_str, verbose=True):
-        self.ed_ = EditDistance(False)
-        self.id2oracle_errs_ = {}
-        self.utts_ = 0
-        self.words_ = 0
-        self.insertions_ = 0
-        self.deletions_ = 0
-        self.substitutions_ = 0
-
-        self.process(["dummy_str", hyp_str, ref_str])
-
-        if verbose:
-            print("'%s' vs '%s'" % (hyp_str, ref_str))
-            self.report_result()
-
-    def process(self, input):  # std::vector<std::string>&& input
-        if len(input) < 3:
-            print(
-                "Input must be of the form <id> ... <hypo> <ref>, got ",
-                len(input),
-                " inputs:",
-            )
-            return None
-
-        # Align
-        # std::vector<Token> hyps;
-        # std::vector<Token> refs;
-
-        hyps = str2toks(input[-2])
-        refs = str2toks(input[-1])
-
-        alignment = self.ed_.align(refs, hyps)
-        if alignment is None:
-            print("Alignment is null")
-            return np.nan
-
-        # Tally errors
-        ins = 0
-        dels = 0
-        subs = 0
-        for code in alignment.codes:
-            if code == Code.substitution:
-                subs += 1
-            elif code == Code.insertion:
-                ins += 1
-            elif code == Code.deletion:
-                dels += 1
-
-        # Output
-        row = input
-        row.append(str(len(refs)))
-        row.append(str(ins))
-        row.append(str(dels))
-        row.append(str(subs))
-        # print(row)
-
-        # Accumulate
-        kIdIndex = 0
-        kNBestSep = "/"
-
-        pieces = input[kIdIndex].split(kNBestSep)
-
-        if len(pieces) == 0:
-            print(
-                "Error splitting ",
-                input[kIdIndex],
-                " on '",
-                kNBestSep,
-                "', got empty list",
-            )
-            return np.nan
-
-        id = pieces[0]
-        if id not in self.id2oracle_errs_:
-            self.utts_ += 1
-            self.words_ += len(refs)
-            self.insertions_ += ins
-            self.deletions_ += dels
-            self.substitutions_ += subs
-            self.id2oracle_errs_[id] = [ins, dels, subs]
-        else:
-            curr_err = ins + dels + subs
-            prev_err = np.sum(self.id2oracle_errs_[id])
-            if curr_err < prev_err:
-                self.id2oracle_errs_[id] = [ins, dels, subs]
-
-        return 0
-
-    def report_result(self):
-        # print("---------- Summary ---------------")
-        if self.words_ == 0:
-            print("No words counted")
-            return
-
-        # 1-best
-        best_wer = (
-            100.0
-            * (self.insertions_ + self.deletions_ + self.substitutions_)
-            / self.words_
-        )
-
-        print(
-            "\tWER = %0.2f%% (%i utts, %i words, %0.2f%% ins, "
-            "%0.2f%% dels, %0.2f%% subs)"
-            % (
-                best_wer,
-                self.utts_,
-                self.words_,
-                100.0 * self.insertions_ / self.words_,
-                100.0 * self.deletions_ / self.words_,
-                100.0 * self.substitutions_ / self.words_,
-            )
-        )
-
-    def wer(self):
-        if self.words_ == 0:
-            wer = np.nan
-        else:
-            wer = (
-                100.0
-                * (self.insertions_ + self.deletions_ + self.substitutions_)
-                / self.words_
-            )
-        return wer
-
-    def stats(self):
-        if self.words_ == 0:
-            stats = {}
-        else:
-            wer = (
-                100.0
-                * (self.insertions_ + self.deletions_ + self.substitutions_)
-                / self.words_
-            )
-            stats = dict(
-                {
-                    "wer": wer,
-                    "utts": self.utts_,
-                    "numwords": self.words_,
-                    "ins": self.insertions_,
-                    "dels": self.deletions_,
-                    "subs": self.substitutions_,
-                    "confusion_pairs": self.ed_.confusion_pairs_,
-                }
-            )
-        return stats
-
-
-def calc_wer(hyp_str, ref_str):
-    t = WERTransformer(hyp_str, ref_str, verbose=0)
-    return t.wer()
-
-
-def calc_wer_stats(hyp_str, ref_str):
-    t = WERTransformer(hyp_str, ref_str, verbose=0)
-    return t.stats()
-
-
-def get_wer_alignment_codes(hyp_str, ref_str):
-    """
-    INPUT: hypothesis string, reference string
-    OUTPUT: List of alignment codes (intermediate results from WER computation)
-    """
-    t = WERTransformer(hyp_str, ref_str, verbose=0)
-    return t.ed_.align(str2toks(ref_str), str2toks(hyp_str)).codes
-
-
-def merge_counts(x, y):
-    # Merge two hashes which have 'counts' as their values
-    # This can be used for example to merge confusion pair counts
-    # conf_pairs = merge_counts(conf_pairs, stats['confusion_pairs'])
-    for k, v in y.items():
-        if k not in x:
-            x[k] = 0
-        x[k] += v
-    return x
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/translation/prepare-iwslt17-multilingual.sh b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/translation/prepare-iwslt17-multilingual.sh
deleted file mode 100644
index 23be87555322bc03b13e9d95951d88b1a442f97a..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/translation/prepare-iwslt17-multilingual.sh
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/bin/bash
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-SRCS=(
-    "de"
-    "fr"
-)
-TGT=en
-
-ROOT=$(dirname "$0")
-SCRIPTS=$ROOT/../../scripts
-SPM_TRAIN=$SCRIPTS/spm_train.py
-SPM_ENCODE=$SCRIPTS/spm_encode.py
-
-BPESIZE=16384
-ORIG=$ROOT/iwslt17_orig
-DATA=$ROOT/iwslt17.de_fr.en.bpe16k
-mkdir -p "$ORIG" "$DATA"
-
-TRAIN_MINLEN=1  # remove sentences with <1 BPE token
-TRAIN_MAXLEN=250  # remove sentences with >250 BPE tokens
-
-URLS=(
-    "https://wit3.fbk.eu/archive/2017-01-trnted/texts/de/en/de-en.tgz"
-    "https://wit3.fbk.eu/archive/2017-01-trnted/texts/fr/en/fr-en.tgz"
-)
-ARCHIVES=(
-    "de-en.tgz"
-    "fr-en.tgz"
-)
-VALID_SETS=(
-    "IWSLT17.TED.dev2010.de-en IWSLT17.TED.tst2010.de-en IWSLT17.TED.tst2011.de-en IWSLT17.TED.tst2012.de-en IWSLT17.TED.tst2013.de-en IWSLT17.TED.tst2014.de-en IWSLT17.TED.tst2015.de-en"
-    "IWSLT17.TED.dev2010.fr-en IWSLT17.TED.tst2010.fr-en IWSLT17.TED.tst2011.fr-en IWSLT17.TED.tst2012.fr-en IWSLT17.TED.tst2013.fr-en IWSLT17.TED.tst2014.fr-en IWSLT17.TED.tst2015.fr-en"
-)
-
-# download and extract data
-for ((i=0;i<${#URLS[@]};++i)); do
-    ARCHIVE=$ORIG/${ARCHIVES[i]}
-    if [ -f "$ARCHIVE" ]; then
-        echo "$ARCHIVE already exists, skipping download"
-    else
-        URL=${URLS[i]}
-        wget -P "$ORIG" "$URL"
-        if [ -f "$ARCHIVE" ]; then
-            echo "$URL successfully downloaded."
-        else
-            echo "$URL not successfully downloaded."
-            exit 1
-        fi
-    fi
-    FILE=${ARCHIVE: -4}
-    if [ -e "$FILE" ]; then
-        echo "$FILE already exists, skipping extraction"
-    else
-        tar -C "$ORIG" -xzvf "$ARCHIVE"
-    fi
-done
-
-echo "pre-processing train data..."
-for SRC in "${SRCS[@]}"; do
-    for LANG in "${SRC}" "${TGT}"; do
-        cat "$ORIG/${SRC}-${TGT}/train.tags.${SRC}-${TGT}.${LANG}" \
-            | grep -v '<url>' \
-            | grep -v '<talkid>' \
-            | grep -v '<keywords>' \
-            | grep -v '<speaker>' \
-            | grep -v '<reviewer>' \
-            | sed -e 's/<title>//g' \
-            | sed -e 's/<\/title>//g' \
-            | sed -e 's/<description>//g' \
-            | sed -e 's/<\/description>//g' \
-            | sed 's/^\s*//g' \
-            | sed 's/\s*$//g' \
-            > "$DATA/train.${SRC}-${TGT}.${LANG}"
-    done
-done
-
-echo "pre-processing valid data..."
-for ((i=0;i<${#SRCS[@]};++i)); do
-    SRC=${SRCS[i]}
-    VALID_SET=(${VALID_SETS[i]})
-    for ((j=0;j<${#VALID_SET[@]};++j)); do
-        FILE=${VALID_SET[j]}
-        for LANG in "$SRC" "$TGT"; do
-            grep '<seg id' "$ORIG/${SRC}-${TGT}/${FILE}.${LANG}.xml" \
-                | sed -e 's/<seg id="[0-9]*">\s*//g' \
-                | sed -e 's/\s*<\/seg>\s*//g' \
-                | sed -e "s/\’/\'/g" \
-                > "$DATA/valid${j}.${SRC}-${TGT}.${LANG}"
-        done
-    done
-done
-
-# learn BPE with sentencepiece
-TRAIN_FILES=$(for SRC in "${SRCS[@]}"; do echo $DATA/train.${SRC}-${TGT}.${SRC}; echo $DATA/train.${SRC}-${TGT}.${TGT}; done | tr "\n" ",")
-echo "learning joint BPE over ${TRAIN_FILES}..."
-python "$SPM_TRAIN" \
-    --input=$TRAIN_FILES \
-    --model_prefix=$DATA/sentencepiece.bpe \
-    --vocab_size=$BPESIZE \
-    --character_coverage=1.0 \
-    --model_type=bpe
-
-# encode train/valid
-echo "encoding train with learned BPE..."
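
The spm_train/spm_encode wrappers invoked by this script drive the sentencepiece library; a rough Python equivalent of the training step, for orientation (file names are illustrative, and the keyword form of the API assumes a recent sentencepiece release):

import sentencepiece as spm

spm.SentencePieceTrainer.train(
    input="train.de-en.de,train.de-en.en,train.fr-en.fr,train.fr-en.en",
    model_prefix="sentencepiece.bpe",
    vocab_size=16384,  # BPESIZE above
    character_coverage=1.0,
    model_type="bpe",
)
sp = spm.SentencePieceProcessor(model_file="sentencepiece.bpe.model")
print(sp.encode("Wiederaufnahme der Sitzungsperiode", out_type=str))
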
-for SRC in "${SRCS[@]}"; do - python "$SPM_ENCODE" \ - --model "$DATA/sentencepiece.bpe.model" \ - --output_format=piece \ - --inputs $DATA/train.${SRC}-${TGT}.${SRC} $DATA/train.${SRC}-${TGT}.${TGT} \ - --outputs $DATA/train.bpe.${SRC}-${TGT}.${SRC} $DATA/train.bpe.${SRC}-${TGT}.${TGT} \ - --min-len $TRAIN_MINLEN --max-len $TRAIN_MAXLEN -done - -echo "encoding valid with learned BPE..." -for ((i=0;i<${#SRCS[@]};++i)); do - SRC=${SRCS[i]} - VALID_SET=(${VALID_SETS[i]}) - for ((j=0;j<${#VALID_SET[@]};++j)); do - python "$SPM_ENCODE" \ - --model "$DATA/sentencepiece.bpe.model" \ - --output_format=piece \ - --inputs $DATA/valid${j}.${SRC}-${TGT}.${SRC} $DATA/valid${j}.${SRC}-${TGT}.${TGT} \ - --outputs $DATA/valid${j}.bpe.${SRC}-${TGT}.${SRC} $DATA/valid${j}.bpe.${SRC}-${TGT}.${TGT} - done -done diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/layers.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/layers.py deleted file mode 100644 index eb81ded341257ba0a43c4d0867e8f3c83f276bc7..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/layers.py +++ /dev/null @@ -1,600 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math -from collections import namedtuple - -import torch -import torch.nn as nn -import torch.nn.functional as F -from fairseq import options, utils -from fairseq.modules import ( - AdaptiveSoftmax, - LayerNorm, - MultiheadAttention, - PositionalEmbedding, -) - - -EncoderOut = namedtuple( - "TransformerEncoderOut", - [ - "encoder_out", # T x B x C - "encoder_padding_mask", # B x T - "encoder_embedding", # B x T x C - "encoder_states", # List[T x B x C] - ], -) - - -class TransformerEncoderEmbedding(nn.Module): - """ Encoder Embedding + Positional Embedding """ - - def __init__(self, args, embed_tokens): - super().__init__() - self.dropout = args.dropout - self.max_source_positions = args.max_source_positions - self.embed_tokens = embed_tokens - if isinstance(embed_tokens, nn.ModuleList): - self.padding_idx = embed_tokens[0].padding_idx - embed_dim = sum(e.embedding_dim for e in embed_tokens) - else: - self.padding_idx = embed_tokens.padding_idx - embed_dim = embed_tokens.embedding_dim - self.embed_scale = math.sqrt(embed_dim) - self.embed_positions = ( - PositionalEmbedding( - args.max_source_positions, - embed_dim, - self.padding_idx, - learned=args.encoder_learned_pos, - ) - if not args.no_token_positional_embeddings - else None - ) - if getattr(args, "layernorm_embedding", False): - self.layernorm_embedding = LayerNorm(embed_dim) - else: - self.layernorm_embedding = None - - def forward(self, input): - # embed tokens and positions - src_tokens = input[0] - prev_output_tokens = input[2] - if isinstance(self.embed_tokens, nn.ModuleList): - x_embed_list = [] - for embed_tokens_part in self.embed_tokens: - x_embed_list.append(embed_tokens_part(src_tokens)) - - embedded = torch.cat(x_embed_list, dim=-1) - else: - embedded = self.embed_tokens(src_tokens) - x = embed = self.embed_scale * embedded - if self.embed_positions is not None: - x = embed + self.embed_positions(src_tokens) - if self.layernorm_embedding: - x = self.layernorm_embedding(x) - x = F.dropout(x, p=self.dropout, training=self.training) 
- # B x T x C -> T x B x C - x = x.transpose(0, 1) - - # compute padding mask - encoder_padding_mask = src_tokens.eq(self.padding_idx) - return (x, encoder_padding_mask, prev_output_tokens) - - -class TransformerEncoderLayerNorm(nn.Module): - """ - Layer norm at the the end of all encoder layers if - args.encoder_enormalize_before = True - """ - - def __init__(self, args, embed_dim): - super().__init__() - if args.encoder_normalize_before: - self.layer_norm = LayerNorm(embed_dim) - else: - self.layer_norm = None - - def forward(self, input): - x = input[0] - encoder_padding_mask = input[1] - prev_output_tokens = input[2] - if self.layer_norm: - x = self.layer_norm(x) - # keeping track of the incremental_state is not supported yet - return (x, encoder_padding_mask, prev_output_tokens) - - -class TransformerDecoderEmbedding(nn.Module): - """ Decoder Embedding + Positional Embedding """ - - def __init__(self, args, embed_tokens): - super().__init__() - self.dropout = args.dropout - self.share_input_output_embed = args.share_decoder_input_output_embed - input_embed_dim = ( - sum(e.embedding_dim for e in embed_tokens) - if isinstance(embed_tokens, nn.ModuleList) - else embed_tokens.embedding_dim - ) - embed_dim = args.decoder_embed_dim - self.output_embed_dim = args.decoder_output_dim - - padding_idx = ( - embed_tokens[0].padding_idx - if isinstance(embed_tokens, nn.ModuleList) - else embed_tokens.padding_idx - ) - self.max_target_positions = args.max_target_positions - - self.embed_tokens = embed_tokens - self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim - - self.project_in_dim = ( - Linear(input_embed_dim, embed_dim, bias=False) - if embed_dim != input_embed_dim - else None - ) - - self.embed_positions = ( - PositionalEmbedding( - args.max_target_positions, - embed_dim, - padding_idx, - learned=args.decoder_learned_pos, - ) - if not args.no_token_positional_embeddings - else None - ) - - def forward(self, input): - mt_task = False - if isinstance(input, tuple): - if len(input) == 3: - encoder_out = input[0] - encoder_padding_mask = input[1] - prev_output_tokens = input[2] - incremental_state = None # Hardcoding to avoid passing of None objects - mt_task = True - else: - # HACK for now, need to fix (TODO sidgoyal) - prev_output_tokens = input[0] - # discard "src_lengths" - encoder_out = None - encoder_padding_mask = None - incremental_state = None - - else: - prev_output_tokens = input - encoder_out = None - encoder_padding_mask = None - incremental_state = None - - positions = ( - self.embed_positions( - prev_output_tokens, - incremental_state=incremental_state, - ) - if self.embed_positions is not None - else None - ) - - if incremental_state is not None: - prev_output_tokens = prev_output_tokens[:, -1:] - if positions is not None: - positions = positions[:, -1:] - - # embed tokens and positions - - if isinstance(self.embed_tokens, nn.ModuleList): - x_embed_list = [] - for embed_tokens_part in self.embed_tokens: - x_embed_list.append(embed_tokens_part(prev_output_tokens)) - - x = self.embed_scale * torch.cat(x_embed_list, dim=-1) - else: - x = self.embed_scale * self.embed_tokens(prev_output_tokens) - - if self.project_in_dim is not None: - x = self.project_in_dim(x) - - if positions is not None: - x += positions - x = F.dropout(x, p=self.dropout, training=self.training) - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - if mt_task: - return (x, encoder_out, encoder_padding_mask) - return x - - -class TransformerDecoderOutputLayer(nn.Module): - def __init__(self, 
args, embed_tokens, dictionary): - super().__init__() - self.share_input_output_embed = args.share_decoder_input_output_embed - self.embed_tokens = embed_tokens - self.output_embed_dim = args.decoder_output_dim - embed_dim = args.decoder_embed_dim - - self.project_out_dim = ( - Linear(embed_dim, self.output_embed_dim, bias=False) - if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights - else None - ) - self.adaptive_softmax = None - if args.adaptive_softmax_cutoff is not None: - assert not isinstance(embed_tokens, nn.ModuleList) - self.adaptive_softmax = AdaptiveSoftmax( - len(dictionary), - self.output_embed_dim, - options.eval_str_list(args.adaptive_softmax_cutoff, type=int), - dropout=args.adaptive_softmax_dropout, - adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, - factor=args.adaptive_softmax_factor, - tie_proj=args.tie_adaptive_proj, - ) - elif not self.share_input_output_embed: - self.embed_tokens = nn.Parameter( - torch.Tensor(len(dictionary), self.output_embed_dim) - ) - nn.init.normal_( - self.embed_tokens, mean=0, std=self.output_embed_dim ** -0.5 - ) - - if args.decoder_normalize_before and not getattr( - args, "no_decoder_final_norm", False - ): - self.layer_norm = LayerNorm(embed_dim) - else: - self.layer_norm = None - - def forward(self, input, apply_final_proj=True): - if isinstance(input, tuple): - x = input[0] - else: - x = input - - if self.layer_norm: - x = self.layer_norm(x) - - # T x B x C -> B x T x C - x = x.transpose(0, 1) - - if self.project_out_dim is not None: - x = self.project_out_dim(x) - if apply_final_proj: - x = self.output_layer(x) - return x - - def output_layer(self, features, **kwargs): - """Project features to the vocabulary size.""" - if self.adaptive_softmax is None: - # project back to size of vocabulary - if self.share_input_output_embed: - if isinstance(self.embed_tokens, nn.ModuleList): - output = None - for i, emb in enumerate(self.embed_tokens): - sidx = i * emb.embedding_dim - eidx = (i + 1) * emb.embedding_dim - if output is None: - output = F.linear(features[:, :, sidx:eidx], emb.weight) - else: - output += F.linear(features[:, :, sidx:eidx], emb.weight) - - return output - else: - return F.linear(features, self.embed_tokens.weight) - else: - return F.linear(features, self.embed_tokens) - else: - return features - - -class TransformerEncoderLayer(nn.Module): - """Encoder layer block. - In the original paper each operation (multi-head attention or FFN) is - postprocessed with: `dropout -> add residual -> layernorm`. In the - tensor2tensor code they suggest that learning is more robust when - preprocessing each layer with layernorm and postprocessing with: - `dropout -> add residual`. We default to the approach in the paper, but the - tensor2tensor approach can be enabled by setting - *args.encoder_normalize_before* to ``True``. 
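
A condensed illustration of the two orderings this docstring describes (not the class's actual code, which threads the switch through maybe_layer_norm and also applies dropout):

def residual_block(x, sublayer, norm, normalize_before):
    residual = x
    if normalize_before:      # tensor2tensor style: norm -> sublayer -> residual
        x = norm(x)
    x = sublayer(x)
    x = residual + x
    if not normalize_before:  # paper style: sublayer -> residual -> norm
        x = norm(x)
    return x
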
- - Args: - args (argparse.Namespace): parsed command-line arguments - """ - - def __init__(self, args): - super().__init__() - self.embed_dim = args.encoder_embed_dim - self.self_attn = MultiheadAttention( - self.embed_dim, - args.encoder_attention_heads, - dropout=args.attention_dropout, - self_attention=True, - ) - self.self_attn_layer_norm = LayerNorm(self.embed_dim) - self.dropout = args.dropout - self.activation_fn = utils.get_activation_fn( - activation=getattr(args, "activation_fn", "relu") - ) - self.activation_dropout = getattr(args, "activation_dropout", 0) - if self.activation_dropout == 0: - # for backwards compatibility with models that use args.relu_dropout - self.activation_dropout = getattr(args, "relu_dropout", 0) - self.normalize_before = args.encoder_normalize_before - self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) - self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) - self.final_layer_norm = LayerNorm(self.embed_dim) - - def upgrade_state_dict_named(self, state_dict, name): - """ - Rename layer norm states from `...layer_norms.0.weight` to - `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to - `...final_layer_norm.weight` - """ - layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"} - for old, new in layer_norm_map.items(): - for m in ("weight", "bias"): - k = "{}.layer_norms.{}.{}".format(name, old, m) - if k in state_dict: - state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k] - del state_dict[k] - - def forward(self, input): - """ - Args: - input (Tuple): - input[0] (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` - input[1] (ByteTensor/FloatTensor): encoder padding mask - - binary ByteTensor of shape `(batch, src_len)` where padding elements - are indicated by ``1``. - input[2] (LongTensor): previous decoder outputs of shape - `(batch, tgt_len)`, for teacher forcing) - Returns: - output (Tuple): - output[0] (Tensor): encoded output of shape `(batch, src_len, embed_dim)` - output[1] (ByteTensor/FloatTensor): encoder padding mask - output[2] (LongTensor): previous decoder outputs - """ - x = input[0] - encoder_padding_mask = input[1] - prev_output_tokens = input[2] - residual = x - x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) - x, _ = self.self_attn( - query=x, key=x, value=x, key_padding_mask=encoder_padding_mask - ) - x = F.dropout(x, p=self.dropout, training=self.training) - x = residual + x - x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) - - residual = x - x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) - x = self.activation_fn(self.fc1(x)) - x = F.dropout(x, p=self.activation_dropout, training=self.training) - x = self.fc2(x) - x = F.dropout(x, p=self.dropout, training=self.training) - x = residual + x - x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) - return (x, encoder_padding_mask, prev_output_tokens) - - def maybe_layer_norm(self, layer_norm, x, before=False, after=False): - assert before ^ after - if after ^ self.normalize_before: - return layer_norm(x) - else: - return x - - -class TransformerDecoderLayer(nn.Module): - """Decoder layer block. - - In the original paper each operation (multi-head attention, encoder - attention or FFN) is postprocessed with: `dropout -> add residual -> - layernorm`. In the tensor2tensor code they suggest that learning is more - robust when preprocessing each layer with layernorm and postprocessing with: - `dropout -> add residual`. 
We default to the approach in the paper, but the - tensor2tensor approach can be enabled by setting - *args.decoder_normalize_before* to ``True``. - - Args: - args (argparse.Namespace): parsed command-line arguments - no_encoder_attn (bool, optional): whether to attend to encoder outputs - (default: False). - """ - - def __init__( - self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False - ): - super().__init__() - self.embed_dim = args.decoder_embed_dim - self.self_attn = MultiheadAttention( - embed_dim=self.embed_dim, - num_heads=args.decoder_attention_heads, - dropout=args.attention_dropout, - add_bias_kv=add_bias_kv, - add_zero_attn=add_zero_attn, - self_attention=True, - ) - self.dropout = args.dropout - self.activation_fn = utils.get_activation_fn( - activation=getattr(args, "activation_fn", "relu") - ) - self.activation_dropout = getattr(args, "activation_dropout", 0) - if self.activation_dropout == 0: - # for backwards compatibility with models that use args.relu_dropout - self.activation_dropout = getattr(args, "relu_dropout", 0) - self.normalize_before = args.decoder_normalize_before - - # use layerNorm rather than FusedLayerNorm for exporting. - # char_inputs can be used to determint this. - # TODO remove this once we update apex with the fix - export = getattr(args, "char_inputs", False) - self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) - - if no_encoder_attn: - self.encoder_attn = None - self.encoder_attn_layer_norm = None - else: - self.encoder_attn = MultiheadAttention( - self.embed_dim, - args.decoder_attention_heads, - kdim=getattr(args, "encoder_embed_dim", None), - vdim=getattr(args, "encoder_embed_dim", None), - dropout=args.attention_dropout, - encoder_decoder_attention=True, - ) - self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) - - self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) - self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) - - self.final_layer_norm = LayerNorm(self.embed_dim, export=export) - self.need_attn = True - - self.onnx_trace = False - - def prepare_for_onnx_export_(self): - self.onnx_trace = True - - def forward(self, input): - """ - Args: - input (Tuple): - input[0] (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` - input[1] (Tensor): encoder output of shape `(batch, src_len, embed_dim)` - input[2] (ByteTensor/FloatTensor): encoder padding mask - - binary ByteTensor of shape `(batch, src_len)` where padding elements - are indicated by ``1``. 
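
For context on the forward pass documented here: when no incremental state is used, the decoder masks self-attention causally via buffered_future_mask (defined further below), which builds an upper-triangular matrix of -inf above the diagonal; a four-step sketch:

import torch

dim = 4
mask = torch.triu(torch.full((dim, dim), float("-inf")), diagonal=1)
print(mask)
# tensor([[0., -inf, -inf, -inf],
#         [0., 0., -inf, -inf],
#         [0., 0., 0., -inf],
#         [0., 0., 0., 0.]])
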
- Returns: - output (Tuple): - output[0] (Tensor): encoded output of shape `(batch, src_len, embed_dim)` - output[1] (ByteTensor/FloatTensor): encoder padding mask - output[2] (LongTensor): previous decoder outputs - """ - # Note: incremental state is not yet supported - mt_task = False - if isinstance(input, tuple): - x = input[0] - encoder_out = input[1] - encoder_padding_mask = input[2] - incremental_state = None - mt_task = True - else: - x = input - encoder_out = None - encoder_padding_mask = None - incremental_state = None - - if incremental_state is None: - self_attn_mask = self.buffered_future_mask(x) - else: - self_attn_mask = None - - # TODO: add back prev_self_attn_state, prev_attn_state, - # self_attn_padding_mask - prev_self_attn_state = None - prev_attn_state = None - self_attn_padding_mask = None - - residual = x - x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) - if prev_self_attn_state is not None: - if incremental_state is None: - incremental_state = {} - prev_key, prev_value = prev_self_attn_state - saved_state = {"prev_key": prev_key, "prev_value": prev_value} - self.self_attn._set_input_buffer(incremental_state, saved_state) - x, attn = self.self_attn( - query=x, - key=x, - value=x, - key_padding_mask=self_attn_padding_mask, - incremental_state=incremental_state, - need_weights=False, - attn_mask=self_attn_mask, - ) - x = F.dropout(x, p=self.dropout, training=self.training) - x = residual + x - x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) - - if self.encoder_attn is not None: - residual = x - x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) - if prev_attn_state is not None: - if incremental_state is None: - incremental_state = {} - prev_key, prev_value = prev_attn_state - saved_state = {"prev_key": prev_key, "prev_value": prev_value} - self.encoder_attn._set_input_buffer(incremental_state, saved_state) - x, attn = self.encoder_attn( - query=x, - key=encoder_out, - value=encoder_out, - key_padding_mask=encoder_padding_mask, - incremental_state=incremental_state, - static_kv=True, - need_weights=(not self.training and self.need_attn), - ) - x = F.dropout(x, p=self.dropout, training=self.training) - x = residual + x - x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) - - residual = x - x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) - x = self.activation_fn(self.fc1(x)) - x = F.dropout(x, p=self.activation_dropout, training=self.training) - x = self.fc2(x) - x = F.dropout(x, p=self.dropout, training=self.training) - x = residual + x - x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) - - if mt_task: - return (x, encoder_out, encoder_padding_mask) - return x - - def buffered_future_mask(self, tensor): - dim = tensor.size(0) - if ( - not hasattr(self, "_future_mask") - or self._future_mask is None - or self._future_mask.device != tensor.device - ): - self._future_mask = torch.triu( - utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 - ) - if self._future_mask.size(0) < dim: - self._future_mask = torch.triu( - utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1 - ) - return self._future_mask[:dim, :dim] - - def maybe_layer_norm(self, layer_norm, x, before=False, after=False): - assert before ^ after - if after ^ self.normalize_before: - return layer_norm(x) - else: - return x - - def make_generation_fast_(self, need_attn=False, **kwargs): - self.need_attn = need_attn - - -def Embedding(num_embeddings, embedding_dim, padding_idx): - m = 
nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) - nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) - nn.init.constant_(m.weight[padding_idx], 0) - return m - - -def Linear(in_features, out_features, bias=True): - m = nn.Linear(in_features, out_features, bias) - nn.init.xavier_uniform_(m.weight) - if bias: - nn.init.constant_(m.bias, 0.0) - return m diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/speech_to_text/convtransformer.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/speech_to_text/convtransformer.py deleted file mode 100644 index eba000d7b0826d2ecf5dc471156f8f8cc9f5e402..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/speech_to_text/convtransformer.py +++ /dev/null @@ -1,448 +0,0 @@ -#!/usr/bin/env python3 - -import logging -import math -from typing import Dict, List, Optional, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as F -from fairseq import checkpoint_utils, utils -from fairseq.data.data_utils import lengths_to_padding_mask -from fairseq.models import ( - FairseqEncoder, - FairseqEncoderDecoderModel, - register_model, - register_model_architecture, -) -from fairseq.models.transformer import Embedding, TransformerDecoder -from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerEncoderLayer -from torch import Tensor - -logger = logging.getLogger(__name__) - - -@register_model("convtransformer") -class ConvTransformerModel(FairseqEncoderDecoderModel): - """ - Transformer-based Speech translation model from ESPNet-ST - https://arxiv.org/abs/2004.10234 - """ - - def __init__(self, encoder, decoder): - super().__init__(encoder, decoder) - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - parser.add_argument( - "--input-feat-per-channel", - type=int, - metavar="N", - help="encoder input dimension per input channel", - ) - parser.add_argument( - "--activation-fn", - choices=utils.get_available_activation_fns(), - help="activation function to use", - ) - parser.add_argument( - "--dropout", type=float, metavar="D", help="dropout probability" - ) - parser.add_argument( - "--attention-dropout", - type=float, - metavar="D", - help="dropout probability for attention weights", - ) - parser.add_argument( - "--activation-dropout", - "--relu-dropout", - type=float, - metavar="D", - help="dropout probability after activation in FFN.", - ) - parser.add_argument( - "--encoder-embed-dim", - type=int, - metavar="N", - help="encoder embedding dimension", - ) - parser.add_argument( - "--encoder-ffn-embed-dim", - type=int, - metavar="N", - help="encoder embedding dimension for FFN", - ) - parser.add_argument( - "--encoder-layers", type=int, metavar="N", help="num encoder layers" - ) - parser.add_argument( - "--encoder-attention-heads", - type=int, - metavar="N", - help="num encoder attention heads", - ) - parser.add_argument( - "--encoder-normalize-before", - action="store_true", - help="apply layernorm before each encoder block", - ) - parser.add_argument( - "--decoder-embed-dim", - type=int, - metavar="N", - help="decoder embedding dimension", - ) - parser.add_argument( - "--decoder-ffn-embed-dim", - type=int, - metavar="N", - help="decoder embedding dimension for FFN", - ) - parser.add_argument( - "--decoder-layers", type=int, metavar="N", help="num decoder layers" - ) - parser.add_argument( - "--decoder-attention-heads", - type=int, - metavar="N", - help="num decoder 
attention heads", - ) - parser.add_argument( - "--decoder-normalize-before", - action="store_true", - help="apply layernorm before each decoder block", - ) - parser.add_argument( - "--decoder-output-dim", - type=int, - metavar="N", - help="decoder output dimension (extra linear layer if different from decoder embed dim)", - ) - parser.add_argument( - "--share-decoder-input-output-embed", - action="store_true", - help="share decoder input and output embeddings", - ) - parser.add_argument( - "--layernorm-embedding", - action="store_true", - help="add layernorm to embedding", - ) - parser.add_argument( - "--no-scale-embedding", - action="store_true", - help="if True, dont scale embeddings", - ) - parser.add_argument( - "--load-pretrained-encoder-from", - type=str, - metavar="STR", - help="model to take encoder weights from (for initialization)", - ) - parser.add_argument( - "--load-pretrained-decoder-from", - type=str, - metavar="STR", - help="model to take decoder weights from (for initialization)", - ) - parser.add_argument( - "--conv-out-channels", - type=int, - metavar="INT", - help="the number of output channels of conv layer", - ) - - @classmethod - def build_encoder(cls, args): - encoder = ConvTransformerEncoder(args) - if getattr(args, "load_pretrained_encoder_from", None): - encoder = checkpoint_utils.load_pretrained_component_from_model( - component=encoder, checkpoint=args.load_pretrained_encoder_from - ) - return encoder - - @classmethod - def build_decoder(cls, args, task, embed_tokens): - decoder = TransformerDecoderNoExtra(args, task.target_dictionary, embed_tokens) - if getattr(args, "load_pretrained_decoder_from", None): - decoder = checkpoint_utils.load_pretrained_component_from_model( - component=decoder, checkpoint=args.load_pretrained_decoder_from - ) - return decoder - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - - # make sure all arguments are present in older models - base_architecture(args) - - def build_embedding(dictionary, embed_dim): - num_embeddings = len(dictionary) - padding_idx = dictionary.pad() - return Embedding(num_embeddings, embed_dim, padding_idx) - - decoder_embed_tokens = build_embedding( - task.target_dictionary, args.decoder_embed_dim - ) - encoder = cls.build_encoder(args) - decoder = cls.build_decoder(args, task, decoder_embed_tokens) - return cls(encoder, decoder) - - @staticmethod - @torch.jit.unused - def set_batch_first(lprobs): - lprobs.batch_first = True - - def get_normalized_probs( - self, - net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], - log_probs: bool, - sample: Optional[Dict[str, Tensor]] = None, - ): - # net_output['encoder_out'] is a (B, T, D) tensor - lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample) - if self.training: - self.set_batch_first(lprobs) - return lprobs - - def output_layout(self): - return "BTD" - - """ - The forward method inherited from the base class has a **kwargs argument in - its input, which is not supported in torchscript. This method overrites the forward - method definition without **kwargs. 
- """ - - def forward(self, src_tokens, src_lengths, prev_output_tokens): - encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths) - decoder_out = self.decoder( - prev_output_tokens=prev_output_tokens, encoder_out=encoder_out - ) - return decoder_out - - -class ConvTransformerEncoder(FairseqEncoder): - """Conv + Transformer encoder""" - - def __init__(self, args): - """Construct an Encoder object.""" - super().__init__(None) - - self.dropout = args.dropout - self.embed_scale = ( - 1.0 if args.no_scale_embedding else math.sqrt(args.encoder_embed_dim) - ) - self.padding_idx = 1 - self.in_channels = 1 - self.input_dim = args.input_feat_per_channel - self.conv = torch.nn.Sequential( - torch.nn.Conv2d(1, args.conv_out_channels, 3, stride=2, padding=3 // 2), - torch.nn.ReLU(), - torch.nn.Conv2d( - args.conv_out_channels, - args.conv_out_channels, - 3, - stride=2, - padding=3 // 2, - ), - torch.nn.ReLU(), - ) - transformer_input_dim = self.infer_conv_output_dim( - self.in_channels, self.input_dim, args.conv_out_channels - ) - self.out = torch.nn.Linear(transformer_input_dim, args.encoder_embed_dim) - self.embed_positions = PositionalEmbedding( - args.max_source_positions, - args.encoder_embed_dim, - self.padding_idx, - learned=False, - ) - - self.transformer_layers = nn.ModuleList([]) - self.transformer_layers.extend( - [TransformerEncoderLayer(args) for i in range(args.encoder_layers)] - ) - if args.encoder_normalize_before: - self.layer_norm = LayerNorm(args.encoder_embed_dim) - else: - self.layer_norm = None - - def pooling_ratio(self): - return 4 - - def infer_conv_output_dim(self, in_channels, input_dim, out_channels): - sample_seq_len = 200 - sample_bsz = 10 - x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim) - x = torch.nn.Conv2d(1, out_channels, 3, stride=2, padding=3 // 2)(x) - x = torch.nn.Conv2d(out_channels, out_channels, 3, stride=2, padding=3 // 2)(x) - x = x.transpose(1, 2) - mb, seq = x.size()[:2] - return x.contiguous().view(mb, seq, -1).size(-1) - - def forward(self, src_tokens, src_lengths): - """Encode input sequence. 
- :param torch.Tensor xs: input tensor - :param torch.Tensor masks: input mask - :return: position embedded tensor and mask - :rtype Tuple[torch.Tensor, torch.Tensor]: - """ - bsz, max_seq_len, _ = src_tokens.size() - x = ( - src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim) - .transpose(1, 2) - .contiguous() - ) - x = self.conv(x) - bsz, _, output_seq_len, _ = x.size() - x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1) - x = self.out(x) - x = self.embed_scale * x - - subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5) - input_len_0 = (src_lengths.float() / subsampling_factor).ceil().long() - input_len_1 = x.size(0) * torch.ones([src_lengths.size(0)]).long().to( - input_len_0.device - ) - input_lengths = torch.min(input_len_0, input_len_1) - - encoder_padding_mask = lengths_to_padding_mask(input_lengths) - - positions = self.embed_positions(encoder_padding_mask).transpose(0, 1) - x += positions - x = F.dropout(x, p=self.dropout, training=self.training) - - for layer in self.transformer_layers: - x = layer(x, encoder_padding_mask) - - if not encoder_padding_mask.any(): - maybe_encoder_padding_mask = None - else: - maybe_encoder_padding_mask = encoder_padding_mask - - return { - "encoder_out": [x], - "encoder_padding_mask": [maybe_encoder_padding_mask] - if maybe_encoder_padding_mask is not None - else [], - "encoder_embedding": [], - "encoder_states": [], - "src_tokens": [], - "src_lengths": [], - } - - @torch.jit.export - def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order): - """ - Reorder encoder output according to *new_order*. - - Args: - encoder_out: output from the ``forward()`` method - new_order (LongTensor): desired order - - Returns: - *encoder_out* rearranged according to *new_order* - """ - new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)] - if len(encoder_out["encoder_padding_mask"]) == 0: - new_encoder_padding_mask = [] - else: - new_encoder_padding_mask = [ - (encoder_out["encoder_padding_mask"][0]).index_select(0, new_order) - ] - if len(encoder_out["encoder_embedding"]) == 0: - new_encoder_embedding = [] - else: - new_encoder_embedding = [ - (encoder_out["encoder_embedding"][0]).index_select(0, new_order) - ] - encoder_states = encoder_out["encoder_states"] - if len(encoder_states) > 0: - for idx, state in enumerate(encoder_states): - encoder_states[idx] = state.index_select(1, new_order) - - return { - "encoder_out": new_encoder_out, - "encoder_padding_mask": new_encoder_padding_mask, - "encoder_embedding": new_encoder_embedding, - "encoder_states": encoder_states, - "src_tokens": [], - "src_lengths": [], - } - - -class TransformerDecoderNoExtra(TransformerDecoder): - def extract_features( - self, - prev_output_tokens, - encoder_out: Optional[Dict[str, List[Tensor]]], - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - full_context_alignment: bool = False, - alignment_layer: Optional[int] = None, - alignment_heads: Optional[int] = None, - ): - # call scriptable method from parent class - x, _ = self.extract_features_scriptable( - prev_output_tokens, - encoder_out, - incremental_state, - full_context_alignment, - alignment_layer, - alignment_heads, - ) - return x, None - - -@register_model_architecture(model_name="convtransformer", arch_name="convtransformer") -def base_architecture(args): - args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) 
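
# Aside: this getattr(args, name, default) idiom is how fairseq layers a named
# architecture over the parsed command line: every hyper-parameter the user
# did not set is filled with the architecture's default, so a namespace that
# only specifies, say, encoder_layers keeps that value and inherits the rest.
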
- args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) - args.encoder_layers = getattr(args, "encoder_layers", 6) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) - args.decoder_ffn_embed_dim = getattr( - args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim - ) - args.decoder_layers = getattr(args, "decoder_layers", 6) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) - args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) - args.attention_dropout = getattr(args, "attention_dropout", 0.0) - args.activation_dropout = getattr(args, "activation_dropout", 0.0) - args.activation_fn = getattr(args, "activation_fn", "relu") - args.dropout = getattr(args, "dropout", 0.1) - args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) - args.share_decoder_input_output_embed = getattr( - args, "share_decoder_input_output_embed", False - ) - args.no_token_positional_embeddings = getattr( - args, "no_token_positional_embeddings", False - ) - args.adaptive_input = getattr(args, "adaptive_input", False) - args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) - - args.decoder_output_dim = getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) - args.no_scale_embedding = getattr(args, "no_scale_embedding", False) - args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) - args.max_source_positions = getattr(args, "max_source_positions", 3000) - args.max_target_positions = getattr(args, "max_target_positions", 1024) - args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) - args.conv_out_channels = getattr(args, "conv_out_channels", args.encoder_embed_dim) - - -@register_model_architecture("convtransformer", "convtransformer_espnet") -def convtransformer_espnet(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) - args.encoder_layers = getattr(args, "encoder_layers", 12) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) diff --git a/spaces/Hasani/Specific_Object_Recognition_in_the_Wild/SuperGluePretrainedNetwork/models/__init__.py b/spaces/Hasani/Specific_Object_Recognition_in_the_Wild/SuperGluePretrainedNetwork/models/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/HighCWu/GPEN/retinaface/utils/__init__.py b/spaces/HighCWu/GPEN/retinaface/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/interface.py b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/interface.py deleted file mode 100644 index 8f93b1a5575b3a4589d6412ac6139377c81c67ef..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/interface.py +++ /dev/null @@ -1,844 +0,0 @@ -""" -This is the core file in the `gradio` package, and 
defines the Interface class, -including various methods for constructing an interface and then launching it. -""" - -from __future__ import annotations - -import inspect -import json -import os -import pkgutil -import re -import warnings -import weakref -from typing import TYPE_CHECKING, Any, Callable, List, Tuple - -from markdown_it import MarkdownIt -from mdit_py_plugins.dollarmath.index import dollarmath_plugin -from mdit_py_plugins.footnote.index import footnote_plugin - -from gradio import Examples, interpretation, utils -from gradio.blocks import Blocks -from gradio.components import ( - Button, - Interpretation, - IOComponent, - Markdown, - State, - get_component_instance, -) -from gradio.data_classes import InterfaceTypes -from gradio.documentation import document, set_documentation_group -from gradio.events import Changeable, Streamable -from gradio.flagging import CSVLogger, FlaggingCallback, FlagMethod -from gradio.layouts import Column, Row, Tab, Tabs -from gradio.pipelines import load_from_pipeline - -set_documentation_group("interface") - -if TYPE_CHECKING: # Only import for type checking (is False at runtime). - from transformers.pipelines.base import Pipeline - - -@document("launch", "load", "from_pipeline", "integrate", "queue") -class Interface(Blocks): - """ - Interface is Gradio's main high-level class, and allows you to create a web-based GUI / demo - around a machine learning model (or any Python function) in a few lines of code. - You must specify three parameters: (1) the function to create a GUI for (2) the desired input components and - (3) the desired output components. Additional parameters can be used to control the appearance - and behavior of the demo. - - Example: - import gradio as gr - - def image_classifier(inp): - return {'cat': 0.3, 'dog': 0.7} - - demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label") - demo.launch() - Demos: hello_world, hello_world_3, gpt_j - Guides: quickstart, key_features, sharing_your_app, interface_state, reactive_interfaces, advanced_interface_features, setting_up_a_gradio_demo_for_maximum_performance - """ - - # stores references to all currently existing Interface instances - instances: weakref.WeakSet = weakref.WeakSet() - - @classmethod - def get_instances(cls) -> List[Interface]: - """ - :return: list of all current instances. - """ - return list(Interface.instances) - - @classmethod - def load( - cls, - name: str, - src: str | None = None, - api_key: str | None = None, - alias: str | None = None, - **kwargs, - ) -> Interface: - """ - Class method that constructs an Interface from a Hugging Face repo. Can accept - model repos (if src is "models") or Space repos (if src is "spaces"). The input - and output components are automatically loaded from the repo. - Parameters: - name: the name of the model (e.g. "gpt2" or "facebook/bart-base") or space (e.g. "flax-community/spanish-gpt2"), can include the `src` as prefix (e.g. "models/facebook/bart-base") - src: the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`) - api_key: optional access token for loading private Hugging Face Hub models or spaces. 
Find your token here: https://huggingface.co/settings/tokens - alias: optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x) - Returns: - a Gradio Interface object for the given model - Example: - import gradio as gr - description = "Story generation with GPT" - examples = [["An adventurer is approached by a mysterious stranger in the tavern for a new quest."]] - demo = gr.Interface.load("models/EleutherAI/gpt-neo-1.3B", description=description, examples=examples) - demo.launch() - """ - return super().load(name=name, src=src, api_key=api_key, alias=alias, **kwargs) - - @classmethod - def from_pipeline(cls, pipeline: Pipeline, **kwargs) -> Interface: - """ - Class method that constructs an Interface from a Hugging Face transformers.Pipeline object. - The input and output components are automatically determined from the pipeline. - Parameters: - pipeline: the pipeline object to use. - Returns: - a Gradio Interface object from the given Pipeline - Example: - import gradio as gr - from transformers import pipeline - pipe = pipeline("image-classification") - gr.Interface.from_pipeline(pipe).launch() - """ - interface_info = load_from_pipeline(pipeline) - kwargs = dict(interface_info, **kwargs) - interface = cls(**kwargs) - return interface - - def __init__( - self, - fn: Callable, - inputs: str | IOComponent | List[str | IOComponent] | None, - outputs: str | IOComponent | List[str | IOComponent] | None, - examples: List[Any] | List[List[Any]] | str | None = None, - cache_examples: bool | None = None, - examples_per_page: int = 10, - live: bool = False, - interpretation: Callable | str | None = None, - num_shap: float = 2.0, - title: str | None = None, - description: str | None = None, - article: str | None = None, - thumbnail: str | None = None, - theme: str = "default", - css: str | None = None, - allow_flagging: str | None = None, - flagging_options: List[str] | None = None, - flagging_dir: str = "flagged", - flagging_callback: FlaggingCallback = CSVLogger(), - analytics_enabled: bool | None = None, - batch: bool = False, - max_batch_size: int = 4, - _api_mode: bool = False, - **kwargs, - ): - """ - Parameters: - fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component. - inputs: a single Gradio component, or list of Gradio components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of input components should match the number of parameters in fn. If set to None, then only the output components will be displayed. - outputs: a single Gradio component, or list of Gradio components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of output components should match the number of values returned by fn. If set to None, then only the input components will be displayed. - examples: sample inputs for the function; if provided, appear below the UI components and can be clicked to populate the interface. Should be nested list, in which the outer list consists of samples and each inner list consists of an input corresponding to each input component. 
A string path to a directory of examples can also be provided, but it should be within the directory with the python file running the gradio app. If there are multiple input components and a directory is provided, a log.csv file must be present in the directory to link corresponding inputs. - cache_examples: If True, caches examples in the server for fast runtime in examples. The default option in HuggingFace Spaces is True. The default option elsewhere is False. - examples_per_page: If examples are provided, how many to display per page. - live: whether the interface should automatically rerun if any of the inputs change. - interpretation: function that provides interpretation explaining prediction output. Pass "default" to use simple built-in interpreter, "shap" to use a built-in shapley-based interpreter, or your own custom interpretation function. For more information on the different interpretation methods, see the Advanced Interface Features guide. - num_shap: a multiplier that determines how many examples are computed for shap-based interpretation. Increasing this value will increase shap runtime, but improve results. Only applies if interpretation is "shap". - title: a title for the interface; if provided, appears above the input and output components in large font. Also used as the tab title when opened in a browser window. - description: a description for the interface; if provided, appears above the input and output components and beneath the title in regular font. Accepts Markdown and HTML content. - article: an expanded article explaining the interface; if provided, appears below the input and output components in regular font. Accepts Markdown and HTML content. - thumbnail: path or url to image to use as display image when the web demo is shared on social media. - theme: Theme to use - right now, only "default" is supported. Can be set with the GRADIO_THEME environment variable. - css: custom css or path to custom css file to use with interface. - allow_flagging: one of "never", "auto", or "manual". If "never" or "auto", users will not see a button to flag an input and output. If "manual", users will see a button to flag. If "auto", every input the user submits will be automatically flagged (outputs are not flagged). If "manual", both the input and outputs are flagged when the user clicks flag button. This parameter can be set with environmental variable GRADIO_ALLOW_FLAGGING; otherwise defaults to "manual". - flagging_options: if provided, allows user to select from the list of options when flagging. Only applies if allow_flagging is "manual". - flagging_dir: what to name the directory where flagged data is stored. - flagging_callback: An instance of a subclass of FlaggingCallback which will be called when a sample is flagged. By default logs to a local CSV file. - analytics_enabled: Whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable if defined, or default to True. - batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component. 
- max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True) - """ - super().__init__( - analytics_enabled=analytics_enabled, - mode="interface", - css=css, - title=title or "Gradio", - theme=theme, - **kwargs, - ) - - if isinstance(fn, list): - raise DeprecationWarning( - "The `fn` parameter only accepts a single function, support for a list " - "of functions has been deprecated. Please use gradio.mix.Parallel " - "instead." - ) - - self.interface_type = InterfaceTypes.STANDARD - if (inputs is None or inputs == []) and (outputs is None or outputs == []): - raise ValueError("Must provide at least one of `inputs` or `outputs`") - elif outputs is None or outputs == []: - outputs = [] - self.interface_type = InterfaceTypes.INPUT_ONLY - elif inputs is None or inputs == []: - inputs = [] - self.interface_type = InterfaceTypes.OUTPUT_ONLY - - assert isinstance(inputs, (str, list, IOComponent)) - assert isinstance(outputs, (str, list, IOComponent)) - - if not isinstance(inputs, list): - inputs = [inputs] - if not isinstance(outputs, list): - outputs = [outputs] - - if self.is_space and cache_examples is None: - self.cache_examples = True - else: - self.cache_examples = cache_examples or False - - state_input_indexes = [ - idx for idx, i in enumerate(inputs) if i == "state" or isinstance(i, State) - ] - state_output_indexes = [ - idx for idx, o in enumerate(outputs) if o == "state" or isinstance(o, State) - ] - - if len(state_input_indexes) == 0 and len(state_output_indexes) == 0: - pass - elif len(state_input_indexes) != 1 or len(state_output_indexes) != 1: - raise ValueError( - "If using 'state', there must be exactly one state input and one state output." - ) - else: - state_input_index = state_input_indexes[0] - state_output_index = state_output_indexes[0] - if inputs[state_input_index] == "state": - default = utils.get_default_args(fn)[state_input_index] - state_variable = State(value=default) # type: ignore - else: - state_variable = inputs[state_input_index] - - inputs[state_input_index] = state_variable - outputs[state_output_index] = state_variable - - if cache_examples: - warnings.warn( - "Cache examples cannot be used with state inputs and outputs. " - "Setting cache_examples to False." - ) - self.cache_examples = False - - self.input_components = [ - get_component_instance(i, render=False) for i in inputs - ] - self.output_components = [ - get_component_instance(o, render=False) for o in outputs - ] - - for component in self.input_components + self.output_components: - if not (isinstance(component, IOComponent)): - raise ValueError( - f"{component} is not a valid input/output component for Interface."
- ) - - if len(self.input_components) == len(self.output_components): - same_components = [ - i is o for i, o in zip(self.input_components, self.output_components) - ] - if all(same_components): - self.interface_type = InterfaceTypes.UNIFIED - - if self.interface_type in [ - InterfaceTypes.STANDARD, - InterfaceTypes.OUTPUT_ONLY, - ]: - for o in self.output_components: - assert isinstance(o, IOComponent) - o.interactive = False # Force output components to be non-interactive - - if ( - interpretation is None - or isinstance(interpretation, list) - or callable(interpretation) - ): - self.interpretation = interpretation - elif isinstance(interpretation, str): - self.interpretation = [ - interpretation.lower() for _ in self.input_components - ] - else: - raise ValueError("Invalid value for parameter: interpretation") - - self.api_mode = _api_mode - self.fn = fn - self.fn_durations = [0, 0] - self.__name__ = getattr(fn, "__name__", "fn") - self.live = live - self.title = title - - CLEANER = re.compile("<.*?>") - - def clean_html(raw_html): - cleantext = re.sub(CLEANER, "", raw_html) - return cleantext - - md = ( - MarkdownIt( - "js-default", - { - "linkify": True, - "typographer": True, - "html": True, - }, - ) - .use(dollarmath_plugin) - .use(footnote_plugin) - .enable("table") - ) - - simple_description = None - if description is not None: - description = md.render(description) - simple_description = clean_html(description) - self.simple_description = simple_description - self.description = description - if article is not None: - article = utils.readme_to_html(article) - article = md.render(article) - self.article = article - - self.thumbnail = thumbnail - self.theme = theme or os.getenv("GRADIO_THEME", "default") - if not (self.theme == "default"): - warnings.warn("Currently, only the 'default' theme is supported.") - - self.examples = examples - self.num_shap = num_shap - self.examples_per_page = examples_per_page - - self.simple_server = None - - # For analytics_enabled and allow_flagging: (1) first check for - # parameter, (2) check for env variable, (3) default to True/"manual" - self.analytics_enabled = ( - analytics_enabled - if analytics_enabled is not None - else os.getenv("GRADIO_ANALYTICS_ENABLED", "True") == "True" - ) - if allow_flagging is None: - allow_flagging = os.getenv("GRADIO_ALLOW_FLAGGING", "manual") - if allow_flagging is True: - warnings.warn( - "The `allow_flagging` parameter in `Interface` now " - "takes a string value ('auto', 'manual', or 'never')" - ", not a boolean. Setting parameter to: 'manual'." - ) - self.allow_flagging = "manual" - elif allow_flagging == "manual": - self.allow_flagging = "manual" - elif allow_flagging is False: - warnings.warn( - "The `allow_flagging` parameter in `Interface` now " - "takes a string value ('auto', 'manual', or 'never')" - ", not a boolean. Setting parameter to: 'never'." - ) - self.allow_flagging = "never" - elif allow_flagging == "never": - self.allow_flagging = "never" - elif allow_flagging == "auto": - self.allow_flagging = "auto" - else: - raise ValueError( - "Invalid value for `allow_flagging` parameter. " - "Must be: 'auto', 'manual', or 'never'."
- ) - - self.flagging_options = flagging_options - self.flagging_callback = flagging_callback - self.flagging_dir = flagging_dir - self.batch = batch - self.max_batch_size = max_batch_size - - self.save_to = None # Used for selenium tests - self.share = None - self.share_url = None - self.local_url = None - - self.favicon_path = None - - if self.analytics_enabled: - data = { - "mode": self.mode, - "fn": fn, - "inputs": inputs, - "outputs": outputs, - "live": live, - "ip_address": self.ip_address, - "interpretation": interpretation, - "allow_flagging": allow_flagging, - "custom_css": self.css is not None, - "theme": self.theme, - "version": (pkgutil.get_data(__name__, "version.txt") or b"") - .decode("ascii") - .strip(), - } - utils.initiated_analytics(data) - - utils.version_check() - Interface.instances.add(self) - - param_names = inspect.getfullargspec(self.fn)[0] - for component, param_name in zip(self.input_components, param_names): - assert isinstance(component, IOComponent) - if component.label is None: - component.label = param_name - for i, component in enumerate(self.output_components): - assert isinstance(component, IOComponent) - if component.label is None: - if len(self.output_components) == 1: - component.label = "output" - else: - component.label = "output " + str(i) - - if self.allow_flagging != "never": - if ( - self.interface_type == InterfaceTypes.UNIFIED - or self.allow_flagging == "auto" - ): - self.flagging_callback.setup(self.input_components, self.flagging_dir) # type: ignore - elif self.interface_type == InterfaceTypes.INPUT_ONLY: - pass - else: - self.flagging_callback.setup( - self.input_components + self.output_components, self.flagging_dir # type: ignore - ) - - # Render the Gradio UI - with self: - self.render_title_description() - - submit_btn, clear_btn, stop_btn, flag_btns = None, None, None, None - interpretation_btn, interpretation_set = None, None - input_component_column, interpret_component_column = None, None - - with Row().style(equal_height=False): - if self.interface_type in [ - InterfaceTypes.STANDARD, - InterfaceTypes.INPUT_ONLY, - InterfaceTypes.UNIFIED, - ]: - ( - submit_btn, - clear_btn, - stop_btn, - flag_btns, - input_component_column, - interpret_component_column, - interpretation_set, - ) = self.render_input_column() - if self.interface_type in [ - InterfaceTypes.STANDARD, - InterfaceTypes.OUTPUT_ONLY, - ]: - ( - submit_btn_out, - clear_btn_2_out, - stop_btn_2_out, - flag_btns_out, - interpretation_btn, - ) = self.render_output_column(submit_btn) - submit_btn = submit_btn or submit_btn_out - clear_btn = clear_btn or clear_btn_2_out - stop_btn = stop_btn or stop_btn_2_out - flag_btns = flag_btns or flag_btns_out - - assert clear_btn is not None, "Clear button not rendered" - - self.attach_submit_events(submit_btn, stop_btn) - self.attach_clear_events( - clear_btn, input_component_column, interpret_component_column - ) - self.attach_interpretation_events( - interpretation_btn, - interpretation_set, - input_component_column, - interpret_component_column, - ) - - self.render_flagging_buttons(flag_btns) - self.render_examples() - self.render_article() - - self.config = self.get_config_file() - - def render_title_description(self) -> None: - if self.title: - Markdown( - "<h1 style='text-align: center; margin-bottom: 1rem'>" - + self.title - + "</h1>" - ) - if self.description: - Markdown(self.description) - - def render_flag_btns(self) -> List[Tuple[Button, str | None]]: - if self.flagging_options is None: - return [(Button("Flag"), None)] - 
else: - return [ - ( - Button("Flag as " + flag_option), - flag_option, - ) - for flag_option in self.flagging_options - ] - - def render_input_column( - self, - ) -> Tuple[ - Button | None, - Button | None, - Button | None, - List | None, - Column, - Column | None, - List[Interpretation] | None, - ]: - submit_btn, clear_btn, stop_btn, flag_btns = None, None, None, None - interpret_component_column, interpretation_set = None, None - - with Column(variant="panel"): - input_component_column = Column() - with input_component_column: - for component in self.input_components: - component.render() - if self.interpretation: - interpret_component_column = Column(visible=False) - interpretation_set = [] - with interpret_component_column: - for component in self.input_components: - interpretation_set.append(Interpretation(component)) - with Row(): - if self.interface_type in [ - InterfaceTypes.STANDARD, - InterfaceTypes.INPUT_ONLY, - ]: - clear_btn = Button("Clear") - if not self.live: - submit_btn = Button("Submit", variant="primary") - # Stopping jobs only works if the queue is enabled - # We don't know if the queue is enabled when the interface - # is created. We use whether a generator function is provided - # as a proxy of whether the queue will be enabled. - # Using a generator function without the queue will raise an error. - if inspect.isgeneratorfunction(self.fn): - stop_btn = Button("Stop", variant="stop") - elif self.interface_type == InterfaceTypes.UNIFIED: - clear_btn = Button("Clear") - submit_btn = Button("Submit", variant="primary") - if inspect.isgeneratorfunction(self.fn) and not self.live: - stop_btn = Button("Stop", variant="stop") - if self.allow_flagging == "manual": - flag_btns = self.render_flag_btns() - elif self.allow_flagging == "auto": - flag_btns = [(submit_btn, None)] - return ( - submit_btn, - clear_btn, - stop_btn, - flag_btns, - input_component_column, - interpret_component_column, - interpretation_set, - ) - - def render_output_column( - self, - submit_btn_in: Button | None, - ) -> Tuple[Button | None, Button | None, Button | None, List | None, Button | None]: - submit_btn = submit_btn_in - interpretation_btn, clear_btn, flag_btns, stop_btn = None, None, None, None - - with Column(variant="panel"): - for component in self.output_components: - if not (isinstance(component, State)): - component.render() - with Row(): - if self.interface_type == InterfaceTypes.OUTPUT_ONLY: - clear_btn = Button("Clear") - submit_btn = Button("Generate", variant="primary") - if inspect.isgeneratorfunction(self.fn) and not self.live: - # Stopping jobs only works if the queue is enabled - # We don't know if the queue is enabled when the interface - # is created. We use whether a generator function is provided - # as a proxy of whether the queue will be enabled. - # Using a generator function without the queue will raise an error. 
- stop_btn = Button("Stop", variant="stop") - if self.allow_flagging == "manual": - flag_btns = self.render_flag_btns() - elif self.allow_flagging == "auto": - assert submit_btn is not None, "Submit button not rendered" - flag_btns = [(submit_btn, None)] - if self.interpretation: - interpretation_btn = Button("Interpret") - - return submit_btn, clear_btn, stop_btn, flag_btns, interpretation_btn - - def render_article(self): - if self.article: - Markdown(self.article) - - def attach_submit_events(self, submit_btn: Button | None, stop_btn: Button | None): - if self.live: - if self.interface_type == InterfaceTypes.OUTPUT_ONLY: - assert submit_btn is not None, "Submit button not rendered" - super().load(self.fn, None, self.output_components) - # For output-only interfaces, the user probably still want a "generate" - # button even if the Interface is live - submit_btn.click( - self.fn, - None, - self.output_components, - api_name="predict", - preprocess=not (self.api_mode), - postprocess=not (self.api_mode), - batch=self.batch, - max_batch_size=self.max_batch_size, - ) - else: - for component in self.input_components: - if isinstance(component, Streamable) and component.streaming: - component.stream( - self.fn, - self.input_components, - self.output_components, - api_name="predict", - preprocess=not (self.api_mode), - postprocess=not (self.api_mode), - ) - continue - if isinstance(component, Changeable): - component.change( - self.fn, - self.input_components, - self.output_components, - api_name="predict", - preprocess=not (self.api_mode), - postprocess=not (self.api_mode), - ) - else: - assert submit_btn is not None, "Submit button not rendered" - pred = submit_btn.click( - self.fn, - self.input_components, - self.output_components, - api_name="predict", - scroll_to_output=True, - preprocess=not (self.api_mode), - postprocess=not (self.api_mode), - batch=self.batch, - max_batch_size=self.max_batch_size, - ) - if stop_btn: - stop_btn.click( - None, - inputs=None, - outputs=None, - cancels=[pred], - ) - - def attach_clear_events( - self, - clear_btn: Button, - input_component_column: Column | None, - interpret_component_column: Column | None, - ): - clear_btn.click( - None, - [], - ( - self.input_components - + self.output_components - + ([input_component_column] if input_component_column else []) - + ([interpret_component_column] if self.interpretation else []) - ), # type: ignore - _js=f"""() => {json.dumps( - [getattr(component, "cleared_value", None) - for component in self.input_components + self.output_components] + ( - [Column.update(visible=True)] - if self.interface_type - in [ - InterfaceTypes.STANDARD, - InterfaceTypes.INPUT_ONLY, - InterfaceTypes.UNIFIED, - ] - else [] - ) - + ([Column.update(visible=False)] if self.interpretation else []) - )} - """, - ) - - def attach_interpretation_events( - self, - interpretation_btn: Button | None, - interpretation_set: List[Interpretation] | None, - input_component_column: Column | None, - interpret_component_column: Column | None, - ): - if interpretation_btn: - interpretation_btn.click( - self.interpret_func, - inputs=self.input_components + self.output_components, - outputs=interpretation_set - or [] + [input_component_column, interpret_component_column], # type: ignore - preprocess=False, - ) - - def render_flagging_buttons(self, flag_btns: List | None): - if flag_btns: - if self.interface_type in [ - InterfaceTypes.STANDARD, - InterfaceTypes.OUTPUT_ONLY, - InterfaceTypes.UNIFIED, - ]: - if ( - self.interface_type == InterfaceTypes.UNIFIED 
- or self.allow_flagging == "auto" - ): - flag_components = self.input_components - else: - flag_components = self.input_components + self.output_components - for flag_btn, flag_option in flag_btns: - flag_method = FlagMethod(self.flagging_callback, flag_option) - flag_btn.click( - flag_method, - inputs=flag_components, - outputs=[], - preprocess=False, - queue=False, - ) - - def render_examples(self): - if self.examples: - non_state_inputs = [ - c for c in self.input_components if not isinstance(c, State) - ] - non_state_outputs = [ - c for c in self.output_components if not isinstance(c, State) - ] - self.examples_handler = Examples( - examples=self.examples, - inputs=non_state_inputs, # type: ignore - outputs=non_state_outputs, # type: ignore - fn=self.fn, - cache_examples=self.cache_examples, - examples_per_page=self.examples_per_page, - _api_mode=self.api_mode, - batch=self.batch, - ) - - def __str__(self): - return self.__repr__() - - def __repr__(self): - repr = f"Gradio Interface for: {self.__name__}" - repr += "\n" + "-" * len(repr) - repr += "\ninputs:" - for component in self.input_components: - repr += "\n|-{}".format(str(component)) - repr += "\noutputs:" - for component in self.output_components: - repr += "\n|-{}".format(str(component)) - return repr - - async def interpret_func(self, *args): - return await self.interpret(list(args)) + [ - Column.update(visible=False), - Column.update(visible=True), - ] - - async def interpret(self, raw_input: List[Any]) -> List[Any]: - return [ - {"original": raw_value, "interpretation": interpretation} - for interpretation, raw_value in zip( - (await interpretation.run_interpret(self, raw_input))[0], raw_input - ) - ] - - def test_launch(self) -> None: - """ - Deprecated. - """ - warnings.warn("The Interface.test_launch() function is deprecated.") - - -@document() -class TabbedInterface(Blocks): - """ - A TabbedInterface is created by providing a list of Interfaces, each of which gets - rendered in a separate tab. - Demos: stt_or_tts - """ - - def __init__( - self, - interface_list: List[Interface], - tab_names: List[str] | None = None, - title: str | None = None, - theme: str = "default", - analytics_enabled: bool | None = None, - css: str | None = None, - ): - """ - Parameters: - interface_list: a list of interfaces to be rendered in tabs. - tab_names: a list of tab names. If None, the tab names will be "Tab 1", "Tab 2", etc. - title: a title for the interface; if provided, appears above the input and output components in large font. Also used as the tab title when opened in a browser window. - theme: which theme to use - right now, only "default" is supported. - analytics_enabled: whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable or default to True. 
- css: custom css or path to custom css file to apply to entire Blocks - Returns: - a Gradio Tabbed Interface for the given interfaces - """ - super().__init__( - title=title or "Gradio", - theme=theme, - analytics_enabled=analytics_enabled, - mode="tabbed_interface", - css=css, - ) - if tab_names is None: - tab_names = ["Tab {}".format(i) for i in range(len(interface_list))] - with self: - if title: - Markdown( - "<h1 style='text-align: center; margin-bottom: 1rem'>" - + title - + "</h1>" - ) - with Tabs(): - for (interface, tab_name) in zip(interface_list, tab_names): - with Tab(label=tab_name): - interface.render() - - -def close_all(verbose: bool = True) -> None: - for io in Interface.get_instances(): - io.close(verbose) diff --git a/spaces/ICML2022/OFA/fairseq/docs/_static/theme_overrides.css b/spaces/ICML2022/OFA/fairseq/docs/_static/theme_overrides.css deleted file mode 100644 index 2a0764193625e1a6fd66ff8af2ccdd0ad6369188..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/docs/_static/theme_overrides.css +++ /dev/null @@ -1,9 +0,0 @@ -.wy-table-responsive table td kbd { - white-space: nowrap; -} -.wy-table-responsive table td { - white-space: normal !important; -} -.wy-table-responsive { - overflow: visible !important; -} diff --git a/spaces/ICML2022/OFA/fairseq/examples/roberta/wsc/wsc_criterion.py b/spaces/ICML2022/OFA/fairseq/examples/roberta/wsc/wsc_criterion.py deleted file mode 100644 index ed0251fdecc3573228ad271f1090aaf914b48cd1..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/roberta/wsc/wsc_criterion.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
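Editor's note (not part of the deleted file): WSCCriterion below scores a Winograd-schema span by masking it out and averaging the masked-LM log-probabilities of its tokens (get_lprobs). By default training uses a margin ranking loss between the query-span score q and each candidate-span score c; passing --wsc-cross-entropy swaps in a cross-entropy over the pair instead. A minimal sketch of the default loss, with alpha and beta standing in for --wsc-margin-alpha and --wsc-margin-beta:

    import torch

    q = torch.tensor(-1.2)          # average log-prob of the query span
    c = torch.tensor([-1.5, -0.9])  # average log-probs of candidate spans
    alpha, beta = 1.0, 0.0          # defaults from add_args below
    loss = (-q + alpha * (c - q + beta).clamp(min=0)).sum()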
- -import math - -import torch -import torch.nn.functional as F -from fairseq import utils -from fairseq.criterions import LegacyFairseqCriterion, register_criterion -from fairseq.data import encoders - - -@register_criterion("wsc") -class WSCCriterion(LegacyFairseqCriterion): - def __init__(self, args, task): - super().__init__(args, task) - if self.args.save_predictions is not None: - self.prediction_h = open(self.args.save_predictions, "w") - else: - self.prediction_h = None - self.bpe = encoders.build_bpe(args.bpe) - self.tokenizer = encoders.build_tokenizer(args.tokenizer) - - def __del__(self): - if self.prediction_h is not None: - self.prediction_h.close() - - @staticmethod - def add_args(parser): - """Add criterion-specific arguments to the parser.""" - parser.add_argument("--wsc-margin-alpha", type=float, metavar="A", default=1.0) - parser.add_argument("--wsc-margin-beta", type=float, metavar="B", default=0.0) - parser.add_argument( - "--wsc-cross-entropy", - action="store_true", - help="use cross entropy formulation instead of margin loss", - ) - parser.add_argument( - "--save-predictions", metavar="FILE", help="file to save predictions to" - ) - - def get_masked_input(self, tokens, mask): - masked_tokens = tokens.clone() - masked_tokens[mask] = self.task.mask - return masked_tokens - - def get_lprobs(self, model, tokens, mask): - logits, _ = model(src_tokens=self.get_masked_input(tokens, mask)) - lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float) - scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1) - mask = mask.type_as(scores) - scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1) - return scores - - def get_loss(self, query_lprobs, cand_lprobs): - if self.args.wsc_cross_entropy: - return F.cross_entropy( - torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0), - query_lprobs.new([0]).long(), - ) - else: - return ( - -query_lprobs - + self.args.wsc_margin_alpha - * (cand_lprobs - query_lprobs + self.args.wsc_margin_beta).clamp(min=0) - ).sum() - - def forward(self, model, sample, reduce=True): - # compute loss and accuracy - loss, nloss = 0.0, 0 - ncorrect, nqueries = 0, 0 - - for i, label in enumerate(sample["labels"]): - query_lprobs = self.get_lprobs( - model, - sample["query_tokens"][i].unsqueeze(0), - sample["query_masks"][i].unsqueeze(0), - ) - cand_lprobs = self.get_lprobs( - model, - sample["candidate_tokens"][i], - sample["candidate_masks"][i], - ) - - pred = (query_lprobs >= cand_lprobs).all().item() - - if label is not None: - label = 1 if label else 0 - ncorrect += 1 if pred == label else 0 - nqueries += 1 - - if label: - # only compute a loss for positive instances - nloss += 1 - loss += self.get_loss(query_lprobs, cand_lprobs) - - id = sample["id"][i].item() - if self.prediction_h is not None: - print("{}\t{}\t{}".format(id, pred, label), file=self.prediction_h) - - if nloss == 0: - loss = torch.tensor(0.0, requires_grad=True) - - sample_size = nqueries if nqueries > 0 else 1 - logging_output = { - "loss": utils.item(loss.data) if reduce else loss.data, - "ntokens": sample["ntokens"], - "nsentences": sample["nsentences"], - "sample_size": sample_size, - "ncorrect": ncorrect, - "nqueries": nqueries, - } - return loss, sample_size, logging_output - - @staticmethod - def aggregate_logging_outputs(logging_outputs): - """Aggregate logging outputs from data parallel training.""" - loss_sum = sum(log.get("loss", 0) for log in logging_outputs) - ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) - nsentences = sum(log.get("nsentences", 0) for 
log in logging_outputs) - sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) - - agg_output = { - "loss": loss_sum / sample_size / math.log(2), - "ntokens": ntokens, - "nsentences": nsentences, - "sample_size": sample_size, - } - - ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs) - nqueries = sum(log.get("nqueries", 0) for log in logging_outputs) - if nqueries > 0: - agg_output["accuracy"] = ncorrect / float(nqueries) - - return agg_output - - -@register_criterion("winogrande") -class WinograndeCriterion(WSCCriterion): - def forward(self, model, sample, reduce=True): - # compute loss and accuracy - query_lprobs = self.get_lprobs( - model, - sample["query_tokens"], - sample["query_masks"], - ) - cand_lprobs = self.get_lprobs( - model, - sample["candidate_tokens"], - sample["candidate_masks"], - ) - pred = query_lprobs >= cand_lprobs - loss = self.get_loss(query_lprobs, cand_lprobs) - - sample_size = sample["query_tokens"].size(0) - ncorrect = pred.sum().item() - logging_output = { - "loss": utils.item(loss.data) if reduce else loss.data, - "ntokens": sample["ntokens"], - "nsentences": sample["nsentences"], - "sample_size": sample_size, - "ncorrect": ncorrect, - "nqueries": sample_size, - } - return loss, sample_size, logging_output diff --git a/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/copy_aligned_text.py b/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/copy_aligned_text.py deleted file mode 100644 index 5f4faa99218b0b30c980cad167c52b2297cd92c3..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/copy_aligned_text.py +++ /dev/null @@ -1,4 +0,0 @@ -import sys - -for idx, line in enumerate(sys.stdin): - print(f"utt{idx:010d} {line}", end='') \ No newline at end of file diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/multihead_attention.py b/spaces/ICML2022/OFA/fairseq/fairseq/modules/multihead_attention.py deleted file mode 100644 index a2516356117847b0d46d965ee942354a2ed23189..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/multihead_attention.py +++ /dev/null @@ -1,500 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math -from typing import Dict, Optional, Tuple - -import torch -import torch.nn.functional as F -from fairseq import utils -from fairseq.incremental_decoding_utils import with_incremental_state -from fairseq.modules.fairseq_dropout import FairseqDropout -from fairseq.modules.quant_noise import quant_noise -from torch import Tensor, nn -from torch.nn import Parameter - - -@with_incremental_state -class MultiheadAttention(nn.Module): - """Multi-headed attention. - - See "Attention Is All You Need" for more details. 
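Example (editor's sketch, not from the original file; assumes torch and fairseq are installed):

    import torch
    from fairseq.modules.multihead_attention import MultiheadAttention

    mha = MultiheadAttention(embed_dim=512, num_heads=8, self_attention=True)
    x = torch.randn(10, 2, 512)  # (time, batch, channel), see forward() below
    out, attn = mha(query=x, key=x, value=x, need_weights=True)
    # out: (10, 2, 512); attn averaged over heads: (2, 10, 10)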
- """ - - def __init__( - self, - embed_dim, - num_heads, - kdim=None, - vdim=None, - dropout=0.0, - bias=True, - add_bias_kv=False, - add_zero_attn=False, - self_attention=False, - encoder_decoder_attention=False, - q_noise=0.0, - qn_block_size=8, - ): - super().__init__() - self.embed_dim = embed_dim - self.kdim = kdim if kdim is not None else embed_dim - self.vdim = vdim if vdim is not None else embed_dim - self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim - - self.num_heads = num_heads - self.dropout_module = FairseqDropout( - dropout, module_name=self.__class__.__name__ - ) - - self.head_dim = embed_dim // num_heads - assert ( - self.head_dim * num_heads == self.embed_dim - ), "embed_dim must be divisible by num_heads" - self.scaling = self.head_dim ** -0.5 - - self.self_attention = self_attention - self.encoder_decoder_attention = encoder_decoder_attention - - assert not self.self_attention or self.qkv_same_dim, ( - "Self-attention requires query, key and " "value to be of the same size" - ) - - self.k_proj = quant_noise( - nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size - ) - self.v_proj = quant_noise( - nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size - ) - self.q_proj = quant_noise( - nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size - ) - - self.out_proj = quant_noise( - nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size - ) - - if add_bias_kv: - self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) - self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) - else: - self.bias_k = self.bias_v = None - - self.add_zero_attn = add_zero_attn - - self.reset_parameters() - - self.onnx_trace = False - - def prepare_for_onnx_export_(self): - self.onnx_trace = True - - def reset_parameters(self): - if self.qkv_same_dim: - # Empirically observed the convergence to be much better with - # the scaled initialization - nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) - nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) - nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) - else: - nn.init.xavier_uniform_(self.k_proj.weight) - nn.init.xavier_uniform_(self.v_proj.weight) - nn.init.xavier_uniform_(self.q_proj.weight) - - nn.init.xavier_uniform_(self.out_proj.weight) - if self.out_proj.bias is not None: - nn.init.constant_(self.out_proj.bias, 0.0) - if self.bias_k is not None: - nn.init.xavier_normal_(self.bias_k) - if self.bias_v is not None: - nn.init.xavier_normal_(self.bias_v) - - def forward( - self, - query, - key: Optional[Tensor], - value: Optional[Tensor], - key_padding_mask: Optional[Tensor] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - need_weights: bool = True, - static_kv: bool = False, - attn_mask: Optional[Tensor] = None, - before_softmax: bool = False, - need_head_weights: bool = False, - ) -> Tuple[Tensor, Optional[Tensor]]: - """Input shape: Time x Batch x Channel - - Args: - key_padding_mask (ByteTensor, optional): mask to exclude - keys that are pads, of shape `(batch, src_len)`, where - padding elements are indicated by 1s. - need_weights (bool, optional): return the attention weights, - averaged over heads (default: False). - attn_mask (ByteTensor, optional): typically used to - implement causal attention, where the mask prevents the - attention from looking forward in time (default: None). 
- before_softmax (bool, optional): return the raw attention - weights and values before the attention softmax. - need_head_weights (bool, optional): return the attention - weights for each head. Implies *need_weights*. Default: - return the average attention weights over all heads. - """ - if need_head_weights: - need_weights = True - - is_tpu = query.device.type == "xla" - - tgt_len, bsz, embed_dim = query.size() - src_len = tgt_len - assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}" - assert list(query.size()) == [tgt_len, bsz, embed_dim] - if key is not None: - src_len, key_bsz, _ = key.size() - if not torch.jit.is_scripting(): - assert key_bsz == bsz - assert value is not None - assert src_len, bsz == value.shape[:2] - - if ( - not self.onnx_trace - and not is_tpu # don't use PyTorch version on TPUs - and incremental_state is None - and not static_kv - # A workaround for quantization to work. Otherwise JIT compilation - # treats bias in linear module as method. - and not torch.jit.is_scripting() - ): - assert key is not None and value is not None - return F.multi_head_attention_forward( - query, - key, - value, - self.embed_dim, - self.num_heads, - torch.empty([0]), - torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), - self.bias_k, - self.bias_v, - self.add_zero_attn, - self.dropout_module.p, - self.out_proj.weight, - self.out_proj.bias, - self.training or self.dropout_module.apply_during_inference, - key_padding_mask, - need_weights, - attn_mask, - use_separate_proj_weight=True, - q_proj_weight=self.q_proj.weight, - k_proj_weight=self.k_proj.weight, - v_proj_weight=self.v_proj.weight, - ) - - if incremental_state is not None: - saved_state = self._get_input_buffer(incremental_state) - if saved_state is not None and "prev_key" in saved_state: - # previous time steps are cached - no need to recompute - # key and value if they are static - if static_kv: - assert self.encoder_decoder_attention and not self.self_attention - key = value = None - else: - saved_state = None - - if self.self_attention: - q = self.q_proj(query) - k = self.k_proj(query) - v = self.v_proj(query) - elif self.encoder_decoder_attention: - # encoder-decoder attention - q = self.q_proj(query) - if key is None: - assert value is None - k = v = None - else: - k = self.k_proj(key) - v = self.v_proj(key) - - else: - assert key is not None and value is not None - q = self.q_proj(query) - k = self.k_proj(key) - v = self.v_proj(value) - q *= self.scaling - - if self.bias_k is not None: - assert self.bias_v is not None - k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) - v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) - if attn_mask is not None: - attn_mask = torch.cat( - [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 - ) - if key_padding_mask is not None: - key_padding_mask = torch.cat( - [ - key_padding_mask, - key_padding_mask.new_zeros(key_padding_mask.size(0), 1), - ], - dim=1, - ) - - q = ( - q.contiguous() - .view(tgt_len, bsz * self.num_heads, self.head_dim) - .transpose(0, 1) - ) - if k is not None: - k = ( - k.contiguous() - .view(-1, bsz * self.num_heads, self.head_dim) - .transpose(0, 1) - ) - if v is not None: - v = ( - v.contiguous() - .view(-1, bsz * self.num_heads, self.head_dim) - .transpose(0, 1) - ) - - if saved_state is not None: - # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) - if "prev_key" in saved_state: - _prev_key = saved_state["prev_key"] - assert _prev_key is not None - prev_key = _prev_key.view(bsz * 
self.num_heads, -1, self.head_dim) - if static_kv: - k = prev_key - else: - assert k is not None - k = torch.cat([prev_key, k], dim=1) - src_len = k.size(1) - if "prev_value" in saved_state: - _prev_value = saved_state["prev_value"] - assert _prev_value is not None - prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) - if static_kv: - v = prev_value - else: - assert v is not None - v = torch.cat([prev_value, v], dim=1) - prev_key_padding_mask: Optional[Tensor] = None - if "prev_key_padding_mask" in saved_state: - prev_key_padding_mask = saved_state["prev_key_padding_mask"] - assert k is not None and v is not None - key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( - key_padding_mask=key_padding_mask, - prev_key_padding_mask=prev_key_padding_mask, - batch_size=bsz, - src_len=k.size(1), - static_kv=static_kv, - ) - - saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim) - saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim) - saved_state["prev_key_padding_mask"] = key_padding_mask - # In this branch incremental_state is never None - assert incremental_state is not None - incremental_state = self._set_input_buffer(incremental_state, saved_state) - assert k is not None - assert k.size(1) == src_len - - # This is part of a workaround to get around fork/join parallelism - # not supporting Optional types. - if key_padding_mask is not None and key_padding_mask.dim() == 0: - key_padding_mask = None - - if key_padding_mask is not None: - assert key_padding_mask.size(0) == bsz - assert key_padding_mask.size(1) == src_len - - if self.add_zero_attn: - assert v is not None - src_len += 1 - k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) - v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) - if attn_mask is not None: - attn_mask = torch.cat( - [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 - ) - if key_padding_mask is not None: - key_padding_mask = torch.cat( - [ - key_padding_mask, - torch.zeros(key_padding_mask.size(0), 1).type_as( - key_padding_mask - ), - ], - dim=1, - ) - - attn_weights = torch.bmm(q, k.transpose(1, 2)) - attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) - - assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] - - if attn_mask is not None: - attn_mask = attn_mask.unsqueeze(0) - if self.onnx_trace: - attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1) - attn_weights += attn_mask - - if key_padding_mask is not None: - # don't attend to padding symbols - attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) - if not is_tpu: - attn_weights = attn_weights.masked_fill( - key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), - float("-inf"), - ) - else: - attn_weights = attn_weights.transpose(0, 2) - attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf")) - attn_weights = attn_weights.transpose(0, 2) - attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) - - if before_softmax: - return attn_weights, v - - attn_weights_float = utils.softmax( - attn_weights, dim=-1, onnx_trace=self.onnx_trace - ) - attn_weights = attn_weights_float.type_as(attn_weights) - attn_probs = self.dropout_module(attn_weights) - - assert v is not None - attn = torch.bmm(attn_probs, v) - assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] - if self.onnx_trace and attn.size(1) == 1: - # when ONNX tracing a single decoder step (sequence length == 1) - 
# the transpose is a no-op copy before view, thus unnecessary - attn = attn.contiguous().view(tgt_len, bsz, embed_dim) - else: - attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) - attn = self.out_proj(attn) - attn_weights: Optional[Tensor] = None - if need_weights: - attn_weights = attn_weights_float.view( - bsz, self.num_heads, tgt_len, src_len - ).transpose(1, 0) - if not need_head_weights: - # average attention weights over heads - attn_weights = attn_weights.mean(dim=0) - - return attn, attn_weights - - @staticmethod - def _append_prev_key_padding_mask( - key_padding_mask: Optional[Tensor], - prev_key_padding_mask: Optional[Tensor], - batch_size: int, - src_len: int, - static_kv: bool, - ) -> Optional[Tensor]: - # saved key padding masks have shape (bsz, seq_len) - if prev_key_padding_mask is not None and static_kv: - new_key_padding_mask = prev_key_padding_mask - elif prev_key_padding_mask is not None and key_padding_mask is not None: - new_key_padding_mask = torch.cat( - [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 - ) - # During incremental decoding, as the padding token enters and - # leaves the frame, there will be a time when prev or current - # is None - elif prev_key_padding_mask is not None: - if src_len > prev_key_padding_mask.size(1): - filler = torch.zeros( - (batch_size, src_len - prev_key_padding_mask.size(1)), - device=prev_key_padding_mask.device, - ) - new_key_padding_mask = torch.cat( - [prev_key_padding_mask.float(), filler.float()], dim=1 - ) - else: - new_key_padding_mask = prev_key_padding_mask.float() - elif key_padding_mask is not None: - if src_len > key_padding_mask.size(1): - filler = torch.zeros( - (batch_size, src_len - key_padding_mask.size(1)), - device=key_padding_mask.device, - ) - new_key_padding_mask = torch.cat( - [filler.float(), key_padding_mask.float()], dim=1 - ) - else: - new_key_padding_mask = key_padding_mask.float() - else: - new_key_padding_mask = prev_key_padding_mask - return new_key_padding_mask - - @torch.jit.export - def reorder_incremental_state( - self, - incremental_state: Dict[str, Dict[str, Optional[Tensor]]], - new_order: Tensor, - ): - """Reorder buffered internal state (for incremental generation).""" - input_buffer = self._get_input_buffer(incremental_state) - if input_buffer is not None: - for k in input_buffer.keys(): - input_buffer_k = input_buffer[k] - if input_buffer_k is not None: - if self.encoder_decoder_attention and input_buffer_k.size( - 0 - ) == new_order.size(0): - break - input_buffer[k] = input_buffer_k.index_select(0, new_order) - incremental_state = self._set_input_buffer(incremental_state, input_buffer) - return incremental_state - - def _get_input_buffer( - self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] - ) -> Dict[str, Optional[Tensor]]: - result = self.get_incremental_state(incremental_state, "attn_state") - if result is not None: - return result - else: - empty_result: Dict[str, Optional[Tensor]] = {} - return empty_result - - def _set_input_buffer( - self, - incremental_state: Dict[str, Dict[str, Optional[Tensor]]], - buffer: Dict[str, Optional[Tensor]], - ): - return self.set_incremental_state(incremental_state, "attn_state", buffer) - - def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int): - return attn_weights - - def upgrade_state_dict_named(self, state_dict, name): - prefix = name + "." 
if name != "" else "" - items_to_add = {} - keys_to_remove = [] - for k in state_dict.keys(): - if k.endswith(prefix + "in_proj_weight"): - # in_proj_weight used to be q + k + v with same dimensions - dim = int(state_dict[k].shape[0] / 3) - items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim] - items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim] - items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :] - - keys_to_remove.append(k) - - k_bias = prefix + "in_proj_bias" - if k_bias in state_dict.keys(): - dim = int(state_dict[k].shape[0] / 3) - items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim] - items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][ - dim : 2 * dim - ] - items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :] - - keys_to_remove.append(prefix + "in_proj_bias") - - for k in keys_to_remove: - del state_dict[k] - - for key, value in items_to_add.items(): - state_dict[key] = value diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/archs/edsr_arch.py b/spaces/Iceclear/StableSR/StableSR/basicsr/archs/edsr_arch.py deleted file mode 100644 index b80566f11fbd4782d68eee8fbf7da686f89dc4e7..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/basicsr/archs/edsr_arch.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch import nn as nn - -from basicsr.archs.arch_util import ResidualBlockNoBN, Upsample, make_layer -from basicsr.utils.registry import ARCH_REGISTRY - - -@ARCH_REGISTRY.register() -class EDSR(nn.Module): - """EDSR network structure. - - Paper: Enhanced Deep Residual Networks for Single Image Super-Resolution. - Ref git repo: https://github.com/thstkdgus35/EDSR-PyTorch - - Args: - num_in_ch (int): Channel number of inputs. - num_out_ch (int): Channel number of outputs. - num_feat (int): Channel number of intermediate features. - Default: 64. - num_block (int): Block number in the trunk network. Default: 16. - upscale (int): Upsampling factor. Support 2^n and 3. - Default: 4. - res_scale (float): Used to scale the residual in residual block. - Default: 1. - img_range (float): Image range. Default: 255. - rgb_mean (tuple[float]): Image mean in RGB orders. - Default: (0.4488, 0.4371, 0.4040), calculated from DIV2K dataset. 
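Example (editor's sketch, not from the original file; assumes torch and basicsr are installed):

    import torch
    from basicsr.archs.edsr_arch import EDSR

    model = EDSR(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=16, upscale=4)
    lr = torch.rand(1, 3, 48, 48)  # RGB batch scaled to [0, 1]
    with torch.no_grad():
        sr = model(lr)             # -> (1, 3, 192, 192) for upscale=4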
- """ - - def __init__(self, - num_in_ch, - num_out_ch, - num_feat=64, - num_block=16, - upscale=4, - res_scale=1, - img_range=255., - rgb_mean=(0.4488, 0.4371, 0.4040)): - super(EDSR, self).__init__() - - self.img_range = img_range - self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1) - - self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1) - self.body = make_layer(ResidualBlockNoBN, num_block, num_feat=num_feat, res_scale=res_scale, pytorch_init=True) - self.conv_after_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - self.upsample = Upsample(upscale, num_feat) - self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) - - def forward(self, x): - self.mean = self.mean.type_as(x) - - x = (x - self.mean) * self.img_range - x = self.conv_first(x) - res = self.conv_after_body(self.body(x)) - res += x - - x = self.conv_last(self.upsample(res)) - x = x / self.img_range + self.mean - - return x diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/metrics/metric_util.py b/spaces/Iceclear/StableSR/StableSR/basicsr/metrics/metric_util.py deleted file mode 100644 index 2a27c70a043beeeb59cfaf533079492293065448..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/basicsr/metrics/metric_util.py +++ /dev/null @@ -1,45 +0,0 @@ -import numpy as np - -from basicsr.utils import bgr2ycbcr - - -def reorder_image(img, input_order='HWC'): - """Reorder images to 'HWC' order. - - If the input_order is (h, w), return (h, w, 1); - If the input_order is (c, h, w), return (h, w, c); - If the input_order is (h, w, c), return as it is. - - Args: - img (ndarray): Input image. - input_order (str): Whether the input order is 'HWC' or 'CHW'. - If the input image shape is (h, w), input_order will not have - effects. Default: 'HWC'. - - Returns: - ndarray: reordered image. - """ - - if input_order not in ['HWC', 'CHW']: - raise ValueError(f"Wrong input_order {input_order}. Supported input_orders are 'HWC' and 'CHW'") - if len(img.shape) == 2: - img = img[..., None] - if input_order == 'CHW': - img = img.transpose(1, 2, 0) - return img - - -def to_y_channel(img): - """Change to Y channel of YCbCr. - - Args: - img (ndarray): Images with range [0, 255]. - - Returns: - (ndarray): Images with range [0, 255] (float type) without round. - """ - img = img.astype(np.float32) / 255. - if img.ndim == 3 and img.shape[2] == 3: - img = bgr2ycbcr(img, y_only=True) - img = img[..., None] - return img * 255. 
diff --git a/spaces/Iqbalzz/hololive-rvc-models/infer_pack/modules.py b/spaces/Iqbalzz/hololive-rvc-models/infer_pack/modules.py deleted file mode 100644 index 960481cedad9a6106f2bf0b9e86e82b120f7b33f..0000000000000000000000000000000000000000 --- a/spaces/Iqbalzz/hololive-rvc-models/infer_pack/modules.py +++ /dev/null @@ -1,522 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from infer_pack.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = 
self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - 
padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * 
x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/JeffJing/ZookChatBot/steamship/base/environments.py b/spaces/JeffJing/ZookChatBot/steamship/base/environments.py deleted file mode 100644 index f5d7662fdb2f3c16b52a1b4190166691a3cd8579..0000000000000000000000000000000000000000 --- a/spaces/JeffJing/ZookChatBot/steamship/base/environments.py +++ /dev/null @@ -1,99 +0,0 @@ -import os -from enum import Enum - -from steamship.base.configuration import Configuration -from steamship.base.error import SteamshipError - - -class RuntimeEnvironments(str, Enum): - REPLIT = "replit" - LOCALHOST = "localhost" - - -def _interactively_get_key(env: RuntimeEnvironments): - print( - """Get your free API key here: https://steamship.com/account/api - -You'll get immediate access to our SDK for AI models, including OpenAI, GPT, Cohere, and more. -""" - ) - - api_key = input("Paste your API key to run: ") - - while len(api_key.strip()) == 0: - api_key = input("API Key: ") - - os.environ["STEAMSHIP_API_KEY"] = api_key - - if env == RuntimeEnvironments.REPLIT: - print( - """ -This key is set temporarily. In the future, you can: -- Set the STEAMSHIP_API_KEY Replit Secret -- Close and re-open any Replit shells to make sure secrets are refreshed. - -""" - ) - elif env == RuntimeEnvironments.LOCALHOST: - print( - """ -This key is set temporarily. 
In the future, you can: -- Set the STEAMSHIP_API_KEY environment variable -- Run `ship login` to create a ~/.steamship.json credential file - -""" - ) - - -def _report_error_and_exit(env: RuntimeEnvironments): - if env == RuntimeEnvironments.REPLIT: - print( - """To run this Replit, you will need a Steamship API Key. - -1) If you're viewing someone else's Replit, clone it - -2) Visit https://steamship.com/account/api to get a key - -3) Add your key as a Replit secret named STEAMSHIP_API_KEY - -4) Close and re-open any shells to make sure your new secret is available - -Then try running again!""" - ) - elif env == RuntimeEnvironments.LOCALHOST: - print( - """To run this script, you will need a Steamship API Key. - -1) Visit https://steamship.com/account/api to get a key - -2) Set your key as the environment variable STEAMSHIP_API_KEY - -Then try running again! - -If you have pip-installed `steamship`, you can also try setting your key by simply running `ship login`. -""" - ) - exit(-1) - - -def check_environment(env: RuntimeEnvironments, interactively_set_key: bool = True): - # This will try loading from STEAMSHIP_API_KEY and also ~/.steamship.json - try: - config = Configuration() - - # If an API key is set, we're good to go! - if config.api_key: - return - except SteamshipError: - # The Configuration object will throw an error if there is no API Key found. - # Since that error is expected from the context of this function, we pass on it to handle it in a more - # user-interactive way. - pass - - # If we're hot-loading config, do it here! - if interactively_set_key: - _interactively_get_key(env) - return - - # If we're still here, we're not interactively setting the key. Display an error message and exit. - _report_error_and_exit(env) diff --git a/spaces/JeffJing/ZookChatBot/steamship/plugin/generator.py b/spaces/JeffJing/ZookChatBot/steamship/plugin/generator.py deleted file mode 100644 index c1c71f3ab1ea95806f7d92ce48c6cedecb7d3965..0000000000000000000000000000000000000000 --- a/spaces/JeffJing/ZookChatBot/steamship/plugin/generator.py +++ /dev/null @@ -1,72 +0,0 @@ -import logging -from abc import ABC, abstractmethod - -from steamship.invocable import InvocableResponse, post -from steamship.invocable.plugin_service import PluginRequest, PluginService, TrainablePluginService -from steamship.plugin.inputs.raw_block_and_tag_plugin_input import RawBlockAndTagPluginInput -from steamship.plugin.inputs.train_plugin_input import TrainPluginInput -from steamship.plugin.inputs.training_parameter_plugin_input import TrainingParameterPluginInput -from steamship.plugin.outputs.raw_block_and_tag_plugin_output import RawBlockAndTagPluginOutput -from steamship.plugin.outputs.train_plugin_output import TrainPluginOutput -from steamship.plugin.outputs.training_parameter_plugin_output import TrainingParameterPluginOutput -from steamship.plugin.trainable_model import TrainableModel - -# Note! -# ===== -# -# This is the PLUGIN IMPLEMENTOR's View of a Generator. -# -# If you are using the Steamship Client, you probably want steamship.client.operations.generator instead -# of this file. 
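-#
-# As a quick illustration (a sketch, not code from this repo), a concrete
-# plugin would subclass Generator below and implement `run`, e.g.:
-#
-#     class EchoGenerator(Generator):
-#         def run(self, request):
-#             # Echo the inbound blocks back as the "generated" output.
-#             return InvocableResponse(
-#                 data=RawBlockAndTagPluginOutput(blocks=request.data.blocks)
-#             )
-#
-# The InvocableResponse/RawBlockAndTagPluginOutput constructor arguments are
-# assumptions here; check the Steamship SDK for the exact field names.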
-#
-
-
-class Generator(PluginService[RawBlockAndTagPluginInput, RawBlockAndTagPluginOutput], ABC):
-    @abstractmethod
-    def run(
-        self, request: PluginRequest[RawBlockAndTagPluginInput]
-    ) -> InvocableResponse[RawBlockAndTagPluginOutput]:
-        raise NotImplementedError()
-
-    @post("generate")
-    def run_endpoint(self, **kwargs) -> InvocableResponse[RawBlockAndTagPluginOutput]:
-        """Exposes the Generator's `run` operation to the Steamship Engine via the expected HTTP path POST /generate"""
-        return self.run(PluginRequest[RawBlockAndTagPluginInput].parse_obj(kwargs))
-
-
-class TrainableGenerator(
-    TrainablePluginService[RawBlockAndTagPluginInput, RawBlockAndTagPluginOutput], ABC
-):
-    @abstractmethod
-    def run_with_model(
-        self, request: PluginRequest[RawBlockAndTagPluginInput], model: TrainableModel
-    ) -> InvocableResponse[RawBlockAndTagPluginOutput]:
-        raise NotImplementedError()
-
-    # noinspection PyUnusedLocal
-    @post("generate")
-    def run_endpoint(self, **kwargs) -> InvocableResponse[RawBlockAndTagPluginOutput]:
-        """Exposes the Generator's `run` operation to the Steamship Engine via the expected HTTP path POST /generate"""
-        return self.run(PluginRequest[RawBlockAndTagPluginInput].parse_obj(kwargs))
-
-    # noinspection PyUnusedLocal
-    @post("getTrainingParameters")
-    def get_training_parameters_endpoint(
-        self, **kwargs
-    ) -> InvocableResponse[TrainingParameterPluginOutput]:
-        """Exposes the Service's `get_training_parameters` operation to the Steamship Engine via the expected HTTP path POST /getTrainingParameters"""
-        return self.get_training_parameters(PluginRequest[TrainingParameterPluginInput](**kwargs))
-
-    # noinspection PyUnusedLocal
-    @post("train")
-    def train_endpoint(self, **kwargs) -> InvocableResponse[TrainPluginOutput]:
-        """Exposes the Service's `train` operation to the Steamship Engine via the expected HTTP path POST /train"""
-        logging.info(f"Generator:train_endpoint called.
Calling train {kwargs}") - arg = PluginRequest[TrainPluginInput].parse_obj(kwargs) - model = self.model_cls()() - model.receive_config(config=self.config) - - if arg.is_status_check: - return self.train_status(arg, model) - else: - return self.train(arg, model) diff --git a/spaces/Joom/Front-end-code-generation-from-images/compiler/__init__.py b/spaces/Joom/Front-end-code-generation-from-images/compiler/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/train/mel_processing.py b/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/train/mel_processing.py deleted file mode 100644 index f458775bf62b79f791b419ca7ed62c550ae252d5..0000000000000000000000000000000000000000 --- a/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/train/mel_processing.py +++ /dev/null @@ -1,132 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn -import logging - -logger = logging.getLogger(__name__) - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - return dynamic_range_compression_torch(magnitudes) - - -def spectral_de_normalize_torch(magnitudes): - return dynamic_range_decompression_torch(magnitudes) - - -# Reusable banks -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - """Convert waveform into Linear-frequency Linear-amplitude spectrogram. 
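-
-    A minimal usage sketch (the sample rate and FFT settings below are
-    illustrative assumptions, not values from this repo):
-
-        >>> y = torch.randn(1, 16000).clamp(-1, 1)  # 1 s of fake 16 kHz audio
-        >>> spec = spectrogram_torch(y, n_fft=1024, sampling_rate=16000,
-        ...                          hop_size=256, win_size=1024)
-        >>> spec.shape[1] == 1024 // 2 + 1  # onesided STFT -> n_fft/2+1 freq bins
-        True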
- - Args: - y :: (B, T) - Audio waveforms - n_fft - sampling_rate - hop_size - win_size - center - Returns: - :: (B, Freq, Frame) - Linear-frequency Linear-amplitude spectrogram - """ - # Validation - if torch.min(y) < -1.07: - logger.debug("min value is %s", str(torch.min(y))) - if torch.max(y) > 1.07: - logger.debug("max value is %s", str(torch.max(y))) - - # Window - Cache if needed - global hann_window - dtype_device = str(y.dtype) + "_" + str(y.device) - wnsize_dtype_device = str(win_size) + "_" + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to( - dtype=y.dtype, device=y.device - ) - - # Padding - y = torch.nn.functional.pad( - y.unsqueeze(1), - (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), - mode="reflect", - ) - y = y.squeeze(1) - - # Complex Spectrogram :: (B, T) -> (B, Freq, Frame, RealComplex=2) - spec = torch.stft( - y, - n_fft, - hop_length=hop_size, - win_length=win_size, - window=hann_window[wnsize_dtype_device], - center=center, - pad_mode="reflect", - normalized=False, - onesided=True, - return_complex=False, - ) - - # Linear-frequency Linear-amplitude spectrogram :: (B, Freq, Frame, RealComplex=2) -> (B, Freq, Frame) - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - # MelBasis - Cache if needed - global mel_basis - dtype_device = str(spec.dtype) + "_" + str(spec.device) - fmax_dtype_device = str(fmax) + "_" + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn( - sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax - ) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to( - dtype=spec.dtype, device=spec.device - ) - - # Mel-frequency Log-amplitude spectrogram :: (B, Freq=num_mels, Frame) - melspec = torch.matmul(mel_basis[fmax_dtype_device], spec) - melspec = spectral_normalize_torch(melspec) - return melspec - - -def mel_spectrogram_torch( - y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False -): - """Convert waveform into Mel-frequency Log-amplitude spectrogram. 
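-
-    A minimal usage sketch (hyperparameters are illustrative assumptions,
-    not values from this repo):
-
-        >>> y = torch.randn(1, 32000).clamp(-1, 1)
-        >>> mel = mel_spectrogram_torch(y, n_fft=1024, num_mels=80,
-        ...                             sampling_rate=32000, hop_size=320,
-        ...                             win_size=1024, fmin=0, fmax=None)
-        >>> mel.shape[1] == 80  # (B, num_mels, Frame)
-        True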
-
-    Args:
-        y :: (B, T) - Waveforms
-    Returns:
-        melspec :: (B, Freq, Frame) - Mel-frequency Log-amplitude spectrogram
-    """
-    # Linear-frequency Linear-amplitude spectrogram :: (B, T) -> (B, Freq, Frame)
-    spec = spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center)
-
-    # Mel-frequency Log-amplitude spectrogram :: (B, Freq, Frame) -> (B, Freq=num_mels, Frame)
-    melspec = spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax)
-
-    return melspec
diff --git a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/cli/__init__.py b/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/cli/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Kvikontent/kviimager/README.md b/spaces/Kvikontent/kviimager/README.md
deleted file mode 100644
index 909d33b348dead4212606fed1aa76e65d5e1eb0c..0000000000000000000000000000000000000000
--- a/spaces/Kvikontent/kviimager/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: KVIImager
-emoji: 🌍
-colorFrom: blue
-colorTo: red
-sdk: gradio
-sdk_version: 3.47.1
-app_file: app.py
-pinned: true
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/Lavanya30/hiddenhunger/app.py b/spaces/Lavanya30/hiddenhunger/app.py
deleted file mode 100644
index d4724f1c7298fdd1d2a8bed0b8afe20d768f6e28..0000000000000000000000000000000000000000
--- a/spaces/Lavanya30/hiddenhunger/app.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import streamlit as st
-import requests
-import PIL
-from PIL import Image
-
-
-
-st.set_page_config(
-    page_title="Hidden hunger",
-    page_icon="👁🌿"
-)
-
-st.title("Hidden Hunger Detection")
-st.subheader("This website detects micronutrient deficiencies in humans and plants.")
-
-
-st.write("Hidden hunger in humans is detected from images of nails and eyes. The classes that can be detected for humans are iron, iodine, vitamin B12, vitamin D, and zinc deficiency, plus healthy.")
-st.write("Hidden hunger in plants is detected from images of banana leaves. The classes that can be detected for plants are iron, zinc, manganese, and boron deficiency, plus healthy.")
-
-st.sidebar.success("Select a page above.")
-
-def local_css(file_name):
-    with open(file_name) as f:
-        st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
-
-local_css("style/style.css")
-
-
-# Load the image file
-image = Image.open(r'micro.jpg')
-# Display the image in the Streamlit app
-st.image(image, use_column_width=True)
-
diff --git a/spaces/Lightxr/sd-diffusers-webui/app.py b/spaces/Lightxr/sd-diffusers-webui/app.py
deleted file mode 100644
index 2c09b34b8d4cfa9a3528c3674bb43b9f70530529..0000000000000000000000000000000000000000
--- a/spaces/Lightxr/sd-diffusers-webui/app.py
+++ /dev/null
@@ -1,878 +0,0 @@
-import random
-import tempfile
-import time
-import gradio as gr
-import numpy as np
-import torch
-import math
-import re
-
-from gradio import inputs
-from diffusers import (
-    AutoencoderKL,
-    DDIMScheduler,
-    UNet2DConditionModel,
-)
-from modules.model import (
-    CrossAttnProcessor,
-    StableDiffusionPipeline,
-)
-from torchvision import transforms
-from transformers import CLIPTokenizer, CLIPTextModel
-from PIL import Image
-from pathlib import Path
-from safetensors.torch import load_file
-import modules.safe as _
-from modules.lora import LoRANetwork
-
-models = [
-    ("AbyssOrangeMix2", "Korakoe/AbyssOrangeMix2-HF", 2),
-    ("Pastel Mix",
"andite/pastel-mix", 2), - ("Basil Mix", "nuigurumi/basil_mix", 2) -] - -keep_vram = ["Korakoe/AbyssOrangeMix2-HF", "andite/pastel-mix"] -base_name, base_model, clip_skip = models[0] - -samplers_k_diffusion = [ - ("Euler a", "sample_euler_ancestral", {}), - ("Euler", "sample_euler", {}), - ("LMS", "sample_lms", {}), - ("Heun", "sample_heun", {}), - ("DPM2", "sample_dpm_2", {"discard_next_to_last_sigma": True}), - ("DPM2 a", "sample_dpm_2_ancestral", {"discard_next_to_last_sigma": True}), - ("DPM++ 2S a", "sample_dpmpp_2s_ancestral", {}), - ("DPM++ 2M", "sample_dpmpp_2m", {}), - ("DPM++ SDE", "sample_dpmpp_sde", {}), - ("LMS Karras", "sample_lms", {"scheduler": "karras"}), - ("DPM2 Karras", "sample_dpm_2", {"scheduler": "karras", "discard_next_to_last_sigma": True}), - ("DPM2 a Karras", "sample_dpm_2_ancestral", {"scheduler": "karras", "discard_next_to_last_sigma": True}), - ("DPM++ 2S a Karras", "sample_dpmpp_2s_ancestral", {"scheduler": "karras"}), - ("DPM++ 2M Karras", "sample_dpmpp_2m", {"scheduler": "karras"}), - ("DPM++ SDE Karras", "sample_dpmpp_sde", {"scheduler": "karras"}), -] - -# samplers_diffusers = [ -# ("DDIMScheduler", "diffusers.schedulers.DDIMScheduler", {}) -# ("DDPMScheduler", "diffusers.schedulers.DDPMScheduler", {}) -# ("DEISMultistepScheduler", "diffusers.schedulers.DEISMultistepScheduler", {}) -# ] - -start_time = time.time() -timeout = 90 - -scheduler = DDIMScheduler.from_pretrained( - base_model, - subfolder="scheduler", -) -vae = AutoencoderKL.from_pretrained( - "stabilityai/sd-vae-ft-ema", - torch_dtype=torch.float16 -) -text_encoder = CLIPTextModel.from_pretrained( - base_model, - subfolder="text_encoder", - torch_dtype=torch.float16, -) -tokenizer = CLIPTokenizer.from_pretrained( - base_model, - subfolder="tokenizer", - torch_dtype=torch.float16, -) -unet = UNet2DConditionModel.from_pretrained( - base_model, - subfolder="unet", - torch_dtype=torch.float16, -) -pipe = StableDiffusionPipeline( - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - vae=vae, - scheduler=scheduler, -) - -unet.set_attn_processor(CrossAttnProcessor) -pipe.setup_text_encoder(clip_skip, text_encoder) -if torch.cuda.is_available(): - pipe = pipe.to("cuda") - -def get_model_list(): - return models - -te_cache = { - base_model: text_encoder -} - -unet_cache = { - base_model: unet -} - -lora_cache = { - base_model: LoRANetwork(text_encoder, unet) -} - -te_base_weight_length = text_encoder.get_input_embeddings().weight.data.shape[0] -original_prepare_for_tokenization = tokenizer.prepare_for_tokenization -current_model = base_model - -def setup_model(name, lora_state=None, lora_scale=1.0): - global pipe, current_model - - keys = [k[0] for k in models] - model = models[keys.index(name)][1] - if model not in unet_cache: - unet = UNet2DConditionModel.from_pretrained(model, subfolder="unet", torch_dtype=torch.float16) - text_encoder = CLIPTextModel.from_pretrained(model, subfolder="text_encoder", torch_dtype=torch.float16) - - unet_cache[model] = unet - te_cache[model] = text_encoder - lora_cache[model] = LoRANetwork(text_encoder, unet) - - if current_model != model: - if current_model not in keep_vram: - # offload current model - unet_cache[current_model].to("cpu") - te_cache[current_model].to("cpu") - lora_cache[current_model].to("cpu") - current_model = model - - local_te, local_unet, local_lora, = te_cache[model], unet_cache[model], lora_cache[model] - local_unet.set_attn_processor(CrossAttnProcessor()) - local_lora.reset() - clip_skip = models[keys.index(name)][2] - - if 
torch.cuda.is_available():
-        local_unet.to("cuda")
-        local_te.to("cuda")
-
-    if lora_state is not None and lora_state != "":
-        local_lora.load(lora_state, lora_scale)
-        local_lora.to(local_unet.device, dtype=local_unet.dtype)
-
-    pipe.text_encoder, pipe.unet = local_te, local_unet
-    pipe.setup_unet(local_unet)
-    pipe.tokenizer.prepare_for_tokenization = original_prepare_for_tokenization
-    pipe.tokenizer.added_tokens_encoder = {}
-    pipe.tokenizer.added_tokens_decoder = {}
-    pipe.setup_text_encoder(clip_skip, local_te)
-    return pipe
-
-
-def error_str(error, title="Error"):
-    return (
-        f"""#### {title}
-            {error}"""
-        if error
-        else ""
-    )
-
-def make_token_names(embs):
-    all_tokens = []
-    for name, vec in embs.items():
-        tokens = [f'emb-{name}-{i}' for i in range(len(vec))]
-        all_tokens.append(tokens)
-    return all_tokens
-
-def setup_tokenizer(tokenizer, embs):
-    reg_match = [re.compile(fr"(?:^|(?<=\s|,)){k}(?=,|\s|$)") for k in embs.keys()]
-    clip_keywords = [' '.join(s) for s in make_token_names(embs)]
-
-    def parse_prompt(prompt: str):
-        for m, v in zip(reg_match, clip_keywords):
-            prompt = m.sub(v, prompt)
-        return prompt
-
-    def prepare_for_tokenization(self, text: str, is_split_into_words: bool = False, **kwargs):
-        text = parse_prompt(text)
-        r = original_prepare_for_tokenization(text, is_split_into_words, **kwargs)
-        return r
-    tokenizer.prepare_for_tokenization = prepare_for_tokenization.__get__(tokenizer, CLIPTokenizer)
-    return [t for sublist in make_token_names(embs) for t in sublist]
-
-
-def convert_size(size_bytes):
-    if size_bytes == 0:
-        return "0B"
-    size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
-    i = int(math.floor(math.log(size_bytes, 1024)))
-    p = math.pow(1024, i)
-    s = round(size_bytes / p, 2)
-    return "%s %s" % (s, size_name[i])
-
-def inference(
-    prompt,
-    guidance,
-    steps,
-    width=512,
-    height=512,
-    seed=0,
-    neg_prompt="",
-    state=None,
-    g_strength=0.4,
-    img_input=None,
-    i2i_scale=0.5,
-    hr_enabled=False,
-    hr_method="Latent",
-    hr_scale=1.5,
-    hr_denoise=0.8,
-    sampler="DPM++ 2M Karras",
-    embs=None,
-    model=None,
-    lora_state=None,
-    lora_scale=None,
-):
-    if seed is None or seed == 0:
-        seed = random.randint(0, 2147483647)
-
-    pipe = setup_model(model, lora_state, lora_scale)
-    generator = torch.Generator("cuda").manual_seed(int(seed))
-    start_time = time.time()
-
-    sampler_name, sampler_opt = None, None
-    for label, funcname, options in samplers_k_diffusion:
-        if label == sampler:
-            sampler_name, sampler_opt = funcname, options
-
-    tokenizer, text_encoder = pipe.tokenizer, pipe.text_encoder
-    if embs is not None and len(embs) > 0:
-        ti_embs = {}
-        for name, file in embs.items():
-            if str(file).endswith(".pt"):
-                loaded_learned_embeds = torch.load(file, map_location="cpu")
-            else:
-                loaded_learned_embeds = load_file(file, device="cpu")
-            loaded_learned_embeds = loaded_learned_embeds["string_to_param"]["*"] if "string_to_param" in loaded_learned_embeds else loaded_learned_embeds
-            ti_embs[name] = loaded_learned_embeds
-
-        if len(ti_embs) > 0:
-            tokens = setup_tokenizer(tokenizer, ti_embs)
-            added_tokens = tokenizer.add_tokens(tokens)
-            delta_weight = torch.cat([val for val in ti_embs.values()], dim=0)
-
-            assert added_tokens == delta_weight.shape[0]
-            text_encoder.resize_token_embeddings(len(tokenizer))
-            token_embeds = text_encoder.get_input_embeddings().weight.data
-            token_embeds[-delta_weight.shape[0]:] = delta_weight
-
-    config = {
-        "negative_prompt": neg_prompt,
-        "num_inference_steps": int(steps),
-        "guidance_scale": guidance,
-
"generator": generator, - "sampler_name": sampler_name, - "sampler_opt": sampler_opt, - "pww_state": state, - "pww_attn_weight": g_strength, - "start_time": start_time, - "timeout": timeout, - } - - if img_input is not None: - ratio = min(height / img_input.height, width / img_input.width) - img_input = img_input.resize( - (int(img_input.width * ratio), int(img_input.height * ratio)), Image.LANCZOS - ) - result = pipe.img2img(prompt, image=img_input, strength=i2i_scale, **config) - elif hr_enabled: - result = pipe.txt2img( - prompt, - width=width, - height=height, - upscale=True, - upscale_x=hr_scale, - upscale_denoising_strength=hr_denoise, - **config, - **latent_upscale_modes[hr_method], - ) - else: - result = pipe.txt2img(prompt, width=width, height=height, **config) - - end_time = time.time() - vram_free, vram_total = torch.cuda.mem_get_info() - print(f"done: model={model}, res={width}x{height}, step={steps}, time={round(end_time-start_time, 2)}s, vram_alloc={convert_size(vram_total-vram_free)}/{convert_size(vram_total)}") - return gr.Image.update(result[0][0], label=f"Initial Seed: {seed}") - - -color_list = [] - - -def get_color(n): - for _ in range(n - len(color_list)): - color_list.append(tuple(np.random.random(size=3) * 256)) - return color_list - - -def create_mixed_img(current, state, w=512, h=512): - w, h = int(w), int(h) - image_np = np.full([h, w, 4], 255) - if state is None: - state = {} - - colors = get_color(len(state)) - idx = 0 - - for key, item in state.items(): - if item["map"] is not None: - m = item["map"] < 255 - alpha = 150 - if current == key: - alpha = 200 - image_np[m] = colors[idx] + (alpha,) - idx += 1 - - return image_np - - -# width.change(apply_new_res, inputs=[width, height, global_stats], outputs=[global_stats, sp, rendered]) -def apply_new_res(w, h, state): - w, h = int(w), int(h) - - for key, item in state.items(): - if item["map"] is not None: - item["map"] = resize(item["map"], w, h) - - update_img = gr.Image.update(value=create_mixed_img("", state, w, h)) - return state, update_img - - -def detect_text(text, state, width, height): - - if text is None or text == "": - return None, None, gr.Radio.update(value=None), None - - t = text.split(",") - new_state = {} - - for item in t: - item = item.strip() - if item == "": - continue - if state is not None and item in state: - new_state[item] = { - "map": state[item]["map"], - "weight": state[item]["weight"], - "mask_outsides": state[item]["mask_outsides"], - } - else: - new_state[item] = { - "map": None, - "weight": 0.5, - "mask_outsides": False - } - update = gr.Radio.update(choices=[key for key in new_state.keys()], value=None) - update_img = gr.update(value=create_mixed_img("", new_state, width, height)) - update_sketch = gr.update(value=None, interactive=False) - return new_state, update_sketch, update, update_img - - -def resize(img, w, h): - trs = transforms.Compose( - [ - transforms.ToPILImage(), - transforms.Resize(min(h, w)), - transforms.CenterCrop((h, w)), - ] - ) - result = np.array(trs(img), dtype=np.uint8) - return result - - -def switch_canvas(entry, state, width, height): - if entry == None: - return None, 0.5, False, create_mixed_img("", state, width, height) - - return ( - gr.update(value=None, interactive=True), - gr.update(value=state[entry]["weight"] if entry in state else 0.5), - gr.update(value=state[entry]["mask_outsides"] if entry in state else False), - create_mixed_img(entry, state, width, height), - ) - - -def apply_canvas(selected, draw, state, w, h): - if selected in state: - 
w, h = int(w), int(h) - state[selected]["map"] = resize(draw, w, h) - return state, gr.Image.update(value=create_mixed_img(selected, state, w, h)) - - -def apply_weight(selected, weight, state): - if selected in state: - state[selected]["weight"] = weight - return state - - -def apply_option(selected, mask, state): - if selected in state: - state[selected]["mask_outsides"] = mask - return state - - -# sp2, radio, width, height, global_stats -def apply_image(image, selected, w, h, strgength, mask, state): - if selected in state: - state[selected] = { - "map": resize(image, w, h), - "weight": strgength, - "mask_outsides": mask - } - - return state, gr.Image.update(value=create_mixed_img(selected, state, w, h)) - - -# [ti_state, lora_state, ti_vals, lora_vals, uploads] -def add_net(files, ti_state, lora_state): - if files is None: - return ti_state, "", lora_state, None - - for file in files: - item = Path(file.name) - stripedname = str(item.stem).strip() - if item.suffix == ".pt": - state_dict = torch.load(file.name, map_location="cpu") - else: - state_dict = load_file(file.name, device="cpu") - if any("lora" in k for k in state_dict.keys()): - lora_state = file.name - else: - ti_state[stripedname] = file.name - - return ( - ti_state, - lora_state, - gr.Text.update(f"{[key for key in ti_state.keys()]}"), - gr.Text.update(f"{lora_state}"), - gr.Files.update(value=None), - ) - - -# [ti_state, lora_state, ti_vals, lora_vals, uploads] -def clean_states(ti_state, lora_state): - return ( - dict(), - None, - gr.Text.update(f""), - gr.Text.update(f""), - gr.File.update(value=None), - ) - - -latent_upscale_modes = { - "Latent": {"upscale_method": "bilinear", "upscale_antialias": False}, - "Latent (antialiased)": {"upscale_method": "bilinear", "upscale_antialias": True}, - "Latent (bicubic)": {"upscale_method": "bicubic", "upscale_antialias": False}, - "Latent (bicubic antialiased)": { - "upscale_method": "bicubic", - "upscale_antialias": True, - }, - "Latent (nearest)": {"upscale_method": "nearest", "upscale_antialias": False}, - "Latent (nearest-exact)": { - "upscale_method": "nearest-exact", - "upscale_antialias": False, - }, -} - -css = """ -.finetuned-diffusion-div div{ - display:inline-flex; - align-items:center; - gap:.8rem; - font-size:1.75rem; - padding-top:2rem; -} -.finetuned-diffusion-div div h1{ - font-weight:900; - margin-bottom:7px -} -.finetuned-diffusion-div p{ - margin-bottom:10px; - font-size:94% -} -.box { - float: left; - height: 20px; - width: 20px; - margin-bottom: 15px; - border: 1px solid black; - clear: both; -} -a{ - text-decoration:underline -} -.tabs{ - margin-top:0; - margin-bottom:0 -} -#gallery{ - min-height:20rem -} -.no-border { - border: none !important; -} - """ -with gr.Blocks(css=css) as demo: - gr.HTML( - f""" - <div class="finetuned-diffusion-div"> - <div> - <h1>Demo for diffusion models</h1> - </div> - <p>Hso @ nyanko.sketch2img.gradio</p> - </div> - """ - ) - global_stats = gr.State(value={}) - - with gr.Row(): - - with gr.Column(scale=55): - model = gr.Dropdown( - choices=[k[0] for k in get_model_list()], - label="Model", - value=base_name, - ) - image_out = gr.Image(height=512) - # gallery = gr.Gallery( - # label="Generated images", show_label=False, elem_id="gallery" - # ).style(grid=[1], height="auto") - - with gr.Column(scale=45): - - with gr.Group(): - - with gr.Row(): - with gr.Column(scale=70): - - prompt = gr.Textbox( - label="Prompt", - value="loli cat girl, blue eyes, flat chest, solo, long messy silver hair, blue capelet, cat ears, cat tail, upper 
body", - show_label=True, - max_lines=4, - placeholder="Enter prompt.", - ) - neg_prompt = gr.Textbox( - label="Negative Prompt", - value="bad quality, low quality, jpeg artifact, cropped", - show_label=True, - max_lines=4, - placeholder="Enter negative prompt.", - ) - - generate = gr.Button(value="Generate").style( - rounded=(False, True, True, False) - ) - - with gr.Tab("Options"): - - with gr.Group(): - - # n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1) - with gr.Row(): - guidance = gr.Slider( - label="Guidance scale", value=7.5, maximum=15 - ) - steps = gr.Slider( - label="Steps", value=25, minimum=2, maximum=50, step=1 - ) - - with gr.Row(): - width = gr.Slider( - label="Width", value=512, minimum=64, maximum=768, step=64 - ) - height = gr.Slider( - label="Height", value=512, minimum=64, maximum=768, step=64 - ) - - sampler = gr.Dropdown( - value="DPM++ 2M Karras", - label="Sampler", - choices=[s[0] for s in samplers_k_diffusion], - ) - seed = gr.Number(label="Seed (0 = random)", value=0) - - with gr.Tab("Image to image"): - with gr.Group(): - - inf_image = gr.Image( - label="Image", height=256, tool="editor", type="pil" - ) - inf_strength = gr.Slider( - label="Transformation strength", - minimum=0, - maximum=1, - step=0.01, - value=0.5, - ) - - def res_cap(g, w, h, x): - if g: - return f"Enable upscaler: {w}x{h} to {int(w*x)}x{int(h*x)}" - else: - return "Enable upscaler" - - with gr.Tab("Hires fix"): - with gr.Group(): - - hr_enabled = gr.Checkbox(label="Enable upscaler", value=False) - hr_method = gr.Dropdown( - [key for key in latent_upscale_modes.keys()], - value="Latent", - label="Upscale method", - ) - hr_scale = gr.Slider( - label="Upscale factor", - minimum=1.0, - maximum=1.5, - step=0.1, - value=1.2, - ) - hr_denoise = gr.Slider( - label="Denoising strength", - minimum=0.0, - maximum=1.0, - step=0.1, - value=0.8, - ) - - hr_scale.change( - lambda g, x, w, h: gr.Checkbox.update( - label=res_cap(g, w, h, x) - ), - inputs=[hr_enabled, hr_scale, width, height], - outputs=hr_enabled, - queue=False, - ) - hr_enabled.change( - lambda g, x, w, h: gr.Checkbox.update( - label=res_cap(g, w, h, x) - ), - inputs=[hr_enabled, hr_scale, width, height], - outputs=hr_enabled, - queue=False, - ) - - with gr.Tab("Embeddings/Loras"): - - ti_state = gr.State(dict()) - lora_state = gr.State() - - with gr.Group(): - with gr.Row(): - with gr.Column(scale=90): - ti_vals = gr.Text(label="Loaded embeddings") - - with gr.Row(): - with gr.Column(scale=90): - lora_vals = gr.Text(label="Loaded loras") - - with gr.Row(): - - uploads = gr.Files(label="Upload new embeddings/lora") - - with gr.Column(): - lora_scale = gr.Slider( - label="Lora scale", - minimum=0, - maximum=2, - step=0.01, - value=1.0, - ) - btn = gr.Button(value="Upload") - btn_del = gr.Button(value="Reset") - - btn.click( - add_net, - inputs=[uploads, ti_state, lora_state], - outputs=[ti_state, lora_state, ti_vals, lora_vals, uploads], - queue=False, - ) - btn_del.click( - clean_states, - inputs=[ti_state, lora_state], - outputs=[ti_state, lora_state, ti_vals, lora_vals, uploads], - queue=False, - ) - - # error_output = gr.Markdown() - - gr.HTML( - f""" - <div class="finetuned-diffusion-div"> - <div> - <h1>Paint with words</h1> - </div> - <p> - Will use the following formula: w = scale * token_weight_martix * log(1 + sigma) * max(qk). 
- </p> - </div> - """ - ) - - with gr.Row(): - - with gr.Column(scale=55): - - rendered = gr.Image( - invert_colors=True, - source="canvas", - interactive=False, - image_mode="RGBA", - ) - - with gr.Column(scale=45): - - with gr.Group(): - with gr.Row(): - with gr.Column(scale=70): - g_strength = gr.Slider( - label="Weight scaling", - minimum=0, - maximum=0.8, - step=0.01, - value=0.4, - ) - - text = gr.Textbox( - lines=2, - interactive=True, - label="Token to Draw: (Separate by comma)", - ) - - radio = gr.Radio([], label="Tokens") - - sk_update = gr.Button(value="Update").style( - rounded=(False, True, True, False) - ) - - # g_strength.change(lambda b: gr.update(f"Scaled additional attn: $w = {b} \log (1 + \sigma) \std (Q^T K)$."), inputs=g_strength, outputs=[g_output]) - - with gr.Tab("SketchPad"): - - sp = gr.Image( - image_mode="L", - tool="sketch", - source="canvas", - interactive=False, - ) - - mask_outsides = gr.Checkbox( - label="Mask other areas", - value=False - ) - - strength = gr.Slider( - label="Token strength", - minimum=0, - maximum=0.8, - step=0.01, - value=0.5, - ) - - - sk_update.click( - detect_text, - inputs=[text, global_stats, width, height], - outputs=[global_stats, sp, radio, rendered], - queue=False, - ) - radio.change( - switch_canvas, - inputs=[radio, global_stats, width, height], - outputs=[sp, strength, mask_outsides, rendered], - queue=False, - ) - sp.edit( - apply_canvas, - inputs=[radio, sp, global_stats, width, height], - outputs=[global_stats, rendered], - queue=False, - ) - strength.change( - apply_weight, - inputs=[radio, strength, global_stats], - outputs=[global_stats], - queue=False, - ) - mask_outsides.change( - apply_option, - inputs=[radio, mask_outsides, global_stats], - outputs=[global_stats], - queue=False, - ) - - with gr.Tab("UploadFile"): - - sp2 = gr.Image( - image_mode="L", - source="upload", - shape=(512, 512), - ) - - mask_outsides2 = gr.Checkbox( - label="Mask other areas", - value=False, - ) - - strength2 = gr.Slider( - label="Token strength", - minimum=0, - maximum=0.8, - step=0.01, - value=0.5, - ) - - apply_style = gr.Button(value="Apply") - apply_style.click( - apply_image, - inputs=[sp2, radio, width, height, strength2, mask_outsides2, global_stats], - outputs=[global_stats, rendered], - queue=False, - ) - - width.change( - apply_new_res, - inputs=[width, height, global_stats], - outputs=[global_stats, rendered], - queue=False, - ) - height.change( - apply_new_res, - inputs=[width, height, global_stats], - outputs=[global_stats, rendered], - queue=False, - ) - - # color_stats = gr.State(value={}) - # text.change(detect_color, inputs=[sp, text, color_stats], outputs=[color_stats, rendered]) - # sp.change(detect_color, inputs=[sp, text, color_stats], outputs=[color_stats, rendered]) - - inputs = [ - prompt, - guidance, - steps, - width, - height, - seed, - neg_prompt, - global_stats, - g_strength, - inf_image, - inf_strength, - hr_enabled, - hr_method, - hr_scale, - hr_denoise, - sampler, - ti_state, - model, - lora_state, - lora_scale, - ] - outputs = [image_out] - prompt.submit(inference, inputs=inputs, outputs=outputs) - generate.click(inference, inputs=inputs, outputs=outputs) - -print(f"Space built in {time.time() - start_time:.2f} seconds") -# demo.launch(share=True) -demo.launch(enable_queue=True, server_name="0.0.0.0", server_port=7860) diff --git a/spaces/Lightxr/sd-diffusers-webui/modules/safe.py b/spaces/Lightxr/sd-diffusers-webui/modules/safe.py deleted file mode 100644 index 
532c7dab3f60f5a68b068299d2adc0b776a423f9..0000000000000000000000000000000000000000 --- a/spaces/Lightxr/sd-diffusers-webui/modules/safe.py +++ /dev/null @@ -1,188 +0,0 @@ -# this code is adapted from the script contributed by anon from /h/ -# modified, from https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/6cff4401824299a983c8e13424018efc347b4a2b/modules/safe.py - -import io -import pickle -import collections -import sys -import traceback - -import torch -import numpy -import _codecs -import zipfile -import re - - -# PyTorch 1.13 and later have _TypedStorage renamed to TypedStorage -TypedStorage = torch.storage.TypedStorage if hasattr(torch.storage, 'TypedStorage') else torch.storage._TypedStorage - - -def encode(*args): - out = _codecs.encode(*args) - return out - - -class RestrictedUnpickler(pickle.Unpickler): - extra_handler = None - - def persistent_load(self, saved_id): - assert saved_id[0] == 'storage' - return TypedStorage() - - def find_class(self, module, name): - if self.extra_handler is not None: - res = self.extra_handler(module, name) - if res is not None: - return res - - if module == 'collections' and name == 'OrderedDict': - return getattr(collections, name) - if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter', '_rebuild_device_tensor_from_numpy']: - return getattr(torch._utils, name) - if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage', 'ByteStorage', 'float32']: - return getattr(torch, name) - if module == 'torch.nn.modules.container' and name in ['ParameterDict']: - return getattr(torch.nn.modules.container, name) - if module == 'numpy.core.multiarray' and name in ['scalar', '_reconstruct']: - return getattr(numpy.core.multiarray, name) - if module == 'numpy' and name in ['dtype', 'ndarray']: - return getattr(numpy, name) - if module == '_codecs' and name == 'encode': - return encode - if module == "pytorch_lightning.callbacks" and name == 'model_checkpoint': - import pytorch_lightning.callbacks - return pytorch_lightning.callbacks.model_checkpoint - if module == "pytorch_lightning.callbacks.model_checkpoint" and name == 'ModelCheckpoint': - import pytorch_lightning.callbacks.model_checkpoint - return pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint - if module == "__builtin__" and name == 'set': - return set - - # Forbid everything else. 
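-        # Anything not matched above (e.g. find_class('os', 'system') or
-        # find_class('builtins', 'eval')) falls through to this raise, which is
-        # what blocks pickle payloads from resolving arbitrary callables.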
- raise Exception(f"global '{module}/{name}' is forbidden") - - -# Regular expression that accepts 'dirname/version', 'dirname/data.pkl', and 'dirname/data/<number>' -allowed_zip_names_re = re.compile(r"^([^/]+)/((data/\d+)|version|(data\.pkl))$") -data_pkl_re = re.compile(r"^([^/]+)/data\.pkl$") - -def check_zip_filenames(filename, names): - for name in names: - if allowed_zip_names_re.match(name): - continue - - raise Exception(f"bad file inside {filename}: {name}") - - -def check_pt(filename, extra_handler): - try: - - # new pytorch format is a zip file - with zipfile.ZipFile(filename) as z: - check_zip_filenames(filename, z.namelist()) - - # find filename of data.pkl in zip file: '<directory name>/data.pkl' - data_pkl_filenames = [f for f in z.namelist() if data_pkl_re.match(f)] - if len(data_pkl_filenames) == 0: - raise Exception(f"data.pkl not found in {filename}") - if len(data_pkl_filenames) > 1: - raise Exception(f"Multiple data.pkl found in {filename}") - with z.open(data_pkl_filenames[0]) as file: - unpickler = RestrictedUnpickler(file) - unpickler.extra_handler = extra_handler - unpickler.load() - - except zipfile.BadZipfile: - - # if it's not a zip file, it's an olf pytorch format, with five objects written to pickle - with open(filename, "rb") as file: - unpickler = RestrictedUnpickler(file) - unpickler.extra_handler = extra_handler - for i in range(5): - unpickler.load() - - -def load(filename, *args, **kwargs): - return load_with_extra(filename, extra_handler=global_extra_handler, *args, **kwargs) - - -def load_with_extra(filename, extra_handler=None, *args, **kwargs): - """ - this function is intended to be used by extensions that want to load models with - some extra classes in them that the usual unpickler would find suspicious. - - Use the extra_handler argument to specify a function that takes module and field name as text, - and returns that field's value: - - ```python - def extra(module, name): - if module == 'collections' and name == 'OrderedDict': - return collections.OrderedDict - - return None - - safe.load_with_extra('model.pt', extra_handler=extra) - ``` - - The alternative to this is just to use safe.unsafe_torch_load('model.pt'), which as the name implies is - definitely unsafe. - """ - - try: - check_pt(filename, extra_handler) - - except pickle.UnpicklingError: - print(f"Error verifying pickled file from {filename}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - print("The file is most likely corrupted.", file=sys.stderr) - return None - - except Exception: - print(f"Error verifying pickled file from {filename}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - print("\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr) - print("You can skip this check with --disable-safe-unpickle commandline argument.\n\n", file=sys.stderr) - return None - - return unsafe_torch_load(filename, *args, **kwargs) - - -class Extra: - """ - A class for temporarily setting the global handler for when you can't explicitly call load_with_extra - (because it's not your code making the torch.load call). 
The intended use is like this:
-
-```
-import torch
-from modules import safe
-
-def handler(module, name):
-    if module == 'torch' and name in ['float64', 'float16']:
-        return getattr(torch, name)
-
-    return None
-
-with safe.Extra(handler):
-    x = torch.load('model.pt')
-```
-    """
-
-    def __init__(self, handler):
-        self.handler = handler
-
-    def __enter__(self):
-        global global_extra_handler
-
-        assert global_extra_handler is None, 'already inside an Extra() block'
-        global_extra_handler = self.handler
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        global global_extra_handler
-
-        global_extra_handler = None
-
-
-unsafe_torch_load = torch.load
-torch.load = load
-global_extra_handler = None
diff --git a/spaces/Luelll/ChuanhuChatGPT/run_Linux.sh b/spaces/Luelll/ChuanhuChatGPT/run_Linux.sh
deleted file mode 100644
index 2d26597ae47519f42336ccffc16646713a192ae1..0000000000000000000000000000000000000000
--- a/spaces/Luelll/ChuanhuChatGPT/run_Linux.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-# Get the directory this script lives in
-script_dir=$(dirname "$(readlink -f "$0")")
-
-# Change the working directory to the script's directory
-cd "$script_dir" || exit
-
-# Check whether the Git repository has updates
-git remote update
-pwd
-
-if ! git status -uno | grep 'up to date' > /dev/null; then
-    # If there are updates, stop the currently running server
-    pkill -f ChuanhuChatbot.py
-
-    # Pull the latest changes
-    git pull
-
-    # Install dependencies
-    pip3 install -r requirements.txt
-
-    # Restart the server
-    nohup python3 ChuanhuChatbot.py &
-fi
-
-# Check whether ChuanhuChatbot.py is running
-if ! pgrep -f ChuanhuChatbot.py > /dev/null; then
-    # If it is not running, start the server
-    nohup python3 ChuanhuChatbot.py &
-fi
diff --git a/spaces/MRiwu/Collection/text/cleaners.py b/spaces/MRiwu/Collection/text/cleaners.py
deleted file mode 100644
index eedbeaee8ad73dd4aaf6c12e3f900fc34a1ee630..0000000000000000000000000000000000000000
--- a/spaces/MRiwu/Collection/text/cleaners.py
+++ /dev/null
@@ -1,150 +0,0 @@
-import re
-import pyopenjtalk
-
-pyopenjtalk._lazy_init()
-
-
-def japanese_cleaners(text):
-    from text.japanese import japanese_to_romaji_with_accent
-    text = japanese_to_romaji_with_accent(text)
-    text = re.sub(r'([A-Za-z])$', r'\1.', text)
-    return text
-
-
-def japanese_cleaners2(text):
-    return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')
-
-
-def korean_cleaners(text):
-    '''Pipeline for Korean text'''
-    from text.korean import latin_to_hangul, number_to_hangul, divide_hangul
-    text = latin_to_hangul(text)
-    text = number_to_hangul(text)
-    text = divide_hangul(text)
-    text = re.sub(r'([\u3131-\u3163])$', r'\1.', text)
-    return text
-
-
-def chinese_cleaners(text):
-    '''Pipeline for Chinese text'''
-    from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo
-    text = number_to_chinese(text)
-    text = chinese_to_bopomofo(text)
-    text = latin_to_bopomofo(text)
-    text = re.sub(r'([ˉˊˇˋ˙])$', r'\1。', text)
-    return text
-
-
-def zh_ja_mixture_cleaners(text):
-    from text.mandarin import chinese_to_romaji
-    from text.japanese import japanese_to_romaji_with_accent
-    text = re.sub(r'\[ZH\](.*?)\[ZH\]',
-                  lambda x: chinese_to_romaji(x.group(1)) + ' ', text)
-    text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_romaji_with_accent(
-        x.group(1)).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…') + ' ', text)
-    text = re.sub(r'\s+$', '', text)
-    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
-    return text
-
-
-def sanskrit_cleaners(text):
-    text = text.replace('॥', '।').replace('ॐ', 'ओम्')
-    if text[-1] != '।':
-        text += ' ।'
-    return text
-
-
-def cjks_cleaners(text):
-    from text.mandarin import chinese_to_lazy_ipa
-    from text.japanese import
japanese_to_ipa - from text.korean import korean_to_lazy_ipa - from text.sanskrit import devanagari_to_ipa - from text.english import english_to_lazy_ipa - text = re.sub(r'\[ZH\](.*?)\[ZH\]', - lambda x: chinese_to_lazy_ipa(x.group(1)) + ' ', text) - text = re.sub(r'\[JA\](.*?)\[JA\]', - lambda x: japanese_to_ipa(x.group(1)) + ' ', text) - text = re.sub(r'\[KO\](.*?)\[KO\]', - lambda x: korean_to_lazy_ipa(x.group(1)) + ' ', text) - text = re.sub(r'\[SA\](.*?)\[SA\]', - lambda x: devanagari_to_ipa(x.group(1)) + ' ', text) - text = re.sub(r'\[EN\](.*?)\[EN\]', - lambda x: english_to_lazy_ipa(x.group(1)) + ' ', text) - text = re.sub(r'\s+$', '', text) - text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) - return text - - -def cjke_cleaners(text): - from text.mandarin import chinese_to_lazy_ipa - from text.japanese import japanese_to_ipa - from text.korean import korean_to_ipa - from text.english import english_to_ipa2 - text = re.sub(r'\[ZH\](.*?)\[ZH\]', lambda x: chinese_to_lazy_ipa(x.group(1)).replace( - 'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn') + ' ', text) - text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_ipa(x.group(1)).replace('ʧ', 'tʃ').replace( - 'ʦ', 'ts').replace('ɥan', 'ɥæn').replace('ʥ', 'dz') + ' ', text) - text = re.sub(r'\[KO\](.*?)\[KO\]', - lambda x: korean_to_ipa(x.group(1)) + ' ', text) - text = re.sub(r'\[EN\](.*?)\[EN\]', lambda x: english_to_ipa2(x.group(1)).replace('ɑ', 'a').replace( - 'ɔ', 'o').replace('ɛ', 'e').replace('ɪ', 'i').replace('ʊ', 'u') + ' ', text) - text = re.sub(r'\s+$', '', text) - text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) - return text - - -def cjke_cleaners2(text): - from text.mandarin import chinese_to_ipa - from text.japanese import japanese_to_ipa2 - from text.korean import korean_to_ipa - from text.english import english_to_ipa2 - text = re.sub(r'\[ZH\](.*?)\[ZH\]', - lambda x: chinese_to_ipa(x.group(1)) + ' ', text) - text = re.sub(r'\[JA\](.*?)\[JA\]', - lambda x: japanese_to_ipa2(x.group(1)) + ' ', text) - text = re.sub(r'\[KO\](.*?)\[KO\]', - lambda x: korean_to_ipa(x.group(1)) + ' ', text) - text = re.sub(r'\[EN\](.*?)\[EN\]', - lambda x: english_to_ipa2(x.group(1)) + ' ', text) - text = re.sub(r'\s+$', '', text) - text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) - return text - - -def thai_cleaners(text): - from text.thai import num_to_thai, latin_to_thai - text = num_to_thai(text) - text = latin_to_thai(text) - return text - - -def shanghainese_cleaners(text): - from text.shanghainese import shanghainese_to_ipa - text = shanghainese_to_ipa(text) - text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) - return text - - -def chinese_dialect_cleaners(text): - from text.mandarin import chinese_to_ipa2 - from text.japanese import japanese_to_ipa3 - from text.shanghainese import shanghainese_to_ipa - from text.cantonese import cantonese_to_ipa - from text.english import english_to_lazy_ipa2 - from text.ngu_dialect import ngu_dialect_to_ipa - text = re.sub(r'\[ZH\](.*?)\[ZH\]', - lambda x: chinese_to_ipa2(x.group(1)) + ' ', text) - text = re.sub(r'\[JA\](.*?)\[JA\]', - lambda x: japanese_to_ipa3(x.group(1)).replace('Q', 'ʔ') + ' ', text) - text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: shanghainese_to_ipa(x.group(1)).replace('1', '˥˧').replace('5', - '˧˧˦').replace( - '6', '˩˩˧').replace('7', '˥').replace('8', '˩˨').replace('ᴀ', 'ɐ').replace('ᴇ', 'e') + ' ', text) - text = re.sub(r'\[GD\](.*?)\[GD\]', - lambda x: cantonese_to_ipa(x.group(1)) + ' ', text) - text = re.sub(r'\[EN\](.*?)\[EN\]', - lambda x: 
english_to_lazy_ipa2(x.group(1)) + ' ', text) - text = re.sub(r'\[([A-Z]{2})\](.*?)\[\1\]', lambda x: ngu_dialect_to_ipa(x.group(2), x.group( - 1)).replace('ʣ', 'dz').replace('ʥ', 'dʑ').replace('ʦ', 'ts').replace('ʨ', 'tɕ') + ' ', text) - text = re.sub(r'\s+$', '', text) - text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) - return text diff --git a/spaces/MUmairAB/BreastCancerDetector-app/app.py b/spaces/MUmairAB/BreastCancerDetector-app/app.py deleted file mode 100644 index 519ddc831693331e5dec72be53fb691c68314e16..0000000000000000000000000000000000000000 --- a/spaces/MUmairAB/BreastCancerDetector-app/app.py +++ /dev/null @@ -1,39 +0,0 @@ -#import necessary libraries -import gradio as gr -import tensorflow as tf -from tensorflow.keras.preprocessing.image import load_img, img_to_array -from huggingface_hub import from_pretrained_keras -import numpy as np - -def detect_cancer(img): - #Load the model - model = from_pretrained_keras('MUmairAB/Breast_Cancer_Detector') - #Convert the NumPy image to tensor - img = tf.convert_to_tensor(img) - #Convert the single images to batch image - img = tf.expand_dims(img, axis=0) - #Make predictions - pred = model.predict(img) - #Convert the "numpy.ndarray" object to a simple numebr - prediction = round(float(pred)) - if prediction == 0: - return("Congratulation! you don't have breast cancer") - else: - return("Unfortunately! you have breast cancer. Kindly consult a doctor!") - -#Define Gradio input components for reading image -input_img = gr.Image(shape=(50, 50)) -#Define Gradio output component -output = 'text' - -#Create a Gradio user interface -interfac = gr.Interface(title="Breast Cancer Diagnosis\n(by Umair Akram)", - description="Enter the Histopathological image of the breast to predict the diagnosis.", - fn=detect_cancer, - inputs=input_img, - outputs=output) - -#Define the main function -if __name__ == "__main__": - #Launch the Gradio interface - interfac.launch() \ No newline at end of file diff --git a/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/monotonic_align/core.py b/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/monotonic_align/core.py deleted file mode 100644 index dddc688d76172b880054e544b7a217acd013f14f..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/monotonic_align/core.py +++ /dev/null @@ -1,35 +0,0 @@ -import numba - - -@numba.jit(numba.void(numba.int32[:,:,::1], numba.float32[:,:,::1], numba.int32[::1], numba.int32[::1]), nopython=True, nogil=True) -def maximum_path_jit(paths, values, t_ys, t_xs): - b = paths.shape[0] - max_neg_val=-1e9 - for i in range(int(b)): - path = paths[i] - value = values[i] - t_y = t_ys[i] - t_x = t_xs[i] - - v_prev = v_cur = 0.0 - index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y-1, x] - if x == 0: - if y == 0: - v_prev = 0. 
- else: - v_prev = max_neg_val - else: - v_prev = value[y-1, x-1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - index = index - 1 diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/version.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/version.py deleted file mode 100644 index b794fd409a5e3b3b65ad76a43d6a01a318877640..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = '0.1.0' diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/inference/interact/fbrs/inference/__init__.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/inference/interact/fbrs/inference/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Malifex/CPU-Anything-V3.0-WebUI/README.md b/spaces/Malifex/CPU-Anything-V3.0-WebUI/README.md deleted file mode 100644 index 97d972aa8679e4b367156dcbfff5d58ad47937e9..0000000000000000000000000000000000000000 --- a/spaces/Malifex/CPU-Anything-V3.0-WebUI/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Webui -emoji: 🚧 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.9 -app_file: app.py -pinned: false -duplicated_from: zwv9/webui-cpu ---- diff --git a/spaces/Marshalls/testmtd/analysis/aistplusplus_api/sandbox.py b/spaces/Marshalls/testmtd/analysis/aistplusplus_api/sandbox.py deleted file mode 100644 index d3c8c46a7f3eebc98a465530b9e61a7ec56b973f..0000000000000000000000000000000000000000 --- a/spaces/Marshalls/testmtd/analysis/aistplusplus_api/sandbox.py +++ /dev/null @@ -1,91 +0,0 @@ -import pickle - -thing = pickle.load(open("SMPL_MALE.pkl","rb"), encoding="latin1") - -thing.keys() - -thing["kintree_table"][0].shape -thing["kintree_table"][0] -thing["J_regressor"].shape -thing["J"].shape -thing["v_template"].shape - -from aist_plusplus.loader import AISTDataset -from smplx import SMPL -import torch - -#%% - -# smpl_poses, smpl_scaling, smpl_trans = AISTDataset.load_motion( -# "../multimodal-transformer/data/motions", "gWA_sFM_cAll_d26_mWA1_ch09") -smpl_thing = pickle.load(open("last.generated.test.pkl", "rb")) -smpl_poses,smpl_scaling,smpl_trans = smpl_thing['smpl_poses'], smpl_thing['smpl_scaling'], smpl_thing['smpl_trans'] -#MY PICKLE IS PROBABLY WRONG -smpl = SMPL(model_path="./", gender='MALE', batch_size=1) -output = smpl.forward( - global_orient=torch.from_numpy(smpl_poses[:, 0:1]).float(), - body_pose=torch.from_numpy(smpl_poses[:, 1:]).float(), - transl=torch.from_numpy(smpl_trans).float(), - scaling=torch.from_numpy(smpl_scaling.reshape(1, 1)).float(), - ) -keypoints3d = output.joints.detach().numpy() - -output.vertices.shape - -smpl_poses.shape -keypoints3d.shape - -keypoints3d = keypoints3d[:,:24] # the body joints (ignoring the head, feet and hand bones added onto it here https://github.com/vchoutas/smplx/blob/7547ee6656b942a68a97604d0cf7b6b834fad9eb/smplx/vertex_joint_selector.py) -# that file takes the position of the vertices corresponding to certain joints - -#%% - - -import matplotlib.pyplot as plt -from 
mpl_toolkits.mplot3d import Axes3D - -%matplotlib - - -import time - -from celluloid import Camera -fig = plt.figure() -ax = Axes3D(fig) - -# camera = Camera(fig) - -import numpy as np -np.max(keypoints3d) - -ax.scatter(keypoints3d[0,:,2], keypoints3d[0,:,0], keypoints3d[0,:,1]) -plt.show() -plt.xlim([-200,200]) -plt.ylim([-200,200]) -ax.set_zlim([75,475]) -ax.view_init(0, 0) -plt.draw() -#%% -# plt.zlim([-50,50]) -for i in range(len(keypoints3d)): -# for i in range(512): - ax.clear() - ax.scatter(keypoints3d[i,:,2], keypoints3d[i,:,0], keypoints3d[i,:,1]) - plt.xlim([-100,100]) - plt.ylim([-100,100]) - ax.set_zlim([75,275]) - ax.view_init(0, 0) - plt.draw() - plt.savefig("img/img_"+str(i)+".png") - # camera.snap() - -# i=300 -# plt.gca().clear() -# plt.scatter(keypoints3d[i,:,0], keypoints3d[i,:,1], keypoints3d[i,:,2]) -# plt.show() -# -# a = camera.animate() -# a.save("out.mp4") -# thing['kintree_table'][0] - -1 diff --git a/spaces/Marshalls/testmtd/feature_extraction/madmom/features/beats_crf.c b/spaces/Marshalls/testmtd/feature_extraction/madmom/features/beats_crf.c deleted file mode 100644 index d5e9a0be580a84ff2c1e869ec7a62eaf0b4ee6df..0000000000000000000000000000000000000000 --- a/spaces/Marshalls/testmtd/feature_extraction/madmom/features/beats_crf.c +++ /dev/null @@ -1,25397 +0,0 @@ -/* Generated by Cython 0.29.22 */ - -/* BEGIN: Cython Metadata -{ - "distutils": { - "depends": [ - "/home/guillefix/.local/lib/python3.8/site-packages/numpy/core/include/numpy/arrayobject.h", - "/home/guillefix/.local/lib/python3.8/site-packages/numpy/core/include/numpy/arrayscalars.h", - "/home/guillefix/.local/lib/python3.8/site-packages/numpy/core/include/numpy/ndarrayobject.h", - "/home/guillefix/.local/lib/python3.8/site-packages/numpy/core/include/numpy/ndarraytypes.h", - "/home/guillefix/.local/lib/python3.8/site-packages/numpy/core/include/numpy/npy_math.h", - "/home/guillefix/.local/lib/python3.8/site-packages/numpy/core/include/numpy/ufuncobject.h" - ], - "include_dirs": [ - "/home/guillefix/.local/lib/python3.8/site-packages/numpy/core/include" - ], - "name": "madmom.features.beats_crf", - "sources": [ - "madmom/features/beats_crf.pyx" - ] - }, - "module_name": "madmom.features.beats_crf" -} -END: Cython Metadata */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. -#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) - #error Cython requires Python 2.6+ or Python 3.3+. 
-#else -#define CYTHON_ABI "0_29_22" -#define CYTHON_HEX_VERSION 0x001D16F0 -#define CYTHON_FUTURE_DIVISION 1 -#include <stddef.h> -#ifndef offsetof - #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif -#if !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif -#define __PYX_COMMA , -#ifndef HAVE_LONG_LONG - #if PY_VERSION_HEX >= 0x02070000 - #define HAVE_LONG_LONG - #endif -#endif -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif -#ifndef Py_HUGE_VAL - #define Py_HUGE_VAL HUGE_VAL -#endif -#ifdef PYPY_VERSION - #define CYTHON_COMPILING_IN_PYPY 1 - #define CYTHON_COMPILING_IN_PYSTON 0 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #undef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 1 - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#elif defined(PYSTON_VERSION) - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_PYSTON 1 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#else - #define 
CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_PYSTON 0 - #define CYTHON_COMPILING_IN_CPYTHON 1 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #if PY_VERSION_HEX < 0x02070000 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) - #define CYTHON_USE_PYTYPE_LOOKUP 1 - #endif - #if PY_MAJOR_VERSION < 3 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #if PY_VERSION_HEX < 0x02070000 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #elif !defined(CYTHON_USE_PYLONG_INTERNALS) - #define CYTHON_USE_PYLONG_INTERNALS 1 - #endif - #ifndef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 1 - #endif - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #if PY_VERSION_HEX < 0x030300F0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #elif !defined(CYTHON_USE_UNICODE_WRITER) - #define CYTHON_USE_UNICODE_WRITER 1 - #endif - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #ifndef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 1 - #endif - #ifndef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 1 - #endif - #ifndef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) - #endif - #ifndef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) - #endif - #ifndef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) - #endif - #ifndef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) - #endif -#endif -#if !defined(CYTHON_FAST_PYCCALL) -#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) -#endif -#if CYTHON_USE_PYLONG_INTERNALS - #include "longintrepr.h" - #undef SHIFT - #undef BASE - #undef MASK - #ifdef SIZEOF_VOID_P - enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; - #endif -#endif -#ifndef __has_attribute - #define __has_attribute(x) 0 -#endif -#ifndef __has_cpp_attribute - #define __has_cpp_attribute(x) 0 -#endif -#ifndef CYTHON_RESTRICT - #if defined(__GNUC__) - #define CYTHON_RESTRICT __restrict__ - #elif defined(_MSC_VER) && _MSC_VER >= 1400 - #define CYTHON_RESTRICT __restrict - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_RESTRICT restrict - #else - #define CYTHON_RESTRICT - #endif -#endif -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif -#ifndef CYTHON_MAYBE_UNUSED_VAR -# if defined(__cplusplus) - template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } -# else -# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) -# endif -#endif -#ifndef CYTHON_NCP_UNUSED -# if CYTHON_COMPILING_IN_CPYTHON -# define CYTHON_NCP_UNUSED -# else -# define CYTHON_NCP_UNUSED CYTHON_UNUSED -# endif -#endif 
-#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) -#ifdef _MSC_VER - #ifndef _MSC_STDINT_H_ - #if _MSC_VER < 1300 - typedef unsigned char uint8_t; - typedef unsigned int uint32_t; - #else - typedef unsigned __int8 uint8_t; - typedef unsigned __int32 uint32_t; - #endif - #endif -#else - #include <stdint.h> -#endif -#ifndef CYTHON_FALLTHROUGH - #if defined(__cplusplus) && __cplusplus >= 201103L - #if __has_cpp_attribute(fallthrough) - #define CYTHON_FALLTHROUGH [[fallthrough]] - #elif __has_cpp_attribute(clang::fallthrough) - #define CYTHON_FALLTHROUGH [[clang::fallthrough]] - #elif __has_cpp_attribute(gnu::fallthrough) - #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] - #endif - #endif - #ifndef CYTHON_FALLTHROUGH - #if __has_attribute(fallthrough) - #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) - #else - #define CYTHON_FALLTHROUGH - #endif - #endif - #if defined(__clang__ ) && defined(__apple_build_version__) - #if __apple_build_version__ < 7000000 - #undef CYTHON_FALLTHROUGH - #define CYTHON_FALLTHROUGH - #endif - #endif -#endif - -#ifndef CYTHON_INLINE - #if defined(__clang__) - #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) - #elif defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) - #define Py_OptimizeFlag 0 -#endif -#define __PYX_BUILD_PY_SSIZE_T "n" -#define CYTHON_FORMAT_SSIZE_T "z" -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) - #define __Pyx_DefaultClassType PyClass_Type -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" -#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#else - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#endif - #define __Pyx_DefaultClassType PyType_Type -#endif -#ifndef Py_TPFLAGS_CHECKTYPES - #define Py_TPFLAGS_CHECKTYPES 0 -#endif -#ifndef Py_TPFLAGS_HAVE_INDEX - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif -#ifndef Py_TPFLAGS_HAVE_NEWBUFFER - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif -#ifndef Py_TPFLAGS_HAVE_FINALIZE - #define Py_TPFLAGS_HAVE_FINALIZE 0 -#endif -#ifndef METH_STACKLESS - #define METH_STACKLESS 0 -#endif -#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) - #ifndef METH_FASTCALL - #define METH_FASTCALL 0x80 - #endif - typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); - typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, - Py_ssize_t nargs, PyObject *kwnames); -#else - #define __Pyx_PyCFunctionFast _PyCFunctionFast - #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords -#endif -#if CYTHON_FAST_PYCCALL -#define __Pyx_PyFastCFunction_Check(func)\ - ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | 
METH_KEYWORDS | METH_STACKLESS))))) -#else -#define __Pyx_PyFastCFunction_Check(func) 0 -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) - #define PyObject_Malloc(s) PyMem_Malloc(s) - #define PyObject_Free(p) PyMem_Free(p) - #define PyObject_Realloc(p) PyMem_Realloc(p) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 - #define PyMem_RawMalloc(n) PyMem_Malloc(n) - #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) - #define PyMem_RawFree(p) PyMem_Free(p) -#endif -#if CYTHON_COMPILING_IN_PYSTON - #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) -#else - #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) -#endif -#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#elif PY_VERSION_HEX >= 0x03060000 - #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() -#elif PY_VERSION_HEX >= 0x03000000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#else - #define __Pyx_PyThreadState_Current _PyThreadState_Current -#endif -#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) -#include "pythread.h" -#define Py_tss_NEEDS_INIT 0 -typedef int Py_tss_t; -static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { - *key = PyThread_create_key(); - return 0; -} -static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { - Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); - *key = Py_tss_NEEDS_INIT; - return key; -} -static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { - PyObject_Free(key); -} -static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { - return *key != Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { - PyThread_delete_key(*key); - *key = Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { - return PyThread_set_key_value(*key, value); -} -static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { - return PyThread_get_key_value(*key); -} -#endif -#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) -#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) -#else -#define __Pyx_PyDict_NewPresized(n) PyDict_New() -#endif -#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS -#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) -#else -#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) -#endif -#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) - #define CYTHON_PEP393_ENABLED 1 - #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ - 0 : _PyUnicode_Ready((PyObject *)(op))) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) - #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) - #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) - #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) - #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) - #else - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) - #endif -#else - #define CYTHON_PEP393_ENABLED 0 - #define PyUnicode_1BYTE_KIND 1 - #define PyUnicode_2BYTE_KIND 2 - #define PyUnicode_4BYTE_KIND 4 - #define __Pyx_PyUnicode_READY(op) (0) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) - #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) - #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) - #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) -#endif -#if CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) -#else - #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ - PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) - #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) - #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) - #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) -#endif -#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) -#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) -#else - #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) -#endif -#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) - #define PyObject_ASCII(o) PyObject_Repr(o) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#ifndef PyObject_Unicode - #define PyObject_Unicode PyObject_Str -#endif -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) - #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) -#else - #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) - #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif -#if PY_VERSION_HEX >= 0x030900A4 - #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) -#else - #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) -#endif -#if CYTHON_ASSUME_SAFE_MACROS - #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) -#else - #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask - #define PyNumber_Int PyNumber_Long -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif -#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY - #ifndef PyUnicode_InternFromString - #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) - #endif -#endif -#if PY_VERSION_HEX < 0x030200A4 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) -#else - #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) -#endif -#if CYTHON_USE_ASYNC_SLOTS - #if PY_VERSION_HEX >= 0x030500B1 - #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods - #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) - #else - #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) - #endif -#else - #define __Pyx_PyType_AsAsync(obj) NULL -#endif -#ifndef __Pyx_PyAsyncMethodsStruct - typedef struct { - unaryfunc am_await; - unaryfunc am_aiter; - unaryfunc am_anext; - } __Pyx_PyAsyncMethodsStruct; -#endif - -#if defined(WIN32) || defined(MS_WINDOWS) - #define _USE_MATH_DEFINES -#endif -#include <math.h> -#ifdef NAN -#define __PYX_NAN() ((float) NAN) -#else -static CYTHON_INLINE float __PYX_NAN() { - float value; - memset(&value, 0xFF, sizeof(value)); - return value; -} -#endif -#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) -#define __Pyx_truncl trunc -#else -#define __Pyx_truncl truncl -#endif - -#define __PYX_MARK_ERR_POS(f_index, lineno) \ - { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } -#define __PYX_ERR(f_index, lineno, Ln_error) \ - { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#define __PYX_HAVE__madmom__features__beats_crf -#define __PYX_HAVE_API__madmom__features__beats_crf -/* Early includes */ -#include <string.h> -#include <stdio.h> -#include "numpy/arrayobject.h" -#include "numpy/ndarrayobject.h" -#include "numpy/ndarraytypes.h" -#include "numpy/arrayscalars.h" -#include "numpy/ufuncobject.h" - - /* NumPy API declarations from "numpy/__init__.pxd" */ - -#include "numpy/npy_math.h" -#include "pythread.h" -#include <stdlib.h> -#include "pystate.h" -#ifdef _OPENMP -#include <omp.h> -#endif /* _OPENMP */ - -#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) -#define CYTHON_WITHOUT_ASSERTIONS -#endif - -typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; - const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; - -#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) -#define __PYX_DEFAULT_STRING_ENCODING "" -#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString -#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#define __Pyx_uchar_cast(c) ((unsigned char)c) -#define __Pyx_long_cast(x) ((long)x) -#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ - (sizeof(type) < sizeof(Py_ssize_t)) ||\ - (sizeof(type) > sizeof(Py_ssize_t) &&\ - likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX) &&\ - (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ - v == (type)PY_SSIZE_T_MIN))) ||\ - (sizeof(type) == sizeof(Py_ssize_t) &&\ - (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX))) ) -static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { - return (size_t) i < (size_t) limit; -} -#if defined (__cplusplus) && __cplusplus >= 201103L - #include <cstdlib> - #define __Pyx_sst_abs(value) std::abs(value) -#elif SIZEOF_INT >= SIZEOF_SIZE_T - #define 
__Pyx_sst_abs(value) abs(value) -#elif SIZEOF_LONG >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) labs(value) -#elif defined (_MSC_VER) - #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) -#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define __Pyx_sst_abs(value) llabs(value) -#elif defined (__GNUC__) - #define __Pyx_sst_abs(value) __builtin_llabs(value) -#else - #define __Pyx_sst_abs(value) ((value<0) ? -value : value) -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); -#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) -#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) -#define __Pyx_PyBytes_FromString PyBytes_FromString -#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); -#if PY_MAJOR_VERSION < 3 - #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#else - #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize -#endif -#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) -#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) -#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) -#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) -#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) -static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { - const Py_UNICODE *u_end = u; - while (*u_end++) ; - return (size_t)(u_end - u - 1); -} -#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) -#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode -#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode -#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) -#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); -#define __Pyx_PySequence_Tuple(obj)\ - (likely(PyTuple_CheckExact(obj)) ? 
__Pyx_NewRef(obj) : PySequence_Tuple(obj)) -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -#if CYTHON_ASSUME_SAFE_MACROS -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) -#else -#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) -#endif -#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) -#else -#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) -#endif -#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII -static int __Pyx_sys_getdefaultencoding_not_ascii; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - PyObject* ascii_chars_u = NULL; - PyObject* ascii_chars_b = NULL; - const char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - if (strcmp(default_encoding_c, "ascii") == 0) { - __Pyx_sys_getdefaultencoding_not_ascii = 0; - } else { - char ascii_chars[128]; - int c; - for (c = 0; c < 128; c++) { - ascii_chars[c] = c; - } - __Pyx_sys_getdefaultencoding_not_ascii = 1; - ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); - if (!ascii_chars_u) goto bad; - ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); - if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { - PyErr_Format( - PyExc_ValueError, - "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", - default_encoding_c); - goto bad; - } - Py_DECREF(ascii_chars_u); - Py_DECREF(ascii_chars_b); - } - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - Py_XDECREF(ascii_chars_u); - Py_XDECREF(ascii_chars_b); - return -1; -} -#endif -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) -#else -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -static char* __PYX_DEFAULT_STRING_ENCODING; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); - if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; - strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - return -1; -} -#endif -#endif - - -/* Test for GCC > 2.95 */ -#if defined(__GNUC__) 
&& (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) -#else /* !__GNUC__ or GCC < 2.95 */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ -static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } - -static PyObject *__pyx_m = NULL; -static PyObject *__pyx_d; -static PyObject *__pyx_b; -static PyObject *__pyx_cython_runtime = NULL; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static PyObject *__pyx_empty_unicode; -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm= __FILE__; -static const char *__pyx_filename; - -/* Header.proto */ -#if !defined(CYTHON_CCOMPLEX) - #if defined(__cplusplus) - #define CYTHON_CCOMPLEX 1 - #elif defined(_Complex_I) - #define CYTHON_CCOMPLEX 1 - #else - #define CYTHON_CCOMPLEX 0 - #endif -#endif -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #include <complex> - #else - #include <complex.h> - #endif -#endif -#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) - #undef _Complex_I - #define _Complex_I 1.0fj -#endif - - -static const char *__pyx_f[] = { - "madmom/features/beats_crf.pyx", - "__init__.pxd", - "stringsource", - "type.pxd", -}; -/* MemviewSliceStruct.proto */ -struct __pyx_memoryview_obj; -typedef struct { - struct __pyx_memoryview_obj *memview; - char *data; - Py_ssize_t shape[8]; - Py_ssize_t strides[8]; - Py_ssize_t suboffsets[8]; -} __Pyx_memviewslice; -#define __Pyx_MemoryView_Len(m) (m.shape[0]) - -/* Atomics.proto */ -#include <pythread.h> -#ifndef CYTHON_ATOMICS - #define CYTHON_ATOMICS 1 -#endif -#define __pyx_atomic_int_type int -#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ - (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ - !defined(__i386__) - #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) - #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) - #ifdef __PYX_DEBUG_ATOMICS - #warning "Using GNU atomics" - #endif -#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 - #include <Windows.h> - #undef __pyx_atomic_int_type - #define __pyx_atomic_int_type LONG - #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) - #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) - #ifdef __PYX_DEBUG_ATOMICS - #pragma message ("Using MSVC atomics") - #endif -#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 - #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) - #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) - #ifdef __PYX_DEBUG_ATOMICS - #warning "Using Intel atomics" - #endif -#else - #undef CYTHON_ATOMICS - #define CYTHON_ATOMICS 0 - #ifdef __PYX_DEBUG_ATOMICS - #warning "Not using atomics" - #endif -#endif -typedef volatile __pyx_atomic_int_type __pyx_atomic_int; -#if CYTHON_ATOMICS - #define __pyx_add_acquisition_count(memview)\ - __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) - #define __pyx_sub_acquisition_count(memview)\ - __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) -#else - #define __pyx_add_acquisition_count(memview)\ - __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) - #define __pyx_sub_acquisition_count(memview)\ - __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), 
memview->lock) -#endif - -/* ForceInitThreads.proto */ -#ifndef __PYX_FORCE_INIT_THREADS - #define __PYX_FORCE_INIT_THREADS 0 -#endif - -/* NoFastGil.proto */ -#define __Pyx_PyGILState_Ensure PyGILState_Ensure -#define __Pyx_PyGILState_Release PyGILState_Release -#define __Pyx_FastGIL_Remember() -#define __Pyx_FastGIL_Forget() -#define __Pyx_FastGilFuncInit() - -/* BufferFormatStructs.proto */ -#define IS_UNSIGNED(type) (((type) -1) > 0) -struct __Pyx_StructField_; -#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) -typedef struct { - const char* name; - struct __Pyx_StructField_* fields; - size_t size; - size_t arraysize[8]; - int ndim; - char typegroup; - char is_unsigned; - int flags; -} __Pyx_TypeInfo; -typedef struct __Pyx_StructField_ { - __Pyx_TypeInfo* type; - const char* name; - size_t offset; -} __Pyx_StructField; -typedef struct { - __Pyx_StructField* field; - size_t parent_offset; -} __Pyx_BufFmt_StackElem; -typedef struct { - __Pyx_StructField root; - __Pyx_BufFmt_StackElem* head; - size_t fmt_offset; - size_t new_count, enc_count; - size_t struct_alignment; - int is_complex; - char enc_type; - char new_packmode; - char enc_packmode; - char is_valid_array; -} __Pyx_BufFmt_Context; - - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":690 - * # in Cython to enable them only on the right systems. - * - * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t - */ -typedef npy_int8 __pyx_t_5numpy_int8_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":691 - * - * ctypedef npy_int8 int8_t - * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< - * ctypedef npy_int32 int32_t - * ctypedef npy_int64 int64_t - */ -typedef npy_int16 __pyx_t_5numpy_int16_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":692 - * ctypedef npy_int8 int8_t - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< - * ctypedef npy_int64 int64_t - * #ctypedef npy_int96 int96_t - */ -typedef npy_int32 __pyx_t_5numpy_int32_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":693 - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t - * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< - * #ctypedef npy_int96 int96_t - * #ctypedef npy_int128 int128_t - */ -typedef npy_int64 __pyx_t_5numpy_int64_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":697 - * #ctypedef npy_int128 int128_t - * - * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t - */ -typedef npy_uint8 __pyx_t_5numpy_uint8_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":698 - * - * ctypedef npy_uint8 uint8_t - * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< - * ctypedef npy_uint32 uint32_t - * ctypedef npy_uint64 uint64_t - */ -typedef npy_uint16 __pyx_t_5numpy_uint16_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":699 - * ctypedef npy_uint8 uint8_t - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< - * ctypedef npy_uint64 uint64_t - * #ctypedef npy_uint96 uint96_t - */ -typedef npy_uint32 __pyx_t_5numpy_uint32_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":700 - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t - * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< - * #ctypedef npy_uint96 uint96_t - * #ctypedef npy_uint128 uint128_t - */ -typedef npy_uint64 __pyx_t_5numpy_uint64_t; - -/* 
"../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":704 - * #ctypedef npy_uint128 uint128_t - * - * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< - * ctypedef npy_float64 float64_t - * #ctypedef npy_float80 float80_t - */ -typedef npy_float32 __pyx_t_5numpy_float32_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":705 - * - * ctypedef npy_float32 float32_t - * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< - * #ctypedef npy_float80 float80_t - * #ctypedef npy_float128 float128_t - */ -typedef npy_float64 __pyx_t_5numpy_float64_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":714 - * # The int types are mapped a bit surprising -- - * # numpy.int corresponds to 'l' and numpy.long to 'q' - * ctypedef npy_long int_t # <<<<<<<<<<<<<< - * ctypedef npy_longlong long_t - * ctypedef npy_longlong longlong_t - */ -typedef npy_long __pyx_t_5numpy_int_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":715 - * # numpy.int corresponds to 'l' and numpy.long to 'q' - * ctypedef npy_long int_t - * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< - * ctypedef npy_longlong longlong_t - * - */ -typedef npy_longlong __pyx_t_5numpy_long_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":716 - * ctypedef npy_long int_t - * ctypedef npy_longlong long_t - * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< - * - * ctypedef npy_ulong uint_t - */ -typedef npy_longlong __pyx_t_5numpy_longlong_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":718 - * ctypedef npy_longlong longlong_t - * - * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< - * ctypedef npy_ulonglong ulong_t - * ctypedef npy_ulonglong ulonglong_t - */ -typedef npy_ulong __pyx_t_5numpy_uint_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":719 - * - * ctypedef npy_ulong uint_t - * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< - * ctypedef npy_ulonglong ulonglong_t - * - */ -typedef npy_ulonglong __pyx_t_5numpy_ulong_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":720 - * ctypedef npy_ulong uint_t - * ctypedef npy_ulonglong ulong_t - * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< - * - * ctypedef npy_intp intp_t - */ -typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":722 - * ctypedef npy_ulonglong ulonglong_t - * - * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< - * ctypedef npy_uintp uintp_t - * - */ -typedef npy_intp __pyx_t_5numpy_intp_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":723 - * - * ctypedef npy_intp intp_t - * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< - * - * ctypedef npy_double float_t - */ -typedef npy_uintp __pyx_t_5numpy_uintp_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":725 - * ctypedef npy_uintp uintp_t - * - * ctypedef npy_double float_t # <<<<<<<<<<<<<< - * ctypedef npy_double double_t - * ctypedef npy_longdouble longdouble_t - */ -typedef npy_double __pyx_t_5numpy_float_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":726 - * - * ctypedef npy_double float_t - * ctypedef npy_double double_t # <<<<<<<<<<<<<< - * ctypedef npy_longdouble longdouble_t - * - */ -typedef npy_double __pyx_t_5numpy_double_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":727 - * ctypedef npy_double float_t - * ctypedef npy_double double_t - * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< - * - * 
ctypedef npy_cfloat cfloat_t - */ -typedef npy_longdouble __pyx_t_5numpy_longdouble_t; -/* Declarations.proto */ -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< float > __pyx_t_float_complex; - #else - typedef float _Complex __pyx_t_float_complex; - #endif -#else - typedef struct { float real, imag; } __pyx_t_float_complex; -#endif -static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); - -/* Declarations.proto */ -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< double > __pyx_t_double_complex; - #else - typedef double _Complex __pyx_t_double_complex; - #endif -#else - typedef struct { double real, imag; } __pyx_t_double_complex; -#endif -static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); - - -/*--- Type declarations ---*/ -struct __pyx_array_obj; -struct __pyx_MemviewEnum_obj; -struct __pyx_memoryview_obj; -struct __pyx_memoryviewslice_obj; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":729 - * ctypedef npy_longdouble longdouble_t - * - * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< - * ctypedef npy_cdouble cdouble_t - * ctypedef npy_clongdouble clongdouble_t - */ -typedef npy_cfloat __pyx_t_5numpy_cfloat_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":730 - * - * ctypedef npy_cfloat cfloat_t - * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< - * ctypedef npy_clongdouble clongdouble_t - * - */ -typedef npy_cdouble __pyx_t_5numpy_cdouble_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":731 - * ctypedef npy_cfloat cfloat_t - * ctypedef npy_cdouble cdouble_t - * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< - * - * ctypedef npy_cdouble complex_t - */ -typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":733 - * ctypedef npy_clongdouble clongdouble_t - * - * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew1(a): - */ -typedef npy_cdouble __pyx_t_5numpy_complex_t; - -/* "View.MemoryView":105 - * - * @cname("__pyx_array") - * cdef class array: # <<<<<<<<<<<<<< - * - * cdef: - */ -struct __pyx_array_obj { - PyObject_HEAD - struct __pyx_vtabstruct_array *__pyx_vtab; - char *data; - Py_ssize_t len; - char *format; - int ndim; - Py_ssize_t *_shape; - Py_ssize_t *_strides; - Py_ssize_t itemsize; - PyObject *mode; - PyObject *_format; - void (*callback_free_data)(void *); - int free_data; - int dtype_is_object; -}; - - -/* "View.MemoryView":279 - * - * @cname('__pyx_MemviewEnum') - * cdef class Enum(object): # <<<<<<<<<<<<<< - * cdef object name - * def __init__(self, name): - */ -struct __pyx_MemviewEnum_obj { - PyObject_HEAD - PyObject *name; -}; - - -/* "View.MemoryView":330 - * - * @cname('__pyx_memoryview') - * cdef class memoryview(object): # <<<<<<<<<<<<<< - * - * cdef object obj - */ -struct __pyx_memoryview_obj { - PyObject_HEAD - struct __pyx_vtabstruct_memoryview *__pyx_vtab; - PyObject *obj; - PyObject *_size; - PyObject *_array_interface; - PyThread_type_lock lock; - __pyx_atomic_int acquisition_count[2]; - __pyx_atomic_int *acquisition_count_aligned_p; - Py_buffer view; - int flags; - int dtype_is_object; - __Pyx_TypeInfo *typeinfo; -}; - - -/* "View.MemoryView":965 - * - * @cname('__pyx_memoryviewslice') - * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< - * "Internal class for passing memoryview slices to Python" - * - */ -struct __pyx_memoryviewslice_obj 
{ - struct __pyx_memoryview_obj __pyx_base; - __Pyx_memviewslice from_slice; - PyObject *from_object; - PyObject *(*to_object_func)(char *); - int (*to_dtype_func)(char *, PyObject *); -}; - - - -/* "View.MemoryView":105 - * - * @cname("__pyx_array") - * cdef class array: # <<<<<<<<<<<<<< - * - * cdef: - */ - -struct __pyx_vtabstruct_array { - PyObject *(*get_memview)(struct __pyx_array_obj *); -}; -static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; - - -/* "View.MemoryView":330 - * - * @cname('__pyx_memoryview') - * cdef class memoryview(object): # <<<<<<<<<<<<<< - * - * cdef object obj - */ - -struct __pyx_vtabstruct_memoryview { - char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); - PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); - PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); - PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); - PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); - PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); - PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); -}; -static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; - - -/* "View.MemoryView":965 - * - * @cname('__pyx_memoryviewslice') - * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< - * "Internal class for passing memoryview slices to Python" - * - */ - -struct __pyx_vtabstruct__memoryviewslice { - struct __pyx_vtabstruct_memoryview __pyx_base; -}; -static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; - -/* --- Runtime support code (head) --- */ -/* Refnanny.proto */ -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; -#ifdef WITH_THREAD - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - if (acquire_gil) {\ - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ - PyGILState_Release(__pyx_gilstate_save);\ - } else {\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ - } -#else - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) -#endif - #define __Pyx_RefNannyFinishContext()\ - __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) != 
NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name, acquire_gil) - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif -#define __Pyx_XDECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_XDECREF(tmp);\ - } while (0) -#define __Pyx_DECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_DECREF(tmp);\ - } while (0) -#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) -#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) - -/* PyObjectGetAttrStr.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) -#endif - -/* GetBuiltinName.proto */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name); - -/* RaiseArgTupleInvalid.proto */ -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); - -/* RaiseDoubleKeywords.proto */ -static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); - -/* ParseKeywords.proto */ -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ - PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ - const char* function_name); - -/* PyDictVersioning.proto */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) -#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ - (version_var) = __PYX_GET_DICT_VERSION(dict);\ - (cache_var) = (value); -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ - (VAR) = __pyx_dict_cached_value;\ - } else {\ - (VAR) = __pyx_dict_cached_value = (LOOKUP);\ - __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ - }\ -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); -#else -#define __PYX_GET_DICT_VERSION(dict) (0) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); -#endif - -/* GetModuleGlobalName.proto */ -#if CYTHON_USE_DICT_VERSIONS -#define __Pyx_GetModuleGlobalName(var, name) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ - (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ - __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ - PY_UINT64_T __pyx_dict_version;\ - PyObject *__pyx_dict_cached_value;\ - (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); -#else -#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) -#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); -#endif - -/* PyObjectCall.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); -#else -#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) -#endif - -/* SliceObject.proto */ -#define __Pyx_PyObject_DelSlice(obj, cstart, cstop, py_start, py_stop, py_slice, has_cstart, has_cstop, wraparound)\ - __Pyx_PyObject_SetSlice(obj, (PyObject*)NULL, cstart, cstop, py_start, py_stop, py_slice, has_cstart, has_cstop, wraparound) -static CYTHON_INLINE int __Pyx_PyObject_SetSlice( - PyObject* obj, PyObject* value, Py_ssize_t cstart, Py_ssize_t cstop, - PyObject** py_start, PyObject** py_stop, PyObject** py_slice, - int has_cstart, int has_cstop, int wraparound); - -/* Import.proto */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); - -/* ImportFrom.proto */ -static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); - -/* SetItemInt.proto */ -#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\ - (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\ - __Pyx_SetItemInt_Generic(o, to_py_func(i), v))) -static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v); -static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, - int is_list, int wraparound, int boundscheck); - -/* PyCFunctionFastCall.proto */ -#if CYTHON_FAST_PYCCALL -static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); -#else -#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) -#endif - -/* PyFunctionFastCall.proto */ -#if CYTHON_FAST_PYCALL -#define __Pyx_PyFunction_FastCall(func, args, nargs)\ - __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) -#if 1 || PY_VERSION_HEX < 0x030600B1 -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); -#else -#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) -#endif -#define __Pyx_BUILD_ASSERT_EXPR(cond)\ - (sizeof(char [1 - 2*!(cond)]) - 1) -#ifndef Py_MEMBER_SIZE -#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) -#endif - static size_t __pyx_pyframe_localsplus_offset = 0; - #include "frameobject.h" - #define __Pxy_PyFrame_Initialize_Offsets()\ - ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ - (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) - #define __Pyx_PyFrame_GetLocalsplus(frame)\ - (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) -#endif - -/* PyObjectCall2Args.proto */ -static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); - -/* PyObjectCallMethO.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); -#endif - -/* PyObjectCallOneArg.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); - -/* PyObjectCallNoArg.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); -#else -#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) -#endif - -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_TrueDivideObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_TrueDivideObjC(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? PyNumber_InPlaceTrueDivide(op1, op2) : PyNumber_TrueDivide(op1, op2)) -#endif - -/* GetItemInt.proto */ -#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ - (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ - __Pyx_GetItemInt_Generic(o, to_py_func(i)))) -#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, - int is_list, int wraparound, int boundscheck); - -/* PyObjectLookupSpecial.proto */ -#if CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name) { - PyObject *res; - PyTypeObject *tp = Py_TYPE(obj); -#if PY_MAJOR_VERSION < 3 - if (unlikely(PyInstance_Check(obj))) - return __Pyx_PyObject_GetAttrStr(obj, attr_name); -#endif - res = _PyType_Lookup(tp, attr_name); - if (likely(res)) { - descrgetfunc f = Py_TYPE(res)->tp_descr_get; - if (!f) { - Py_INCREF(res); - } else { - res = f(res, obj, (PyObject *)tp); - } - } else { - PyErr_SetObject(PyExc_AttributeError, attr_name); - } - return res; -} -#else -#define __Pyx_PyObject_LookupSpecial(o,n) __Pyx_PyObject_GetAttrStr(o,n) -#endif - -/* GetTopmostException.proto */ -#if CYTHON_USE_EXC_INFO_STACK -static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); -#endif - -/* PyThreadStateGet.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; -#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; -#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type -#else -#define __Pyx_PyThreadState_declare -#define __Pyx_PyThreadState_assign -#define __Pyx_PyErr_Occurred() PyErr_Occurred() -#endif - -/* SaveResetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -#else -#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) -#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) -#endif - -/* GetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); -#endif - -/* PyErrFetchRestore.proto */ 
-#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) -#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) -#else -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#endif -#else -#define __Pyx_PyErr_Clear() PyErr_Clear() -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) -#endif - -/* None.proto */ -static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); - -/* MemviewSliceInit.proto */ -#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d -#define __Pyx_MEMVIEW_DIRECT 1 -#define __Pyx_MEMVIEW_PTR 2 -#define __Pyx_MEMVIEW_FULL 4 -#define __Pyx_MEMVIEW_CONTIG 8 -#define __Pyx_MEMVIEW_STRIDED 16 -#define __Pyx_MEMVIEW_FOLLOW 32 -#define __Pyx_IS_C_CONTIG 1 -#define __Pyx_IS_F_CONTIG 2 -static int __Pyx_init_memviewslice( - struct __pyx_memoryview_obj *memview, - int ndim, - __Pyx_memviewslice *memviewslice, - int memview_is_new_reference); -static CYTHON_INLINE int __pyx_add_acquisition_count_locked( - __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); -static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( - __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); -#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) -#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) -#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) -#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) -static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); -static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); - -/* PyErrExceptionMatches.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); -#else -#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) -#endif - -/* RaiseException.proto */ -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); - -/* ArgTypeTest.proto */ -#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ - ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ - __Pyx__ArgTypeTest(obj, type, name, exact)) -static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); - -/* IncludeStringH.proto */ -#include <string.h> - -/* BytesEquals.proto */ -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); - -/* UnicodeEquals.proto */ -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); - -/* StrEquals.proto */ -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals -#else -#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals -#endif - -/* None.proto */ -static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); - -/* UnaryNegOverflows.proto */ -#define UNARY_NEG_WOULD_OVERFLOW(x)\ - (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) - -static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ -/* GetAttr.proto */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); - -/* ObjectGetItem.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); -#else -#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) -#endif - -/* decode_c_string_utf16.proto */ -static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { - int byteorder = 0; - return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); -} -static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { - int byteorder = -1; - return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); -} -static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { - int byteorder = 1; - return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); -} - -/* decode_c_string.proto */ -static CYTHON_INLINE PyObject* __Pyx_decode_c_string( - const char* cstring, Py_ssize_t start, Py_ssize_t stop, - const char* encoding, const char* errors, - PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); - -/* GetAttr3.proto */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); - -/* RaiseTooManyValuesToUnpack.proto */ -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -/* RaiseNeedMoreValuesToUnpack.proto */ -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -/* RaiseNoneIterError.proto */ -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); - -/* ExtTypeTest.proto */ -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); - -/* SwapException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#else -static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); -#endif - -/* FastTypeChecks.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); -static 
CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); -#else -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) -#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) -#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) -#endif -#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) - -static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -/* ListCompAppend.proto */ -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { - PyListObject* L = (PyListObject*) list; - Py_ssize_t len = Py_SIZE(list); - if (likely(L->allocated > len)) { - Py_INCREF(x); - PyList_SET_ITEM(list, len, x); - __Pyx_SET_SIZE(list, len + 1); - return 0; - } - return PyList_Append(list, x); -} -#else -#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) -#endif - -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) -#endif - -/* ListExtend.proto */ -static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { -#if CYTHON_COMPILING_IN_CPYTHON - PyObject* none = _PyList_Extend((PyListObject*)L, v); - if (unlikely(!none)) - return -1; - Py_DECREF(none); - return 0; -#else - return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); -#endif -} - -/* ListAppend.proto */ -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { - PyListObject* L = (PyListObject*) list; - Py_ssize_t len = Py_SIZE(list); - if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { - Py_INCREF(x); - PyList_SET_ITEM(list, len, x); - __Pyx_SET_SIZE(list, len + 1); - return 0; - } - return PyList_Append(list, x); -} -#else -#define __Pyx_PyList_Append(L,x) PyList_Append(L,x) -#endif - -/* None.proto */ -static CYTHON_INLINE long __Pyx_div_long(long, long); - -/* HasAttr.proto */ -static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); - -/* PyObject_GenericGetAttrNoDict.proto */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr -#endif - -/* PyObject_GenericGetAttr.proto */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr -#endif - -/* SetVTable.proto */ -static int __Pyx_SetVtable(PyObject *dict, void *vtable); - -/* PyObjectGetAttrStrNoError.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); - -/* SetupReduce.proto */ -static int __Pyx_setup_reduce(PyObject* type_obj); - -/* TypeImport.proto */ -#ifndef __PYX_HAVE_RT_ImportType_proto -#define __PYX_HAVE_RT_ImportType_proto -enum __Pyx_ImportType_CheckSize { - 
__Pyx_ImportType_CheckSize_Error = 0, - __Pyx_ImportType_CheckSize_Warn = 1, - __Pyx_ImportType_CheckSize_Ignore = 2 -}; -static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); -#endif - -/* CLineInTraceback.proto */ -#ifdef CYTHON_CLINE_IN_TRACEBACK -#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) -#else -static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); -#endif - -/* CodeObjectCache.proto */ -typedef struct { - PyCodeObject* code_object; - int code_line; -} __Pyx_CodeObjectCacheEntry; -struct __Pyx_CodeObjectCache { - int count; - int max_count; - __Pyx_CodeObjectCacheEntry* entries; -}; -static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); -static PyCodeObject *__pyx_find_code_object(int code_line); -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); - -/* AddTraceback.proto */ -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename); - -#if PY_MAJOR_VERSION < 3 - static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); - static void __Pyx_ReleaseBuffer(Py_buffer *view); -#else - #define __Pyx_GetBuffer PyObject_GetBuffer - #define __Pyx_ReleaseBuffer PyBuffer_Release -#endif - - -/* BufferStructDeclare.proto */ -typedef struct { - Py_ssize_t shape, strides, suboffsets; -} __Pyx_Buf_DimInfo; -typedef struct { - size_t refcount; - Py_buffer pybuffer; -} __Pyx_Buffer; -typedef struct { - __Pyx_Buffer *rcbuffer; - char *data; - __Pyx_Buf_DimInfo diminfo[8]; -} __Pyx_LocalBuf_ND; - -/* MemviewSliceIsContig.proto */ -static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); - -/* OverlappingSlices.proto */ -static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, - __Pyx_memviewslice *slice2, - int ndim, size_t itemsize); - -/* Capsule.proto */ -static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); - -/* IsLittleEndian.proto */ -static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); - -/* BufferFormatCheck.proto */ -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type); - -/* TypeInfoCompare.proto */ -static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); - -/* MemviewSliceValidateAndInit.proto */ -static int __Pyx_ValidateAndInit_memviewslice( - int *axes_specs, - int c_or_f_flag, - int buf_flags, - int ndim, - __Pyx_TypeInfo *dtype, - __Pyx_BufFmt_StackElem stack[], - __Pyx_memviewslice *memviewslice, - PyObject *original_obj); - -/* ObjectToMemviewSlice.proto */ -static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_float(PyObject *, int writable_flag); - -/* GCCDiagnostics.proto */ -#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) -#define __Pyx_HAS_GCC_DIAGNOSTIC -#endif - -/* MemviewDtypeToObject.proto */ -static CYTHON_INLINE PyObject *__pyx_memview_get_long(const char *itemp); -static CYTHON_INLINE int __pyx_memview_set_long(const char *itemp, PyObject *obj); - -/* RealImag.proto */ -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #define __Pyx_CREAL(z) ((z).real()) - #define __Pyx_CIMAG(z) ((z).imag()) - #else - #define __Pyx_CREAL(z) 
(__real__(z)) - #define __Pyx_CIMAG(z) (__imag__(z)) - #endif -#else - #define __Pyx_CREAL(z) ((z).real) - #define __Pyx_CIMAG(z) ((z).imag) -#endif -#if defined(__cplusplus) && CYTHON_CCOMPLEX\ - && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) - #define __Pyx_SET_CREAL(z,x) ((z).real(x)) - #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) -#else - #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) - #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) -#endif - -/* Arithmetic.proto */ -#if CYTHON_CCOMPLEX - #define __Pyx_c_eq_float(a, b) ((a)==(b)) - #define __Pyx_c_sum_float(a, b) ((a)+(b)) - #define __Pyx_c_diff_float(a, b) ((a)-(b)) - #define __Pyx_c_prod_float(a, b) ((a)*(b)) - #define __Pyx_c_quot_float(a, b) ((a)/(b)) - #define __Pyx_c_neg_float(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zero_float(z) ((z)==(float)0) - #define __Pyx_c_conj_float(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_abs_float(z) (::std::abs(z)) - #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zero_float(z) ((z)==0) - #define __Pyx_c_conj_float(z) (conjf(z)) - #if 1 - #define __Pyx_c_abs_float(z) (cabsf(z)) - #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); - static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); - #if 1 - static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); - #endif -#endif - -/* Arithmetic.proto */ -#if CYTHON_CCOMPLEX - #define __Pyx_c_eq_double(a, b) ((a)==(b)) - #define __Pyx_c_sum_double(a, b) ((a)+(b)) - #define __Pyx_c_diff_double(a, b) ((a)-(b)) - #define __Pyx_c_prod_double(a, b) ((a)*(b)) - #define __Pyx_c_quot_double(a, b) ((a)/(b)) - #define __Pyx_c_neg_double(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zero_double(z) ((z)==(double)0) - #define __Pyx_c_conj_double(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_abs_double(z) (::std::abs(z)) - #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zero_double(z) ((z)==0) - #define __Pyx_c_conj_double(z) (conj(z)) - #if 1 - #define __Pyx_c_abs_double(z) (cabs(z)) - #define __Pyx_c_pow_double(a, b) (cpow(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE 
__pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); - static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); - #if 1 - static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); - #endif -#endif - -/* MemviewSliceCopyTemplate.proto */ -static __Pyx_memviewslice -__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, - const char *mode, int ndim, - size_t sizeof_dtype, int contig_flag, - int dtype_is_object); - -/* CIntFromPy.proto */ -static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); - -/* CIntFromPy.proto */ -static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); - -/* CIntFromPy.proto */ -static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); - -/* ObjectToMemviewSlice.proto */ -static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_long(PyObject *, int writable_flag); - -/* ObjectToMemviewSlice.proto */ -static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_long(PyObject *, int writable_flag); - -/* CheckBinaryVersion.proto */ -static int __Pyx_check_binary_version(void); - -/* InitStrings.proto */ -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); - -static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ -static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ -static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ -static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ -static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ -static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ -static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ -static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ -static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ -static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ - -/* Module declarations from 'cpython.buffer' */ - -/* Module declarations from 'libc.string' */ - -/* Module declarations from 'libc.stdio' */ - -/* Module declarations from '__builtin__' */ - -/* Module declarations from 'cpython.type' */ -static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; - -/* Module declarations from 'cpython' */ - -/* Module declarations from 'cpython.object' */ - -/* 
Module declarations from 'cpython.ref' */ - -/* Module declarations from 'cpython.mem' */ - -/* Module declarations from 'numpy' */ - -/* Module declarations from 'numpy' */ -static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; -static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; -static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; -static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; -static PyTypeObject *__pyx_ptype_5numpy_generic = 0; -static PyTypeObject *__pyx_ptype_5numpy_number = 0; -static PyTypeObject *__pyx_ptype_5numpy_integer = 0; -static PyTypeObject *__pyx_ptype_5numpy_signedinteger = 0; -static PyTypeObject *__pyx_ptype_5numpy_unsignedinteger = 0; -static PyTypeObject *__pyx_ptype_5numpy_inexact = 0; -static PyTypeObject *__pyx_ptype_5numpy_floating = 0; -static PyTypeObject *__pyx_ptype_5numpy_complexfloating = 0; -static PyTypeObject *__pyx_ptype_5numpy_flexible = 0; -static PyTypeObject *__pyx_ptype_5numpy_character = 0; -static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; - -/* Module declarations from 'cython.view' */ - -/* Module declarations from 'cython' */ - -/* Module declarations from 'numpy.math' */ - -/* Module declarations from 'madmom.features.beats_crf' */ -static PyTypeObject *__pyx_array_type = 0; -static PyTypeObject *__pyx_MemviewEnum_type = 0; -static PyTypeObject *__pyx_memoryview_type = 0; -static PyTypeObject *__pyx_memoryviewslice_type = 0; -static PyObject *generic = 0; -static PyObject *strided = 0; -static PyObject *indirect = 0; -static PyObject *contiguous = 0; -static PyObject *indirect_contiguous = 0; -static int __pyx_memoryview_thread_locks_used; -static PyThread_type_lock __pyx_memoryview_thread_locks[8]; -static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ -static void *__pyx_align_pointer(void *, size_t); /*proto*/ -static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ -static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ -static PyObject *_unellipsify(PyObject *, int); /*proto*/ -static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ -static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ -static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ -static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ -static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ -static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ -static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ -static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ -static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ -static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ -static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ -static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ -static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ -static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, 
size_t); /*proto*/ -static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ -static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ -static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ -static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ -static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ -static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ -static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ -static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ -static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ -static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ -static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ -static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ -static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ -static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ -static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; -static __Pyx_TypeInfo __Pyx_TypeInfo_long = { "long", NULL, sizeof(long), { 0 }, 0, IS_UNSIGNED(long) ? 'U' : 'I', IS_UNSIGNED(long), 0 }; -#define __Pyx_MODULE_NAME "madmom.features.beats_crf" -extern int __pyx_module_is_main_madmom__features__beats_crf; -int __pyx_module_is_main_madmom__features__beats_crf = 0; - -/* Implementation of 'madmom.features.beats_crf' */ -static PyObject *__pyx_builtin_range; -static PyObject *__pyx_builtin_ImportError; -static PyObject *__pyx_builtin_ValueError; -static PyObject *__pyx_builtin_MemoryError; -static PyObject *__pyx_builtin_enumerate; -static PyObject *__pyx_builtin_TypeError; -static PyObject *__pyx_builtin_Ellipsis; -static PyObject *__pyx_builtin_id; -static PyObject *__pyx_builtin_IndexError; -static const char __pyx_k_O[] = "O"; -static const char __pyx_k_c[] = "c"; -static const char __pyx_k_i[] = "i"; -static const char __pyx_k_j[] = "j"; -static const char __pyx_k_k[] = "k"; -static const char __pyx_k_id[] = "id"; -static const char __pyx_k_np[] = "np"; -static const char __pyx_k_pi[] = "pi"; -static const char __pyx_k_bps[] = "bps"; -static const char __pyx_k_int[] = "int"; -static const char __pyx_k_loc[] = "loc"; -static const char __pyx_k_log[] = "log"; -static const char __pyx_k_new[] = "__new__"; -static const char __pyx_k_obj[] = "obj"; -static const char __pyx_k_pdf[] = "pdf"; -static const char __pyx_k_sum[] = "sum"; -static const char __pyx_k_tau[] = "tau"; -static const char __pyx_k_v_c[] = "v_c"; -static const char __pyx_k_v_p[] = "v_p"; -static const char __pyx_k_base[] = "base"; -static const char __pyx_k_cval[] = "cval"; -static const char __pyx_k_dict[] = "__dict__"; -static const char __pyx_k_exit[] = "__exit__"; -static const char __pyx_k_init[] = "init"; -static const char __pyx_k_log2[] = "log2"; -static const char __pyx_k_main[] = "__main__"; -static const char __pyx_k_mode[] = "mode"; -static const char __pyx_k_name[] = "name"; -static const char __pyx_k_ndim[] = "ndim"; -static const char __pyx_k_norm[] = "norm"; -static const 
char __pyx_k_ones[] = "ones"; -static const char __pyx_k_pack[] = "pack"; -static const char __pyx_k_path[] = "path"; -static const char __pyx_k_size[] = "size"; -static const char __pyx_k_step[] = "step"; -static const char __pyx_k_stop[] = "stop"; -static const char __pyx_k_test[] = "__test__"; -static const char __pyx_k_ASCII[] = "ASCII"; -static const char __pyx_k_class[] = "__class__"; -static const char __pyx_k_dtype[] = "dtype"; -static const char __pyx_k_empty[] = "empty"; -static const char __pyx_k_enter[] = "__enter__"; -static const char __pyx_k_error[] = "error"; -static const char __pyx_k_flags[] = "flags"; -static const char __pyx_k_float[] = "float"; -static const char __pyx_k_num_x[] = "num_x"; -static const char __pyx_k_numpy[] = "numpy"; -static const char __pyx_k_range[] = "range"; -static const char __pyx_k_scale[] = "scale"; -static const char __pyx_k_shape[] = "shape"; -static const char __pyx_k_start[] = "start"; -static const char __pyx_k_trans[] = "trans"; -static const char __pyx_k_arange[] = "arange"; -static const char __pyx_k_astype[] = "astype"; -static const char __pyx_k_divide[] = "divide"; -static const char __pyx_k_encode[] = "encode"; -static const char __pyx_k_format[] = "format"; -static const char __pyx_k_ignore[] = "ignore"; -static const char __pyx_k_import[] = "__import__"; -static const char __pyx_k_name_2[] = "__name__"; -static const char __pyx_k_num_st[] = "num_st"; -static const char __pyx_k_num_tr[] = "num_tr"; -static const char __pyx_k_origin[] = "origin"; -static const char __pyx_k_pickle[] = "pickle"; -static const char __pyx_k_reduce[] = "__reduce__"; -static const char __pyx_k_struct[] = "struct"; -static const char __pyx_k_unpack[] = "unpack"; -static const char __pyx_k_update[] = "update"; -static const char __pyx_k_asarray[] = "asarray"; -static const char __pyx_k_float32[] = "float32"; -static const char __pyx_k_fortran[] = "fortran"; -static const char __pyx_k_log_act[] = "log_act"; -static const char __pyx_k_memview[] = "memview"; -static const char __pyx_k_viterbi[] = "viterbi"; -static const char __pyx_k_Ellipsis[] = "Ellipsis"; -static const char __pyx_k_constant[] = "constant"; -static const char __pyx_k_errstate[] = "errstate"; -static const char __pyx_k_getstate[] = "__getstate__"; -static const char __pyx_k_interval[] = "interval"; -static const char __pyx_k_itemsize[] = "itemsize"; -static const char __pyx_k_new_prob[] = "new_prob"; -static const char __pyx_k_pyx_type[] = "__pyx_type"; -static const char __pyx_k_setstate[] = "__setstate__"; -static const char __pyx_k_TypeError[] = "TypeError"; -static const char __pyx_k_enumerate[] = "enumerate"; -static const char __pyx_k_init_dist[] = "init_dist"; -static const char __pyx_k_norm_fact[] = "norm_fact"; -static const char __pyx_k_path_prob[] = "path_prob"; -static const char __pyx_k_pyx_state[] = "__pyx_state"; -static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; -static const char __pyx_k_IndexError[] = "IndexError"; -static const char __pyx_k_ValueError[] = "ValueError"; -static const char __pyx_k_move_range[] = "move_range"; -static const char __pyx_k_next_state[] = "next_state"; -static const char __pyx_k_num_states[] = "num_states"; -static const char __pyx_k_pyx_result[] = "__pyx_result"; -static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; -static const char __pyx_k_trans_dist[] = "trans_dist"; -static const char __pyx_k_transition[] = "transition"; -static const char __pyx_k_ImportError[] = "ImportError"; -static const char __pyx_k_MemoryError[] = 
"MemoryError"; -static const char __pyx_k_PickleError[] = "PickleError"; -static const char __pyx_k_activations[] = "activations"; -static const char __pyx_k_correlate1d[] = "correlate1d"; -static const char __pyx_k_norm_factor[] = "norm_factor"; -static const char __pyx_k_scipy_stats[] = "scipy.stats"; -static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; -static const char __pyx_k_stringsource[] = "stringsource"; -static const char __pyx_k_best_sequence[] = "best_sequence"; -static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; -static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; -static const char __pyx_k_interval_sigma[] = "interval_sigma"; -static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; -static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; -static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; -static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; -static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; -static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; -static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; -static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; -static const char __pyx_k_initial_distribution[] = "initial_distribution"; -static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; -static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; -static const char __pyx_k_normalisation_factors[] = "normalisation_factors"; -static const char __pyx_k_scipy_ndimage_filters[] = "scipy.ndimage.filters"; -static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; -static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; -static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; -static const char __pyx_k_transition_distribution[] = "transition_distribution"; -static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; -static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; -static const char __pyx_k_madmom_features_beats_crf[] = "madmom.features.beats_crf"; -static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; -static const char __pyx_k_madmom_features_beats_crf_pyx[] = "madmom/features/beats_crf.pyx"; -static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; -static const char __pyx_k_This_module_contains_the_speed[] = "\nThis module contains the speed crucial Viterbi functionality for the\nCRFBeatDetector plus some functions computing the distributions and\nnormalisation factors.\n\nReferences\n----------\n.. 
[1] Filip Korzeniowski, Sebastian B\303\266ck and Gerhard Widmer,\n \"Probabilistic Extraction of Beat Positions from a Beat Activation\n Function\",\n Proceedings of the 15th International Society for Music Information\n Retrieval Conference (ISMIR), 2014.\n\n"; -static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; -static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; -static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; -static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; -static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; -static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; -static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; -static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; -static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; -static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; -static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; -static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; -static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; -static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; -static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; -static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; -static PyObject *__pyx_n_s_ASCII; -static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; -static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; -static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; -static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; -static PyObject *__pyx_kp_s_Cannot_index_with_type_s; -static PyObject *__pyx_n_s_Ellipsis; -static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; -static PyObject *__pyx_n_s_ImportError; -static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; -static PyObject *__pyx_n_s_IndexError; -static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; -static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; -static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; -static PyObject *__pyx_n_s_MemoryError; -static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; -static PyObject *__pyx_kp_s_MemoryView_of_r_object; -static PyObject *__pyx_n_b_O; -static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; -static PyObject *__pyx_n_s_PickleError; -static PyObject *__pyx_n_s_TypeError; -static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; -static PyObject *__pyx_n_s_ValueError; -static PyObject *__pyx_n_s_View_MemoryView; -static PyObject *__pyx_n_s_activations; -static PyObject *__pyx_n_s_allocate_buffer; -static PyObject *__pyx_n_s_arange; -static PyObject *__pyx_n_s_asarray; -static PyObject *__pyx_n_s_astype; -static PyObject *__pyx_n_s_base; -static PyObject *__pyx_n_s_best_sequence; -static PyObject 
*__pyx_n_s_bps; -static PyObject *__pyx_n_s_c; -static PyObject *__pyx_n_u_c; -static PyObject *__pyx_n_s_class; -static PyObject *__pyx_n_s_cline_in_traceback; -static PyObject *__pyx_n_s_constant; -static PyObject *__pyx_kp_s_contiguous_and_direct; -static PyObject *__pyx_kp_s_contiguous_and_indirect; -static PyObject *__pyx_n_s_correlate1d; -static PyObject *__pyx_n_s_cval; -static PyObject *__pyx_n_s_dict; -static PyObject *__pyx_n_s_divide; -static PyObject *__pyx_n_s_dtype; -static PyObject *__pyx_n_s_dtype_is_object; -static PyObject *__pyx_n_s_empty; -static PyObject *__pyx_n_s_encode; -static PyObject *__pyx_n_s_enter; -static PyObject *__pyx_n_s_enumerate; -static PyObject *__pyx_n_s_error; -static PyObject *__pyx_n_s_errstate; -static PyObject *__pyx_n_s_exit; -static PyObject *__pyx_n_s_flags; -static PyObject *__pyx_n_s_float; -static PyObject *__pyx_n_s_float32; -static PyObject *__pyx_n_s_format; -static PyObject *__pyx_n_s_fortran; -static PyObject *__pyx_n_u_fortran; -static PyObject *__pyx_n_s_getstate; -static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; -static PyObject *__pyx_n_s_i; -static PyObject *__pyx_n_s_id; -static PyObject *__pyx_n_s_ignore; -static PyObject *__pyx_n_s_import; -static PyObject *__pyx_n_s_init; -static PyObject *__pyx_n_s_init_dist; -static PyObject *__pyx_n_s_initial_distribution; -static PyObject *__pyx_n_s_int; -static PyObject *__pyx_n_s_interval; -static PyObject *__pyx_n_s_interval_sigma; -static PyObject *__pyx_n_s_itemsize; -static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; -static PyObject *__pyx_n_s_j; -static PyObject *__pyx_n_s_k; -static PyObject *__pyx_n_s_loc; -static PyObject *__pyx_n_s_log; -static PyObject *__pyx_n_s_log2; -static PyObject *__pyx_n_s_log_act; -static PyObject *__pyx_n_s_madmom_features_beats_crf; -static PyObject *__pyx_kp_s_madmom_features_beats_crf_pyx; -static PyObject *__pyx_n_s_main; -static PyObject *__pyx_n_s_memview; -static PyObject *__pyx_n_s_mode; -static PyObject *__pyx_n_s_move_range; -static PyObject *__pyx_n_s_name; -static PyObject *__pyx_n_s_name_2; -static PyObject *__pyx_n_s_ndim; -static PyObject *__pyx_n_s_new; -static PyObject *__pyx_n_s_new_prob; -static PyObject *__pyx_n_s_next_state; -static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; -static PyObject *__pyx_n_s_norm; -static PyObject *__pyx_n_s_norm_fact; -static PyObject *__pyx_n_s_norm_factor; -static PyObject *__pyx_n_s_normalisation_factors; -static PyObject *__pyx_n_s_np; -static PyObject *__pyx_n_s_num_st; -static PyObject *__pyx_n_s_num_states; -static PyObject *__pyx_n_s_num_tr; -static PyObject *__pyx_n_s_num_x; -static PyObject *__pyx_n_s_numpy; -static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to; -static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor; -static PyObject *__pyx_n_s_obj; -static PyObject *__pyx_n_s_ones; -static PyObject *__pyx_n_s_origin; -static PyObject *__pyx_n_s_pack; -static PyObject *__pyx_n_s_path; -static PyObject *__pyx_n_s_path_prob; -static PyObject *__pyx_n_s_pdf; -static PyObject *__pyx_n_s_pi; -static PyObject *__pyx_n_s_pickle; -static PyObject *__pyx_n_s_pyx_PickleError; -static PyObject *__pyx_n_s_pyx_checksum; -static PyObject *__pyx_n_s_pyx_getbuffer; -static PyObject *__pyx_n_s_pyx_result; -static PyObject *__pyx_n_s_pyx_state; -static PyObject *__pyx_n_s_pyx_type; -static PyObject *__pyx_n_s_pyx_unpickle_Enum; -static PyObject *__pyx_n_s_pyx_vtable; -static PyObject *__pyx_n_s_range; -static PyObject *__pyx_n_s_reduce; -static PyObject 
*__pyx_n_s_reduce_cython; -static PyObject *__pyx_n_s_reduce_ex; -static PyObject *__pyx_n_s_scale; -static PyObject *__pyx_n_s_scipy_ndimage_filters; -static PyObject *__pyx_n_s_scipy_stats; -static PyObject *__pyx_n_s_setstate; -static PyObject *__pyx_n_s_setstate_cython; -static PyObject *__pyx_n_s_shape; -static PyObject *__pyx_n_s_size; -static PyObject *__pyx_n_s_start; -static PyObject *__pyx_n_s_step; -static PyObject *__pyx_n_s_stop; -static PyObject *__pyx_kp_s_strided_and_direct; -static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; -static PyObject *__pyx_kp_s_strided_and_indirect; -static PyObject *__pyx_kp_s_stringsource; -static PyObject *__pyx_n_s_struct; -static PyObject *__pyx_n_s_sum; -static PyObject *__pyx_n_s_tau; -static PyObject *__pyx_n_s_test; -static PyObject *__pyx_n_s_trans; -static PyObject *__pyx_n_s_trans_dist; -static PyObject *__pyx_n_s_transition; -static PyObject *__pyx_n_s_transition_distribution; -static PyObject *__pyx_kp_s_unable_to_allocate_array_data; -static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; -static PyObject *__pyx_n_s_unpack; -static PyObject *__pyx_n_s_update; -static PyObject *__pyx_n_s_v_c; -static PyObject *__pyx_n_s_v_p; -static PyObject *__pyx_n_s_viterbi; -static PyObject *__pyx_pf_6madmom_8features_9beats_crf_initial_distribution(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_num_states, PyObject *__pyx_v_interval); /* proto */ -static PyObject *__pyx_pf_6madmom_8features_9beats_crf_2transition_distribution(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_interval, PyObject *__pyx_v_interval_sigma); /* proto */ -static PyObject *__pyx_pf_6madmom_8features_9beats_crf_4normalisation_factors(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_activations, PyObject *__pyx_v_transition_distribution); /* proto */ -static PyObject *__pyx_pf_6madmom_8features_9beats_crf_6best_sequence(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_activations, PyObject *__pyx_v_interval, PyObject *__pyx_v_interval_sigma); /* proto */ -static PyObject *__pyx_pf_6madmom_8features_9beats_crf_8viterbi(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_pi, __Pyx_memviewslice __pyx_v_transition, __Pyx_memviewslice __pyx_v_norm_factor, __Pyx_memviewslice __pyx_v_activations, int __pyx_v_tau); /* proto */ -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ -static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ -static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj 
*__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ -static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ -static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ -static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ -static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject 
*__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ -static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ -static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_float_0_000001; -static PyObject *__pyx_int_0; -static PyObject *__pyx_int_1; -static PyObject *__pyx_int_2; -static PyObject *__pyx_int_184977713; -static PyObject *__pyx_int_neg_1; -static PyObject *__pyx_tuple_; -static PyObject *__pyx_tuple__2; -static PyObject *__pyx_tuple__3; -static PyObject *__pyx_tuple__4; -static PyObject *__pyx_tuple__5; -static PyObject *__pyx_tuple__6; -static PyObject *__pyx_tuple__7; -static PyObject *__pyx_tuple__8; -static PyObject *__pyx_tuple__9; -static PyObject *__pyx_slice__18; -static PyObject *__pyx_tuple__10; -static PyObject *__pyx_tuple__11; -static PyObject *__pyx_tuple__12; -static PyObject *__pyx_tuple__13; -static PyObject *__pyx_tuple__14; -static PyObject *__pyx_tuple__15; -static PyObject *__pyx_tuple__16; -static PyObject *__pyx_tuple__17; -static PyObject *__pyx_tuple__19; -static PyObject *__pyx_tuple__20; -static PyObject *__pyx_tuple__21; -static PyObject *__pyx_tuple__22; -static PyObject *__pyx_tuple__24; -static PyObject *__pyx_tuple__26; -static PyObject *__pyx_tuple__28; -static PyObject *__pyx_tuple__30; -static PyObject *__pyx_tuple__32; -static PyObject *__pyx_tuple__33; -static PyObject *__pyx_tuple__34; -static PyObject *__pyx_tuple__35; -static PyObject *__pyx_tuple__36; -static PyObject *__pyx_tuple__37; -static PyObject *__pyx_codeobj__23; -static PyObject *__pyx_codeobj__25; -static PyObject *__pyx_codeobj__27; -static PyObject 
*__pyx_codeobj__29; -static PyObject *__pyx_codeobj__31; -static PyObject *__pyx_codeobj__38; -/* Late includes */ - -/* "madmom/features/beats_crf.pyx":28 - * - * - * def initial_distribution(num_states, interval): # <<<<<<<<<<<<<< - * """ - * Compute the initial distribution. - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_6madmom_8features_9beats_crf_1initial_distribution(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6madmom_8features_9beats_crf_initial_distribution[] = "initial_distribution(num_states, interval)\n\n Compute the initial distribution.\n\n Parameters\n ----------\n num_states : int\n Number of states in the model.\n interval : int\n Beat interval of the piece [frames].\n\n Returns\n -------\n numpy array\n Initial distribution of the model.\n\n "; -static PyMethodDef __pyx_mdef_6madmom_8features_9beats_crf_1initial_distribution = {"initial_distribution", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_8features_9beats_crf_1initial_distribution, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_8features_9beats_crf_initial_distribution}; -static PyObject *__pyx_pw_6madmom_8features_9beats_crf_1initial_distribution(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_num_states = 0; - PyObject *__pyx_v_interval = 0; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("initial_distribution (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_num_states,&__pyx_n_s_interval,0}; - PyObject* values[2] = {0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_states)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_interval)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("initial_distribution", 1, 2, 2, 1); __PYX_ERR(0, 28, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "initial_distribution") < 0)) __PYX_ERR(0, 28, __pyx_L3_error) - } - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - } - __pyx_v_num_states = values[0]; - __pyx_v_interval = values[1]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("initial_distribution", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 28, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("madmom.features.beats_crf.initial_distribution", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_6madmom_8features_9beats_crf_initial_distribution(__pyx_self, __pyx_v_num_states, __pyx_v_interval); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - 
return __pyx_r; -} - -static PyObject *__pyx_pf_6madmom_8features_9beats_crf_initial_distribution(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_num_states, PyObject *__pyx_v_interval) { - PyObject *__pyx_v_init_dist = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("initial_distribution", 0); - - /* "madmom/features/beats_crf.pyx":48 - * # position of the first beat not to influence the probability of the - * # beat sequence. Normalising would favour shorter intervals. - * init_dist = np.ones(num_states, dtype=np.float32) # <<<<<<<<<<<<<< - * init_dist[interval:] = 0 - * return init_dist - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 48, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ones); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 48, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 48, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_v_num_states); - __Pyx_GIVEREF(__pyx_v_num_states); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_num_states); - __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 48, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 48, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float32); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 48, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 48, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 48, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_init_dist = __pyx_t_5; - __pyx_t_5 = 0; - - /* "madmom/features/beats_crf.pyx":49 - * # beat sequence. Normalising would favour shorter intervals. - * init_dist = np.ones(num_states, dtype=np.float32) - * init_dist[interval:] = 0 # <<<<<<<<<<<<<< - * return init_dist - * - */ - if (__Pyx_PyObject_SetSlice(__pyx_v_init_dist, __pyx_int_0, 0, 0, &__pyx_v_interval, NULL, NULL, 0, 0, 1) < 0) __PYX_ERR(0, 49, __pyx_L1_error) - - /* "madmom/features/beats_crf.pyx":50 - * init_dist = np.ones(num_states, dtype=np.float32) - * init_dist[interval:] = 0 - * return init_dist # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_init_dist); - __pyx_r = __pyx_v_init_dist; - goto __pyx_L0; - - /* "madmom/features/beats_crf.pyx":28 - * - * - * def initial_distribution(num_states, interval): # <<<<<<<<<<<<<< - * """ - * Compute the initial distribution. 
- */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("madmom.features.beats_crf.initial_distribution", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_init_dist); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "madmom/features/beats_crf.pyx":53 - * - * - * def transition_distribution(interval, interval_sigma): # <<<<<<<<<<<<<< - * """ - * Compute the transition distribution between beats. - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_6madmom_8features_9beats_crf_3transition_distribution(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6madmom_8features_9beats_crf_2transition_distribution[] = "transition_distribution(interval, interval_sigma)\n\n Compute the transition distribution between beats.\n\n Parameters\n ----------\n interval : int\n Interval of the piece [frames].\n interval_sigma : float\n Allowed deviation from the interval per beat.\n\n Returns\n -------\n numpy array\n Transition distribution between beats.\n\n "; -static PyMethodDef __pyx_mdef_6madmom_8features_9beats_crf_3transition_distribution = {"transition_distribution", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_8features_9beats_crf_3transition_distribution, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_8features_9beats_crf_2transition_distribution}; -static PyObject *__pyx_pw_6madmom_8features_9beats_crf_3transition_distribution(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_interval = 0; - PyObject *__pyx_v_interval_sigma = 0; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("transition_distribution (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_interval,&__pyx_n_s_interval_sigma,0}; - PyObject* values[2] = {0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_interval)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_interval_sigma)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("transition_distribution", 1, 2, 2, 1); __PYX_ERR(0, 53, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "transition_distribution") < 0)) __PYX_ERR(0, 53, __pyx_L3_error) - } - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - } - __pyx_v_interval = values[0]; - __pyx_v_interval_sigma = values[1]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("transition_distribution", 1, 2, 2, 
PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 53, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("madmom.features.beats_crf.transition_distribution", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_6madmom_8features_9beats_crf_2transition_distribution(__pyx_self, __pyx_v_interval, __pyx_v_interval_sigma); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_6madmom_8features_9beats_crf_2transition_distribution(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_interval, PyObject *__pyx_v_interval_sigma) { - PyObject *__pyx_v_norm = NULL; - PyObject *__pyx_v_move_range = NULL; - PyObject *__pyx_v_trans_dist = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("transition_distribution", 0); - - /* "madmom/features/beats_crf.pyx":70 - * - * """ - * from scipy.stats import norm # <<<<<<<<<<<<<< - * - * move_range = np.arange(interval * 2, dtype=np.float) - */ - __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_n_s_norm); - __Pyx_GIVEREF(__pyx_n_s_norm); - PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_norm); - __pyx_t_2 = __Pyx_Import(__pyx_n_s_scipy_stats, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 70, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_norm); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_t_1); - __pyx_v_norm = __pyx_t_1; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "madmom/features/beats_crf.pyx":72 - * from scipy.stats import norm - * - * move_range = np.arange(interval * 2, dtype=np.float) # <<<<<<<<<<<<<< - * # to avoid floating point hell due to np.log2(0) - * move_range[0] = 0.000001 - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 72, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_arange); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 72, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyNumber_Multiply(__pyx_v_interval, __pyx_int_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 72, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 72, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 72, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 72, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 72, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 72, __pyx_L1_error) - 
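Most of the generated C above is boilerplate that Cython emits for every Python-visible function: argument unpacking, reference counting and error propagation. The actual logic of initial_distribution is only the three source lines quoted in the banner comments. Reassembled as plain Python for orientation (assuming numpy is imported as np at module level, as the repeated np module-global lookups indicate):

def initial_distribution(num_states, interval):
    # Leave the distribution unnormalised so that the position of the
    # first beat does not influence the probability of the beat
    # sequence; normalising would favour shorter intervals.
    init_dist = np.ones(num_states, dtype=np.float32)
    init_dist[interval:] = 0
    return init_dist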
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 72, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_move_range = __pyx_t_5; - __pyx_t_5 = 0; - - /* "madmom/features/beats_crf.pyx":74 - * move_range = np.arange(interval * 2, dtype=np.float) - * # to avoid floating point hell due to np.log2(0) - * move_range[0] = 0.000001 # <<<<<<<<<<<<<< - * - * trans_dist = norm.pdf(np.log2(move_range), - */ - if (unlikely(__Pyx_SetItemInt(__pyx_v_move_range, 0, __pyx_float_0_000001, long, 1, __Pyx_PyInt_From_long, 0, 0, 1) < 0)) __PYX_ERR(0, 74, __pyx_L1_error) - - /* "madmom/features/beats_crf.pyx":76 - * move_range[0] = 0.000001 - * - * trans_dist = norm.pdf(np.log2(move_range), # <<<<<<<<<<<<<< - * loc=np.log2(interval), - * scale=interval_sigma) - */ - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_norm, __pyx_n_s_pdf); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_log2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - } - } - __pyx_t_2 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_3, __pyx_v_move_range) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_move_range); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); - __pyx_t_2 = 0; - - /* "madmom/features/beats_crf.pyx":77 - * - * trans_dist = norm.pdf(np.log2(move_range), - * loc=np.log2(interval), # <<<<<<<<<<<<<< - * scale=interval_sigma) - * trans_dist /= trans_dist.sum() - */ - __pyx_t_2 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_log2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_6); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_6, function); - } - } - __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_4, __pyx_v_interval) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_interval); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_loc, __pyx_t_3) < 0) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "madmom/features/beats_crf.pyx":78 - * trans_dist = norm.pdf(np.log2(move_range), - * loc=np.log2(interval), - * scale=interval_sigma) # <<<<<<<<<<<<<< - * trans_dist /= trans_dist.sum() - * return trans_dist.astype(np.float32) - */ - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_scale, __pyx_v_interval_sigma) < 0) __PYX_ERR(0, 77, __pyx_L1_error) - - /* "madmom/features/beats_crf.pyx":76 - * move_range[0] = 0.000001 - * - * trans_dist = norm.pdf(np.log2(move_range), # <<<<<<<<<<<<<< - * loc=np.log2(interval), - * scale=interval_sigma) - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_trans_dist = __pyx_t_3; - __pyx_t_3 = 0; - - /* "madmom/features/beats_crf.pyx":79 - * loc=np.log2(interval), - * scale=interval_sigma) - * trans_dist /= trans_dist.sum() # <<<<<<<<<<<<<< - * return trans_dist.astype(np.float32) - * - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_trans_dist, __pyx_n_s_sum); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 79, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_1)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - } - } - __pyx_t_3 = (__pyx_t_1) ? 
__Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_1) : __Pyx_PyObject_CallNoArg(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 79, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyNumber_InPlaceDivide(__pyx_v_trans_dist, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 79, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF_SET(__pyx_v_trans_dist, __pyx_t_2); - __pyx_t_2 = 0; - - /* "madmom/features/beats_crf.pyx":80 - * scale=interval_sigma) - * trans_dist /= trans_dist.sum() - * return trans_dist.astype(np.float32) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_trans_dist, __pyx_n_s_astype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float32); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_1)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - } - } - __pyx_t_2 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_1, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_5); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "madmom/features/beats_crf.pyx":53 - * - * - * def transition_distribution(interval, interval_sigma): # <<<<<<<<<<<<<< - * """ - * Compute the transition distribution between beats. - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("madmom.features.beats_crf.transition_distribution", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_norm); - __Pyx_XDECREF(__pyx_v_move_range); - __Pyx_XDECREF(__pyx_v_trans_dist); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "madmom/features/beats_crf.pyx":83 - * - * - * def normalisation_factors(activations, transition_distribution): # <<<<<<<<<<<<<< - * """ - * Compute normalisation factors for model. 
- */ - -/* Python wrapper */ -static PyObject *__pyx_pw_6madmom_8features_9beats_crf_5normalisation_factors(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6madmom_8features_9beats_crf_4normalisation_factors[] = "normalisation_factors(activations, transition_distribution)\n\n Compute normalisation factors for model.\n\n Parameters\n ----------\n activations : numpy array\n Beat activation function of the piece.\n transition_distribution : numpy array\n Transition distribution of the model.\n\n Returns\n -------\n numpy array\n Normalisation factors for model.\n\n "; -static PyMethodDef __pyx_mdef_6madmom_8features_9beats_crf_5normalisation_factors = {"normalisation_factors", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_8features_9beats_crf_5normalisation_factors, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_8features_9beats_crf_4normalisation_factors}; -static PyObject *__pyx_pw_6madmom_8features_9beats_crf_5normalisation_factors(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_activations = 0; - PyObject *__pyx_v_transition_distribution = 0; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("normalisation_factors (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_activations,&__pyx_n_s_transition_distribution,0}; - PyObject* values[2] = {0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_activations)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_transition_distribution)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("normalisation_factors", 1, 2, 2, 1); __PYX_ERR(0, 83, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "normalisation_factors") < 0)) __PYX_ERR(0, 83, __pyx_L3_error) - } - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - } - __pyx_v_activations = values[0]; - __pyx_v_transition_distribution = values[1]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("normalisation_factors", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 83, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("madmom.features.beats_crf.normalisation_factors", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_6madmom_8features_9beats_crf_4normalisation_factors(__pyx_self, __pyx_v_activations, __pyx_v_transition_distribution); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_6madmom_8features_9beats_crf_4normalisation_factors(CYTHON_UNUSED PyObject 
*__pyx_self, PyObject *__pyx_v_activations, PyObject *__pyx_v_transition_distribution) { - PyObject *__pyx_v_correlate1d = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("normalisation_factors", 0); - - /* "madmom/features/beats_crf.pyx":100 - * - * """ - * from scipy.ndimage.filters import correlate1d # <<<<<<<<<<<<<< - * return correlate1d(activations, transition_distribution, - * mode='constant', cval=0, - */ - __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 100, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_n_s_correlate1d); - __Pyx_GIVEREF(__pyx_n_s_correlate1d); - PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_correlate1d); - __pyx_t_2 = __Pyx_Import(__pyx_n_s_scipy_ndimage_filters, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 100, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_correlate1d); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 100, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_t_1); - __pyx_v_correlate1d = __pyx_t_1; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "madmom/features/beats_crf.pyx":101 - * """ - * from scipy.ndimage.filters import correlate1d - * return correlate1d(activations, transition_distribution, # <<<<<<<<<<<<<< - * mode='constant', cval=0, - * origin=-int(transition_distribution.shape[0] / 2)) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_activations); - __Pyx_GIVEREF(__pyx_v_activations); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_activations); - __Pyx_INCREF(__pyx_v_transition_distribution); - __Pyx_GIVEREF(__pyx_v_transition_distribution); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_transition_distribution); - - /* "madmom/features/beats_crf.pyx":102 - * from scipy.ndimage.filters import correlate1d - * return correlate1d(activations, transition_distribution, - * mode='constant', cval=0, # <<<<<<<<<<<<<< - * origin=-int(transition_distribution.shape[0] / 2)) - * - */ - __pyx_t_1 = __Pyx_PyDict_NewPresized(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_mode, __pyx_n_s_constant) < 0) __PYX_ERR(0, 102, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_cval, __pyx_int_0) < 0) __PYX_ERR(0, 102, __pyx_L1_error) - - /* "madmom/features/beats_crf.pyx":103 - * return correlate1d(activations, transition_distribution, - * mode='constant', cval=0, - * origin=-int(transition_distribution.shape[0] / 2)) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_transition_distribution, __pyx_n_s_shape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 103, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_3, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 103, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_4, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 103, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - 
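transition_distribution is similarly compact at the source level: it evaluates a normal density over the base-2 logarithm of all possible beat-to-beat jumps, centred on log2(interval) with standard deviation interval_sigma, and normalises the result. Reassembled from the banner comments (np.float is the old numpy alias for the builtin float, still valid when this module was generated):

def transition_distribution(interval, interval_sigma):
    from scipy.stats import norm

    move_range = np.arange(interval * 2, dtype=np.float)
    # to avoid floating point hell due to np.log2(0)
    move_range[0] = 0.000001

    trans_dist = norm.pdf(np.log2(move_range),
                          loc=np.log2(interval),
                          scale=interval_sigma)
    trans_dist /= trans_dist.sum()
    return trans_dist.astype(np.float32)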
__pyx_t_4 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 103, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Negative(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 103, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_origin, __pyx_t_3) < 0) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "madmom/features/beats_crf.pyx":101 - * """ - * from scipy.ndimage.filters import correlate1d - * return correlate1d(activations, transition_distribution, # <<<<<<<<<<<<<< - * mode='constant', cval=0, - * origin=-int(transition_distribution.shape[0] / 2)) - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_v_correlate1d, __pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "madmom/features/beats_crf.pyx":83 - * - * - * def normalisation_factors(activations, transition_distribution): # <<<<<<<<<<<<<< - * """ - * Compute normalisation factors for model. - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("madmom.features.beats_crf.normalisation_factors", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_correlate1d); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "madmom/features/beats_crf.pyx":106 - * - * - * def best_sequence(activations, interval, interval_sigma): # <<<<<<<<<<<<<< - * """ - * Extract the best beat sequence for a piece with the Viterbi algorithm. 
- */ - -/* Python wrapper */ -static PyObject *__pyx_pw_6madmom_8features_9beats_crf_7best_sequence(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6madmom_8features_9beats_crf_6best_sequence[] = "best_sequence(activations, interval, interval_sigma)\n\n Extract the best beat sequence for a piece with the Viterbi algorithm.\n\n Parameters\n ----------\n activations : numpy array\n Beat activation function of the piece.\n interval : int\n Beat interval of the piece.\n interval_sigma : float\n Allowed deviation from the interval per beat.\n\n Returns\n -------\n beat_pos : numpy array\n Extracted beat positions [frame indices].\n log_prob : float\n Log probability of the beat sequence.\n\n "; -static PyMethodDef __pyx_mdef_6madmom_8features_9beats_crf_7best_sequence = {"best_sequence", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_8features_9beats_crf_7best_sequence, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_8features_9beats_crf_6best_sequence}; -static PyObject *__pyx_pw_6madmom_8features_9beats_crf_7best_sequence(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_activations = 0; - PyObject *__pyx_v_interval = 0; - PyObject *__pyx_v_interval_sigma = 0; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("best_sequence (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_activations,&__pyx_n_s_interval,&__pyx_n_s_interval_sigma,0}; - PyObject* values[3] = {0,0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_activations)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_interval)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("best_sequence", 1, 3, 3, 1); __PYX_ERR(0, 106, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_interval_sigma)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("best_sequence", 1, 3, 3, 2); __PYX_ERR(0, 106, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "best_sequence") < 0)) __PYX_ERR(0, 106, __pyx_L3_error) - } - } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - } - __pyx_v_activations = values[0]; - __pyx_v_interval = values[1]; - __pyx_v_interval_sigma = values[2]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("best_sequence", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 106, __pyx_L3_error) - __pyx_L3_error:; - 
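normalisation_factors, whose generated body appears just above the best_sequence wrapper, is a one-liner around scipy: it cross-correlates the activations with the transition distribution, zero-padding at the borders and shifting the correlation window by half the filter length via the origin argument. From the banner comments:

def normalisation_factors(activations, transition_distribution):
    from scipy.ndimage.filters import correlate1d
    return correlate1d(activations, transition_distribution,
                       mode='constant', cval=0,
                       origin=-int(transition_distribution.shape[0] / 2))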
__Pyx_AddTraceback("madmom.features.beats_crf.best_sequence", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_6madmom_8features_9beats_crf_6best_sequence(__pyx_self, __pyx_v_activations, __pyx_v_interval, __pyx_v_interval_sigma); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_6madmom_8features_9beats_crf_6best_sequence(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_activations, PyObject *__pyx_v_interval, PyObject *__pyx_v_interval_sigma) { - PyObject *__pyx_v_init = NULL; - PyObject *__pyx_v_trans = NULL; - PyObject *__pyx_v_norm_fact = NULL; - PyObject *__pyx_v_log_act = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyObject *__pyx_t_11 = NULL; - int __pyx_t_12; - int __pyx_t_13; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("best_sequence", 0); - - /* "madmom/features/beats_crf.pyx":127 - * - * """ - * init = initial_distribution(activations.shape[0], # <<<<<<<<<<<<<< - * interval) - * trans = transition_distribution(interval, interval_sigma) - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_initial_distribution); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_activations, __pyx_n_s_shape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_3, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "madmom/features/beats_crf.pyx":128 - * """ - * init = initial_distribution(activations.shape[0], - * interval) # <<<<<<<<<<<<<< - * trans = transition_distribution(interval, interval_sigma) - * norm_fact = normalisation_factors(activations, trans) - */ - __pyx_t_3 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_5 = 1; - } - } - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(__pyx_t_2)) { - PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_t_4, __pyx_v_interval}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } else - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { - PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_t_4, __pyx_v_interval}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } else - #endif - { - __pyx_t_6 = PyTuple_New(2+__pyx_t_5); if 
(unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - if (__pyx_t_3) { - __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); __pyx_t_3 = NULL; - } - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_5, __pyx_t_4); - __Pyx_INCREF(__pyx_v_interval); - __Pyx_GIVEREF(__pyx_v_interval); - PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_5, __pyx_v_interval); - __pyx_t_4 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_init = __pyx_t_1; - __pyx_t_1 = 0; - - /* "madmom/features/beats_crf.pyx":129 - * init = initial_distribution(activations.shape[0], - * interval) - * trans = transition_distribution(interval, interval_sigma) # <<<<<<<<<<<<<< - * norm_fact = normalisation_factors(activations, trans) - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_transition_distribution); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 129, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_5 = 1; - } - } - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(__pyx_t_2)) { - PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_v_interval, __pyx_v_interval_sigma}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 129, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_GOTREF(__pyx_t_1); - } else - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { - PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_v_interval, __pyx_v_interval_sigma}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 129, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_GOTREF(__pyx_t_1); - } else - #endif - { - __pyx_t_4 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 129, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__pyx_t_6) { - __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __pyx_t_6 = NULL; - } - __Pyx_INCREF(__pyx_v_interval); - __Pyx_GIVEREF(__pyx_v_interval); - PyTuple_SET_ITEM(__pyx_t_4, 0+__pyx_t_5, __pyx_v_interval); - __Pyx_INCREF(__pyx_v_interval_sigma); - __Pyx_GIVEREF(__pyx_v_interval_sigma); - PyTuple_SET_ITEM(__pyx_t_4, 1+__pyx_t_5, __pyx_v_interval_sigma); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 129, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_trans = __pyx_t_1; - __pyx_t_1 = 0; - - /* "madmom/features/beats_crf.pyx":130 - * interval) - * trans = transition_distribution(interval, interval_sigma) - * norm_fact = normalisation_factors(activations, trans) # <<<<<<<<<<<<<< - * - * # ignore division by zero warnings when taking the logarithm of 0.0, - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_normalisation_factors); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 130, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = NULL; - __pyx_t_5 = 0; - if 
(CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_5 = 1; - } - } - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(__pyx_t_2)) { - PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_activations, __pyx_v_trans}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 130, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_GOTREF(__pyx_t_1); - } else - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { - PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_activations, __pyx_v_trans}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 130, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_GOTREF(__pyx_t_1); - } else - #endif - { - __pyx_t_6 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 130, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - if (__pyx_t_4) { - __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __pyx_t_4 = NULL; - } - __Pyx_INCREF(__pyx_v_activations); - __Pyx_GIVEREF(__pyx_v_activations); - PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_5, __pyx_v_activations); - __Pyx_INCREF(__pyx_v_trans); - __Pyx_GIVEREF(__pyx_v_trans); - PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_5, __pyx_v_trans); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 130, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_norm_fact = __pyx_t_1; - __pyx_t_1 = 0; - - /* "madmom/features/beats_crf.pyx":134 - * # ignore division by zero warnings when taking the logarithm of 0.0, - * # the result -inf is fine anyways! 
- * with np.errstate(divide='ignore'): # <<<<<<<<<<<<<< - * init = np.log(init) - * trans = np.log(trans) - */ - /*with:*/ { - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_errstate); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 134, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_divide, __pyx_n_s_ignore) < 0) __PYX_ERR(0, 134, __pyx_L1_error) - __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_empty_tuple, __pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 134, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_7 = __Pyx_PyObject_LookupSpecial(__pyx_t_6, __pyx_n_s_exit); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 134, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_2 = __Pyx_PyObject_LookupSpecial(__pyx_t_6, __pyx_n_s_enter); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 134, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - } - } - __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallNoArg(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - /*try:*/ { - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); - __Pyx_XGOTREF(__pyx_t_8); - __Pyx_XGOTREF(__pyx_t_9); - __Pyx_XGOTREF(__pyx_t_10); - /*try:*/ { - - /* "madmom/features/beats_crf.pyx":135 - * # the result -inf is fine anyways! - * with np.errstate(divide='ignore'): - * init = np.log(init) # <<<<<<<<<<<<<< - * trans = np.log(trans) - * norm_fact = np.log(norm_fact) - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 135, __pyx_L7_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_log); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 135, __pyx_L7_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_1)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - } - } - __pyx_t_6 = (__pyx_t_1) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_1, __pyx_v_init) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_init); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 135, __pyx_L7_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF_SET(__pyx_v_init, __pyx_t_6); - __pyx_t_6 = 0; - - /* "madmom/features/beats_crf.pyx":136 - * with np.errstate(divide='ignore'): - * init = np.log(init) - * trans = np.log(trans) # <<<<<<<<<<<<<< - * norm_fact = np.log(norm_fact) - * log_act = np.log(activations) - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 136, __pyx_L7_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_log); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 136, __pyx_L7_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - } - } - __pyx_t_6 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_2, __pyx_v_trans) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_trans); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 136, __pyx_L7_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF_SET(__pyx_v_trans, __pyx_t_6); - __pyx_t_6 = 0; - - /* "madmom/features/beats_crf.pyx":137 - * init = np.log(init) - * trans = np.log(trans) - * norm_fact = np.log(norm_fact) # <<<<<<<<<<<<<< - * log_act = np.log(activations) - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 137, __pyx_L7_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_log); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 137, __pyx_L7_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_1)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - } - } - __pyx_t_6 = (__pyx_t_1) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_1, __pyx_v_norm_fact) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_norm_fact); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 137, __pyx_L7_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF_SET(__pyx_v_norm_fact, __pyx_t_6); - __pyx_t_6 = 0; - - /* "madmom/features/beats_crf.pyx":138 - * trans = np.log(trans) - * norm_fact = np.log(norm_fact) - * log_act = np.log(activations) # <<<<<<<<<<<<<< - * - * return viterbi(init, trans, norm_fact, log_act, interval) - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 138, __pyx_L7_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_log); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 138, __pyx_L7_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - } - } - __pyx_t_6 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_2, __pyx_v_activations) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_activations); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 138, __pyx_L7_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_v_log_act = __pyx_t_6; - __pyx_t_6 = 0; - - /* "madmom/features/beats_crf.pyx":134 - * # ignore division by zero warnings when taking the logarithm of 0.0, - * # the result -inf is fine anyways! 
- * with np.errstate(divide='ignore'): # <<<<<<<<<<<<<< - * init = np.log(init) - * trans = np.log(trans) - */ - } - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; - goto __pyx_L12_try_end; - __pyx_L7_error:; - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - /*except:*/ { - __Pyx_AddTraceback("madmom.features.beats_crf.best_sequence", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_1, &__pyx_t_2) < 0) __PYX_ERR(0, 134, __pyx_L9_except_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = PyTuple_Pack(3, __pyx_t_6, __pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 134, __pyx_L9_except_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_4, NULL); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 134, __pyx_L9_except_error) - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_12 = __Pyx_PyObject_IsTrue(__pyx_t_11); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - if (__pyx_t_12 < 0) __PYX_ERR(0, 134, __pyx_L9_except_error) - __pyx_t_13 = ((!(__pyx_t_12 != 0)) != 0); - if (__pyx_t_13) { - __Pyx_GIVEREF(__pyx_t_6); - __Pyx_GIVEREF(__pyx_t_1); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_ErrRestoreWithState(__pyx_t_6, __pyx_t_1, __pyx_t_2); - __pyx_t_6 = 0; __pyx_t_1 = 0; __pyx_t_2 = 0; - __PYX_ERR(0, 134, __pyx_L9_except_error) - } - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - goto __pyx_L8_exception_handled; - } - __pyx_L9_except_error:; - __Pyx_XGIVEREF(__pyx_t_8); - __Pyx_XGIVEREF(__pyx_t_9); - __Pyx_XGIVEREF(__pyx_t_10); - __Pyx_ExceptionReset(__pyx_t_8, __pyx_t_9, __pyx_t_10); - goto __pyx_L1_error; - __pyx_L8_exception_handled:; - __Pyx_XGIVEREF(__pyx_t_8); - __Pyx_XGIVEREF(__pyx_t_9); - __Pyx_XGIVEREF(__pyx_t_10); - __Pyx_ExceptionReset(__pyx_t_8, __pyx_t_9, __pyx_t_10); - __pyx_L12_try_end:; - } - } - /*finally:*/ { - /*normal exit:*/{ - if (__pyx_t_7) { - __pyx_t_10 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_tuple_, NULL); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 134, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - } - goto __pyx_L6; - } - __pyx_L6:; - } - goto __pyx_L16; - __pyx_L3_error:; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - goto __pyx_L1_error; - __pyx_L16:; - } - - /* "madmom/features/beats_crf.pyx":140 - * log_act = np.log(activations) - * - * return viterbi(init, trans, norm_fact, log_act, interval) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_viterbi); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (unlikely(!__pyx_v_log_act)) { __Pyx_RaiseUnboundLocalError("log_act"); __PYX_ERR(0, 140, __pyx_L1_error) } - __pyx_t_6 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_5 = 1; - } - } - #if CYTHON_FAST_PYCALL 
- if (PyFunction_Check(__pyx_t_1)) { - PyObject *__pyx_temp[6] = {__pyx_t_6, __pyx_v_init, __pyx_v_trans, __pyx_v_norm_fact, __pyx_v_log_act, __pyx_v_interval}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_5, 5+__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_GOTREF(__pyx_t_2); - } else - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { - PyObject *__pyx_temp[6] = {__pyx_t_6, __pyx_v_init, __pyx_v_trans, __pyx_v_norm_fact, __pyx_v_log_act, __pyx_v_interval}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_5, 5+__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_GOTREF(__pyx_t_2); - } else - #endif - { - __pyx_t_4 = PyTuple_New(5+__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__pyx_t_6) { - __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __pyx_t_6 = NULL; - } - __Pyx_INCREF(__pyx_v_init); - __Pyx_GIVEREF(__pyx_v_init); - PyTuple_SET_ITEM(__pyx_t_4, 0+__pyx_t_5, __pyx_v_init); - __Pyx_INCREF(__pyx_v_trans); - __Pyx_GIVEREF(__pyx_v_trans); - PyTuple_SET_ITEM(__pyx_t_4, 1+__pyx_t_5, __pyx_v_trans); - __Pyx_INCREF(__pyx_v_norm_fact); - __Pyx_GIVEREF(__pyx_v_norm_fact); - PyTuple_SET_ITEM(__pyx_t_4, 2+__pyx_t_5, __pyx_v_norm_fact); - __Pyx_INCREF(__pyx_v_log_act); - __Pyx_GIVEREF(__pyx_v_log_act); - PyTuple_SET_ITEM(__pyx_t_4, 3+__pyx_t_5, __pyx_v_log_act); - __Pyx_INCREF(__pyx_v_interval); - __Pyx_GIVEREF(__pyx_v_interval); - PyTuple_SET_ITEM(__pyx_t_4, 4+__pyx_t_5, __pyx_v_interval); - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "madmom/features/beats_crf.pyx":106 - * - * - * def best_sequence(activations, interval, interval_sigma): # <<<<<<<<<<<<<< - * """ - * Extract the best beat sequence for a piece with the Viterbi algorithm. 
- */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("madmom.features.beats_crf.best_sequence", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_init); - __Pyx_XDECREF(__pyx_v_trans); - __Pyx_XDECREF(__pyx_v_norm_fact); - __Pyx_XDECREF(__pyx_v_log_act); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "madmom/features/beats_crf.pyx":146 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * def viterbi(float [::1] pi, float[::1] transition, float[::1] norm_factor, # <<<<<<<<<<<<<< - * float [::1] activations, int tau): - * """ - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_6madmom_8features_9beats_crf_9viterbi(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6madmom_8features_9beats_crf_8viterbi[] = "viterbi(float[::1] pi, float[::1] transition, float[::1] norm_factor, float[::1] activations, int tau)\n\n Viterbi algorithm to compute the most likely beat sequence from the\n given activations and the dominant interval.\n\n Parameters\n ----------\n pi : numpy array\n Initial distribution.\n transition : numpy array\n Transition distribution.\n norm_factor : numpy array\n Normalisation factors.\n activations : numpy array\n Beat activations.\n tau : int\n Dominant interval [frames].\n\n Returns\n -------\n beat_pos : numpy array\n Extracted beat positions [frame indices].\n log_prob : float\n Log probability of the beat sequence.\n\n "; -static PyMethodDef __pyx_mdef_6madmom_8features_9beats_crf_9viterbi = {"viterbi", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_8features_9beats_crf_9viterbi, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_8features_9beats_crf_8viterbi}; -static PyObject *__pyx_pw_6madmom_8features_9beats_crf_9viterbi(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - __Pyx_memviewslice __pyx_v_pi = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_transition = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_norm_factor = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_activations = { 0, 0, { 0 }, { 0 }, { 0 } }; - int __pyx_v_tau; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("viterbi (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pi,&__pyx_n_s_transition,&__pyx_n_s_norm_factor,&__pyx_n_s_activations,&__pyx_n_s_tau,0}; - PyObject* values[5] = {0,0,0,0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pi)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - 
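best_sequence, whose generated body makes up most of the preceding block, simply wires the three helpers together and moves everything into the log domain before handing off to the Viterbi decoder. From the banner comments:

def best_sequence(activations, interval, interval_sigma):
    init = initial_distribution(activations.shape[0], interval)
    trans = transition_distribution(interval, interval_sigma)
    norm_fact = normalisation_factors(activations, trans)

    # ignore division by zero warnings when taking the logarithm of 0.0,
    # the result -inf is fine anyways!
    with np.errstate(divide='ignore'):
        init = np.log(init)
        trans = np.log(trans)
        norm_fact = np.log(norm_fact)
        log_act = np.log(activations)

    return viterbi(init, trans, norm_fact, log_act, interval)

The large try/except/finally scaffolding in the generated C implements nothing more than the with-statement's __enter__/__exit__ protocol around np.errstate.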
case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_transition)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("viterbi", 1, 5, 5, 1); __PYX_ERR(0, 146, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_norm_factor)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("viterbi", 1, 5, 5, 2); __PYX_ERR(0, 146, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_activations)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("viterbi", 1, 5, 5, 3); __PYX_ERR(0, 146, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 4: - if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_tau)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("viterbi", 1, 5, 5, 4); __PYX_ERR(0, 146, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "viterbi") < 0)) __PYX_ERR(0, 146, __pyx_L3_error) - } - } else if (PyTuple_GET_SIZE(__pyx_args) != 5) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - } - __pyx_v_pi = __Pyx_PyObject_to_MemoryviewSlice_dc_float(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_pi.memview)) __PYX_ERR(0, 146, __pyx_L3_error) - __pyx_v_transition = __Pyx_PyObject_to_MemoryviewSlice_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_transition.memview)) __PYX_ERR(0, 146, __pyx_L3_error) - __pyx_v_norm_factor = __Pyx_PyObject_to_MemoryviewSlice_dc_float(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_norm_factor.memview)) __PYX_ERR(0, 146, __pyx_L3_error) - __pyx_v_activations = __Pyx_PyObject_to_MemoryviewSlice_dc_float(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_activations.memview)) __PYX_ERR(0, 147, __pyx_L3_error) - __pyx_v_tau = __Pyx_PyInt_As_int(values[4]); if (unlikely((__pyx_v_tau == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 147, __pyx_L3_error) - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("viterbi", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 146, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("madmom.features.beats_crf.viterbi", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_6madmom_8features_9beats_crf_8viterbi(__pyx_self, __pyx_v_pi, __pyx_v_transition, __pyx_v_norm_factor, __pyx_v_activations, __pyx_v_tau); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_6madmom_8features_9beats_crf_8viterbi(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_pi, __Pyx_memviewslice __pyx_v_transition, __Pyx_memviewslice __pyx_v_norm_factor, __Pyx_memviewslice __pyx_v_activations, int __pyx_v_tau) { - int __pyx_v_num_st; - int __pyx_v_num_tr; - int __pyx_v_num_x; - __Pyx_memviewslice __pyx_v_v_c = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_v_p = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_bps = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_path = { 0, 0, { 0 }, { 0 }, { 0 } }; - int __pyx_v_k; - int __pyx_v_i; - int __pyx_v_j; - int 
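
The wrapper above accepts exactly five arguments and converts the first four with `__Pyx_PyObject_to_MemoryviewSlice_dc_float(..., PyBUF_WRITABLE)`, so the caller must pass writable, C-contiguous, 1-D float32 arrays. A minimal usage sketch follows; it assumes madmom is installed, that all score inputs are already in the log domain (the recursion below adds terms rather than multiplying probabilities), and the sizes are purely illustrative:

```python
import numpy as np
from madmom.features.beats_crf import viterbi  # the compiled function wrapped above

num_st, tau = 100, 10                                   # frames and dominant interval
pi = np.log(np.full(num_st, 1.0 / num_st, dtype=np.float32))      # initial distribution
transition = np.log(np.full(5, 0.2, dtype=np.float32))            # look-back distribution
norm_factor = np.zeros(num_st, dtype=np.float32)                  # normalisation factors
activations = np.log(np.random.rand(num_st).astype(np.float32) + 1e-6)

path, log_prob = viterbi(pi, transition, norm_factor, activations, tau)
print(path, log_prob)
```
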
__pyx_v_next_state; - double __pyx_v_new_prob; - double __pyx_v_path_prob; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_t_8 = { 0, 0, { 0 }, { 0 }, { 0 } }; - int __pyx_t_9; - int __pyx_t_10; - int __pyx_t_11; - Py_ssize_t __pyx_t_12; - Py_ssize_t __pyx_t_13; - Py_ssize_t __pyx_t_14; - Py_ssize_t __pyx_t_15; - long __pyx_t_16; - long __pyx_t_17; - int __pyx_t_18; - int __pyx_t_19; - int __pyx_t_20; - int __pyx_t_21; - int __pyx_t_22; - __Pyx_memviewslice __pyx_t_23 = { 0, 0, { 0 }, { 0 }, { 0 } }; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("viterbi", 0); - - /* "madmom/features/beats_crf.pyx":174 - * """ - * # number of states - * cdef int num_st = activations.shape[0] # <<<<<<<<<<<<<< - * # number of transitions - * cdef int num_tr = transition.shape[0] - */ - __pyx_v_num_st = (__pyx_v_activations.shape[0]); - - /* "madmom/features/beats_crf.pyx":176 - * cdef int num_st = activations.shape[0] - * # number of transitions - * cdef int num_tr = transition.shape[0] # <<<<<<<<<<<<<< - * # number of beat variables - * cdef int num_x = num_st / tau - */ - __pyx_v_num_tr = (__pyx_v_transition.shape[0]); - - /* "madmom/features/beats_crf.pyx":178 - * cdef int num_tr = transition.shape[0] - * # number of beat variables - * cdef int num_x = num_st / tau # <<<<<<<<<<<<<< - * - * # current viterbi variables - */ - __pyx_v_num_x = (__pyx_v_num_st / __pyx_v_tau); - - /* "madmom/features/beats_crf.pyx":181 - * - * # current viterbi variables - * cdef float [::1] v_c = np.empty(num_st, dtype=np.float32) # <<<<<<<<<<<<<< - * # previous viterbi variables. 
will be initialized with prior (first beat) - * cdef float [::1] v_p = np.empty(num_st, dtype=np.float32) - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 181, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_empty); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 181, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_num_st); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 181, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 181, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 181, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 181, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float32); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 181, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 181, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 181, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dc_float(__pyx_t_5, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 181, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_v_c = __pyx_t_6; - __pyx_t_6.memview = NULL; - __pyx_t_6.data = NULL; - - /* "madmom/features/beats_crf.pyx":183 - * cdef float [::1] v_c = np.empty(num_st, dtype=np.float32) - * # previous viterbi variables. 
will be initialized with prior (first beat) - * cdef float [::1] v_p = np.empty(num_st, dtype=np.float32) # <<<<<<<<<<<<<< - * # back-tracking pointers; - * cdef long [:, ::1] bps = np.empty((num_x - 1, num_st), dtype=np.int) - */ - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 183, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_empty); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 183, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_num_st); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 183, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 183, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 183, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 183, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_float32); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 183, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_4) < 0) __PYX_ERR(0, 183, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 183, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dc_float(__pyx_t_4, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 183, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_v_v_p = __pyx_t_6; - __pyx_t_6.memview = NULL; - __pyx_t_6.data = NULL; - - /* "madmom/features/beats_crf.pyx":185 - * cdef float [::1] v_p = np.empty(num_st, dtype=np.float32) - * # back-tracking pointers; - * cdef long [:, ::1] bps = np.empty((num_x - 1, num_st), dtype=np.int) # <<<<<<<<<<<<<< - * # back tracked path, a.k.a. 
path sequence - * cdef long [::1] path = np.empty(num_x, dtype=np.int) - */ - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 185, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_empty); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 185, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyInt_From_long((__pyx_v_num_x - 1)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 185, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_num_st); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 185, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 185, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3); - __pyx_t_4 = 0; - __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 185, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 185, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 185, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_int); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_2) < 0) __PYX_ERR(0, 185, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_long(__pyx_t_2, PyBUF_WRITABLE); if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 185, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_bps = __pyx_t_7; - __pyx_t_7.memview = NULL; - __pyx_t_7.data = NULL; - - /* "madmom/features/beats_crf.pyx":187 - * cdef long [:, ::1] bps = np.empty((num_x - 1, num_st), dtype=np.int) - * # back tracked path, a.k.a. path sequence - * cdef long [::1] path = np.empty(num_x, dtype=np.int) # <<<<<<<<<<<<<< - * - * # counters etc. 
- */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_empty); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_num_x); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_int); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_4) < 0) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_dc_long(__pyx_t_4, PyBUF_WRITABLE); if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 187, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_v_path = __pyx_t_8; - __pyx_t_8.memview = NULL; - __pyx_t_8.data = NULL; - - /* "madmom/features/beats_crf.pyx":194 - * - * # init first beat - * for i in range(num_st): # <<<<<<<<<<<<<< - * v_p[i] = pi[i] + activations[i] + norm_factor[i] - * - */ - __pyx_t_9 = __pyx_v_num_st; - __pyx_t_10 = __pyx_t_9; - for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { - __pyx_v_i = __pyx_t_11; - - /* "madmom/features/beats_crf.pyx":195 - * # init first beat - * for i in range(num_st): - * v_p[i] = pi[i] + activations[i] + norm_factor[i] # <<<<<<<<<<<<<< - * - * # iterate over all beats; the 1st beat is given by prior - */ - __pyx_t_12 = __pyx_v_i; - __pyx_t_13 = __pyx_v_i; - __pyx_t_14 = __pyx_v_i; - __pyx_t_15 = __pyx_v_i; - *((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_v_p.data) + __pyx_t_15)) )) = (((*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_pi.data) + __pyx_t_12)) ))) + (*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_activations.data) + __pyx_t_13)) )))) + (*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_norm_factor.data) + __pyx_t_14)) )))); - } - - /* "madmom/features/beats_crf.pyx":198 - * - * # iterate over all beats; the 1st beat is given by prior - * for k in range(num_x - 1): # <<<<<<<<<<<<<< - * # reset all current viterbi variables - * v_c[:] = -INFINITY - */ - __pyx_t_16 = (__pyx_v_num_x - 1); - __pyx_t_17 = __pyx_t_16; - for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_17; __pyx_t_9+=1) { - __pyx_v_k = __pyx_t_9; - - /* "madmom/features/beats_crf.pyx":200 - * for k in range(num_x - 1): - * # reset all current viterbi variables - * v_c[:] = -INFINITY # <<<<<<<<<<<<<< - * - * # find the best transition for each state i - */ - { - float __pyx_temp_scalar = (-NPY_INFINITY); - { - Py_ssize_t 
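
In NumPy terms, the "init first beat" loop just emitted and the per-step reset of the current Viterbi variables amount to the following sketch (variable names mirror the `.pyx` source; `num_x = num_st // tau` uses C integer division; dummy zero arrays stand in for real inputs):

```python
import numpy as np

num_st = 100                                    # illustrative
pi = np.zeros(num_st, np.float32)
activations = np.zeros(num_st, np.float32)
norm_factor = np.zeros(num_st, np.float32)

# "init first beat": previous Viterbi variables start from the prior
v_p = pi + activations + norm_factor
# top of every outer step: current variables reset to -inf
v_c = np.full_like(v_p, -np.inf)
```
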
__pyx_temp_extent = __pyx_v_v_c.shape[0]; - Py_ssize_t __pyx_temp_idx; - float *__pyx_temp_pointer = (float *) __pyx_v_v_c.data; - for (__pyx_temp_idx = 0; __pyx_temp_idx < __pyx_temp_extent; __pyx_temp_idx++) { - *((float *) __pyx_temp_pointer) = __pyx_temp_scalar; - __pyx_temp_pointer += 1; - } - } - } - - /* "madmom/features/beats_crf.pyx":203 - * - * # find the best transition for each state i - * for i in range(num_st): # <<<<<<<<<<<<<< - * # j is the number of frames we look back - * for j in range(min(i, num_tr)): - */ - __pyx_t_10 = __pyx_v_num_st; - __pyx_t_11 = __pyx_t_10; - for (__pyx_t_18 = 0; __pyx_t_18 < __pyx_t_11; __pyx_t_18+=1) { - __pyx_v_i = __pyx_t_18; - - /* "madmom/features/beats_crf.pyx":205 - * for i in range(num_st): - * # j is the number of frames we look back - * for j in range(min(i, num_tr)): # <<<<<<<<<<<<<< - * # Important remark: the actual computation we'd have to do here - * # is v_p[i - j] + norm_factor[i - j] + transition[j] + - */ - __pyx_t_19 = __pyx_v_num_tr; - __pyx_t_20 = __pyx_v_i; - if (((__pyx_t_19 < __pyx_t_20) != 0)) { - __pyx_t_21 = __pyx_t_19; - } else { - __pyx_t_21 = __pyx_t_20; - } - __pyx_t_19 = __pyx_t_21; - __pyx_t_21 = __pyx_t_19; - for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_21; __pyx_t_20+=1) { - __pyx_v_j = __pyx_t_20; - - /* "madmom/features/beats_crf.pyx":214 - * # if we immediately add the normalisation factor to v_c[i], - * # we can skip adding norm_factor[i - j] for each v_p[i - j]. - * new_prob = v_p[i - j] + transition[j] # <<<<<<<<<<<<<< - * if new_prob > v_c[i]: - * v_c[i] = new_prob - */ - __pyx_t_14 = (__pyx_v_i - __pyx_v_j); - __pyx_t_13 = __pyx_v_j; - __pyx_v_new_prob = ((*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_v_p.data) + __pyx_t_14)) ))) + (*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_transition.data) + __pyx_t_13)) )))); - - /* "madmom/features/beats_crf.pyx":215 - * # we can skip adding norm_factor[i - j] for each v_p[i - j]. - * new_prob = v_p[i - j] + transition[j] - * if new_prob > v_c[i]: # <<<<<<<<<<<<<< - * v_c[i] = new_prob - * bps[k, i] = i - j - */ - __pyx_t_13 = __pyx_v_i; - __pyx_t_22 = ((__pyx_v_new_prob > (*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_v_c.data) + __pyx_t_13)) )))) != 0); - if (__pyx_t_22) { - - /* "madmom/features/beats_crf.pyx":216 - * new_prob = v_p[i - j] + transition[j] - * if new_prob > v_c[i]: - * v_c[i] = new_prob # <<<<<<<<<<<<<< - * bps[k, i] = i - j - * - */ - __pyx_t_13 = __pyx_v_i; - *((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_v_c.data) + __pyx_t_13)) )) = __pyx_v_new_prob; - - /* "madmom/features/beats_crf.pyx":217 - * if new_prob > v_c[i]: - * v_c[i] = new_prob - * bps[k, i] = i - j # <<<<<<<<<<<<<< - * - * # Add activation and norm_factor. For the last random variable, - */ - __pyx_t_13 = __pyx_v_k; - __pyx_t_14 = __pyx_v_i; - *((long *) ( /* dim=1 */ ((char *) (((long *) ( /* dim=0 */ (__pyx_v_bps.data + __pyx_t_13 * __pyx_v_bps.strides[0]) )) + __pyx_t_14)) )) = (__pyx_v_i - __pyx_v_j); - - /* "madmom/features/beats_crf.pyx":215 - * # we can skip adding norm_factor[i - j] for each v_p[i - j]. - * new_prob = v_p[i - j] + transition[j] - * if new_prob > v_c[i]: # <<<<<<<<<<<<<< - * v_c[i] = new_prob - * bps[k, i] = i - j - */ - } - } - - /* "madmom/features/beats_crf.pyx":221 - * # Add activation and norm_factor. 
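
The doubly nested maximisation above, one step of the recursion, reads as follows in plain Python/NumPy. Ties resolve to the smallest look-back `j`, exactly like the strict `>` update in the generated C (`argmax` also returns the first maximum):

```python
import numpy as np

def best_transitions(v_p, transition):
    """One Viterbi step: for each state i, pick the best predecessor i - j."""
    num_st, num_tr = len(v_p), len(transition)
    v_c = np.full(num_st, -np.inf, dtype=v_p.dtype)
    bp = np.zeros(num_st, dtype=np.int64)       # back-pointers for this step
    for i in range(num_st):
        j = np.arange(min(i, num_tr))           # look-back window, j < min(i, num_tr)
        if j.size:
            cand = v_p[i - j] + transition[j]   # norm_factor is folded in later
            best = int(cand.argmax())           # first max == smallest j
            v_c[i], bp[i] = cand[best], i - best
    return v_c, bp
```
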
For the last random variable, - * # we'll subtract norm_factor later when searching the maximum - * v_c[i] += activations[i] + norm_factor[i] # <<<<<<<<<<<<<< - * - * v_p, v_c = v_c, v_p - */ - __pyx_t_14 = __pyx_v_i; - __pyx_t_13 = __pyx_v_i; - __pyx_t_12 = __pyx_v_i; - *((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_v_c.data) + __pyx_t_12)) )) += ((*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_activations.data) + __pyx_t_14)) ))) + (*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_norm_factor.data) + __pyx_t_13)) )))); - } - - /* "madmom/features/beats_crf.pyx":223 - * v_c[i] += activations[i] + norm_factor[i] - * - * v_p, v_c = v_c, v_p # <<<<<<<<<<<<<< - * - * # add the final best state to the path - */ - __pyx_t_6 = __pyx_v_v_c; - __PYX_INC_MEMVIEW(&__pyx_t_6, 1); - __pyx_t_23 = __pyx_v_v_p; - __PYX_INC_MEMVIEW(&__pyx_t_23, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_v_p, 1); - __pyx_v_v_p = __pyx_t_6; - __pyx_t_6.memview = NULL; - __pyx_t_6.data = NULL; - __PYX_XDEC_MEMVIEW(&__pyx_v_v_c, 1); - __pyx_v_v_c = __pyx_t_23; - __pyx_t_23.memview = NULL; - __pyx_t_23.data = NULL; - } - - /* "madmom/features/beats_crf.pyx":226 - * - * # add the final best state to the path - * path_prob = -INFINITY # <<<<<<<<<<<<<< - * for i in range(num_st): - * # subtract the norm factor because they shouldn't have been added - */ - __pyx_v_path_prob = (-NPY_INFINITY); - - /* "madmom/features/beats_crf.pyx":227 - * # add the final best state to the path - * path_prob = -INFINITY - * for i in range(num_st): # <<<<<<<<<<<<<< - * # subtract the norm factor because they shouldn't have been added - * # for the last random variable - */ - __pyx_t_9 = __pyx_v_num_st; - __pyx_t_10 = __pyx_t_9; - for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { - __pyx_v_i = __pyx_t_11; - - /* "madmom/features/beats_crf.pyx":230 - * # subtract the norm factor because they shouldn't have been added - * # for the last random variable - * v_p[i] -= norm_factor[i] # <<<<<<<<<<<<<< - * if v_p[i] > path_prob: - * next_state = i - */ - __pyx_t_13 = __pyx_v_i; - __pyx_t_14 = __pyx_v_i; - *((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_v_p.data) + __pyx_t_14)) )) -= (*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_norm_factor.data) + __pyx_t_13)) ))); - - /* "madmom/features/beats_crf.pyx":231 - * # for the last random variable - * v_p[i] -= norm_factor[i] - * if v_p[i] > path_prob: # <<<<<<<<<<<<<< - * next_state = i - * path_prob = v_p[i] - */ - __pyx_t_13 = __pyx_v_i; - __pyx_t_22 = (((*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_v_p.data) + __pyx_t_13)) ))) > __pyx_v_path_prob) != 0); - if (__pyx_t_22) { - - /* "madmom/features/beats_crf.pyx":232 - * v_p[i] -= norm_factor[i] - * if v_p[i] > path_prob: - * next_state = i # <<<<<<<<<<<<<< - * path_prob = v_p[i] - * path[num_x - 1] = next_state - */ - __pyx_v_next_state = __pyx_v_i; - - /* "madmom/features/beats_crf.pyx":233 - * if v_p[i] > path_prob: - * next_state = i - * path_prob = v_p[i] # <<<<<<<<<<<<<< - * path[num_x - 1] = next_state - * - */ - __pyx_t_13 = __pyx_v_i; - __pyx_v_path_prob = (*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_v_p.data) + __pyx_t_13)) ))); - - /* "madmom/features/beats_crf.pyx":231 - * # for the last random variable - * v_p[i] -= norm_factor[i] - * if v_p[i] > path_prob: # <<<<<<<<<<<<<< - * next_state = i - * path_prob = v_p[i] - */ - } - } - - /* "madmom/features/beats_crf.pyx":234 - * next_state = i - * path_prob = v_p[i] - * path[num_x - 1] = next_state # <<<<<<<<<<<<<< - * - * # track 
the path backwards - */ - __pyx_t_13 = (__pyx_v_num_x - 1); - *((long *) ( /* dim=0 */ ((char *) (((long *) __pyx_v_path.data) + __pyx_t_13)) )) = __pyx_v_next_state; - - /* "madmom/features/beats_crf.pyx":237 - * - * # track the path backwards - * for i in range(num_x - 2, -1, -1): # <<<<<<<<<<<<<< - * next_state = bps[i, next_state] - * path[i] = next_state - */ - for (__pyx_t_9 = (__pyx_v_num_x - 2); __pyx_t_9 > -1; __pyx_t_9-=1) { - __pyx_v_i = __pyx_t_9; - - /* "madmom/features/beats_crf.pyx":238 - * # track the path backwards - * for i in range(num_x - 2, -1, -1): - * next_state = bps[i, next_state] # <<<<<<<<<<<<<< - * path[i] = next_state - * - */ - __pyx_t_13 = __pyx_v_i; - __pyx_t_14 = __pyx_v_next_state; - __pyx_v_next_state = (*((long *) ( /* dim=1 */ ((char *) (((long *) ( /* dim=0 */ (__pyx_v_bps.data + __pyx_t_13 * __pyx_v_bps.strides[0]) )) + __pyx_t_14)) ))); - - /* "madmom/features/beats_crf.pyx":239 - * for i in range(num_x - 2, -1, -1): - * next_state = bps[i, next_state] - * path[i] = next_state # <<<<<<<<<<<<<< - * - * # return the best sequence and its log probability - */ - __pyx_t_14 = __pyx_v_i; - *((long *) ( /* dim=0 */ ((char *) (((long *) __pyx_v_path.data) + __pyx_t_14)) )) = __pyx_v_next_state; - } - - /* "madmom/features/beats_crf.pyx":242 - * - * # return the best sequence and its log probability - * return np.asarray(path), path_prob # <<<<<<<<<<<<<< - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 242, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_asarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 242, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_path, 1, (PyObject *(*)(char *)) __pyx_memview_get_long, (int (*)(char *, PyObject *)) __pyx_memview_set_long, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 242, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_1)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - } - } - __pyx_t_4 = (__pyx_t_1) ? 
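
The final-state selection and the backward pass above, as a self-contained sketch (illustrative shapes; `bps` stands for the back-pointers filled in during the forward pass, and the normalisation factors are removed again before taking the maximum, matching the comment in the source):

```python
import numpy as np

num_st, num_x = 100, 10                               # illustrative
v_p = np.random.rand(num_st).astype(np.float32)       # last-step Viterbi variables
norm_factor = np.zeros(num_st, np.float32)
bps = np.zeros((num_x - 1, num_st), dtype=np.int64)   # forward-pass back-pointers

v_p -= norm_factor                    # undo the factors added in the final step
next_state = int(np.argmax(v_p))      # best final state
path_prob = float(v_p[next_state])    # log probability of the sequence

path = np.empty(num_x, dtype=np.int64)
path[num_x - 1] = next_state
for i in range(num_x - 2, -1, -1):    # track the path backwards
    next_state = bps[i, next_state]
    path[i] = next_state
```
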
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_1, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 242, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyFloat_FromDouble(__pyx_v_path_prob); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 242, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 242, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); - __pyx_t_4 = 0; - __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "madmom/features/beats_crf.pyx":146 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * def viterbi(float [::1] pi, float[::1] transition, float[::1] norm_factor, # <<<<<<<<<<<<<< - * float [::1] activations, int tau): - * """ - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); - __PYX_XDEC_MEMVIEW(&__pyx_t_7, 1); - __PYX_XDEC_MEMVIEW(&__pyx_t_8, 1); - __PYX_XDEC_MEMVIEW(&__pyx_t_23, 1); - __Pyx_AddTraceback("madmom.features.beats_crf.viterbi", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __PYX_XDEC_MEMVIEW(&__pyx_v_v_c, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_v_p, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_bps, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_path, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_pi, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_transition, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_norm_factor, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_activations, 1); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":735 - * ctypedef npy_cdouble complex_t - * - * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(1, <void*>a) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":736 - * - * cdef inline object PyArray_MultiIterNew1(a): - * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew2(a, b): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 736, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":735 - * ctypedef npy_cdouble complex_t - * - * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(1, <void*>a) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* 
"../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":738 - * return PyArray_MultiIterNew(1, <void*>a) - * - * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(2, <void*>a, <void*>b) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":739 - * - * cdef inline object PyArray_MultiIterNew2(a, b): - * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 739, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":738 - * return PyArray_MultiIterNew(1, <void*>a) - * - * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(2, <void*>a, <void*>b) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":741 - * return PyArray_MultiIterNew(2, <void*>a, <void*>b) - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":742 - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): - * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 742, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":741 - * return PyArray_MultiIterNew(2, <void*>a, <void*>b) - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":744 - * return PyArray_MultiIterNew(3, <void*>a, 
<void*>b, <void*> c) - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":745 - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): - * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 745, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":744 - * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":747 - * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":748 - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< - * - * cdef inline tuple PyDataType_SHAPE(dtype d): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 748, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":747 - * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - 
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":750 - * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) - * - * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< - * if PyDataType_HASSUBARRAY(d): - * return <tuple>d.subarray.shape - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":751 - * - * cdef inline tuple PyDataType_SHAPE(dtype d): - * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< - * return <tuple>d.subarray.shape - * else: - */ - __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); - if (__pyx_t_1) { - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":752 - * cdef inline tuple PyDataType_SHAPE(dtype d): - * if PyDataType_HASSUBARRAY(d): - * return <tuple>d.subarray.shape # <<<<<<<<<<<<<< - * else: - * return () - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); - __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); - goto __pyx_L0; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":751 - * - * cdef inline tuple PyDataType_SHAPE(dtype d): - * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< - * return <tuple>d.subarray.shape - * else: - */ - } - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":754 - * return <tuple>d.subarray.shape - * else: - * return () # <<<<<<<<<<<<<< - * - * - */ - /*else*/ { - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_empty_tuple); - __pyx_r = __pyx_empty_tuple; - goto __pyx_L0; - } - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":750 - * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) - * - * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< - * if PyDataType_HASSUBARRAY(d): - * return <tuple>d.subarray.shape - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":931 - * int _import_umath() except -1 - * - * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< - * Py_INCREF(base) # important to do this before stealing the reference below! - * PyArray_SetBaseObject(arr, base) - */ - -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("set_array_base", 0); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":932 - * - * cdef inline void set_array_base(ndarray arr, object base): - * Py_INCREF(base) # important to do this before stealing the reference below! # <<<<<<<<<<<<<< - * PyArray_SetBaseObject(arr, base) - * - */ - Py_INCREF(__pyx_v_base); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":933 - * cdef inline void set_array_base(ndarray arr, object base): - * Py_INCREF(base) # important to do this before stealing the reference below! 
- * PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<< - * - * cdef inline object get_array_base(ndarray arr): - */ - (void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base)); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":931 - * int _import_umath() except -1 - * - * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< - * Py_INCREF(base) # important to do this before stealing the reference below! - * PyArray_SetBaseObject(arr, base) - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":935 - * PyArray_SetBaseObject(arr, base) - * - * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< - * base = PyArray_BASE(arr) - * if base is NULL: - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { - PyObject *__pyx_v_base; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("get_array_base", 0); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":936 - * - * cdef inline object get_array_base(ndarray arr): - * base = PyArray_BASE(arr) # <<<<<<<<<<<<<< - * if base is NULL: - * return None - */ - __pyx_v_base = PyArray_BASE(__pyx_v_arr); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":937 - * cdef inline object get_array_base(ndarray arr): - * base = PyArray_BASE(arr) - * if base is NULL: # <<<<<<<<<<<<<< - * return None - * return <object>base - */ - __pyx_t_1 = ((__pyx_v_base == NULL) != 0); - if (__pyx_t_1) { - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":938 - * base = PyArray_BASE(arr) - * if base is NULL: - * return None # <<<<<<<<<<<<<< - * return <object>base - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":937 - * cdef inline object get_array_base(ndarray arr): - * base = PyArray_BASE(arr) - * if base is NULL: # <<<<<<<<<<<<<< - * return None - * return <object>base - */ - } - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":939 - * if base is NULL: - * return None - * return <object>base # <<<<<<<<<<<<<< - * - * # Versions of the import_* functions which are more suitable for - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_base)); - __pyx_r = ((PyObject *)__pyx_v_base); - goto __pyx_L0; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":935 - * PyArray_SetBaseObject(arr, base) - * - * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< - * base = PyArray_BASE(arr) - * if base is NULL: - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":943 - * # Versions of the import_* functions which are more suitable for - * # Cython code. 
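
`set_array_base`/`get_array_base` implement the usual NumPy ownership link: the base object keeps a view's underlying buffer alive, and `Py_INCREF` must happen before `PyArray_SetBaseObject` because the latter steals the reference. The same link is observable from Python via `.base`, as this sketch shows:

```python
import numpy as np

owner = np.arange(10)
view = owner[2:6]            # slicing sets the base, as set_array_base does in C
assert view.base is owner    # what get_array_base returns
print(view.base is owner)    # True
```
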
- * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< - * try: - * __pyx_import_array() - */ - -static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("import_array", 0); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":944 - * # Cython code. - * cdef inline int import_array() except -1: - * try: # <<<<<<<<<<<<<< - * __pyx_import_array() - * except Exception: - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_1); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_3); - /*try:*/ { - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":945 - * cdef inline int import_array() except -1: - * try: - * __pyx_import_array() # <<<<<<<<<<<<<< - * except Exception: - * raise ImportError("numpy.core.multiarray failed to import") - */ - __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 945, __pyx_L3_error) - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":944 - * # Cython code. - * cdef inline int import_array() except -1: - * try: # <<<<<<<<<<<<<< - * __pyx_import_array() - * except Exception: - */ - } - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - goto __pyx_L8_try_end; - __pyx_L3_error:; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":946 - * try: - * __pyx_import_array() - * except Exception: # <<<<<<<<<<<<<< - * raise ImportError("numpy.core.multiarray failed to import") - * - */ - __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); - if (__pyx_t_4) { - __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 946, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GOTREF(__pyx_t_6); - __Pyx_GOTREF(__pyx_t_7); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":947 - * __pyx_import_array() - * except Exception: - * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< - * - * cdef inline int import_umath() except -1: - */ - __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 947, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_Raise(__pyx_t_8, 0, 0, 0); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __PYX_ERR(1, 947, __pyx_L5_except_error) - } - goto __pyx_L5_except_error; - __pyx_L5_except_error:; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":944 - * # Cython code. 
- * cdef inline int import_array() except -1: - * try: # <<<<<<<<<<<<<< - * __pyx_import_array() - * except Exception: - */ - __Pyx_XGIVEREF(__pyx_t_1); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); - goto __pyx_L1_error; - __pyx_L8_try_end:; - } - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":943 - * # Versions of the import_* functions which are more suitable for - * # Cython code. - * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< - * try: - * __pyx_import_array() - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":949 - * raise ImportError("numpy.core.multiarray failed to import") - * - * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< - * try: - * _import_umath() - */ - -static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("import_umath", 0); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":950 - * - * cdef inline int import_umath() except -1: - * try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_1); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_3); - /*try:*/ { - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":951 - * cdef inline int import_umath() except -1: - * try: - * _import_umath() # <<<<<<<<<<<<<< - * except Exception: - * raise ImportError("numpy.core.umath failed to import") - */ - __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 951, __pyx_L3_error) - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":950 - * - * cdef inline int import_umath() except -1: - * try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - } - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - goto __pyx_L8_try_end; - __pyx_L3_error:; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":952 - * try: - * _import_umath() - * except Exception: # <<<<<<<<<<<<<< - * raise ImportError("numpy.core.umath failed to import") - * - */ - __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); - if (__pyx_t_4) { - __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 952, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GOTREF(__pyx_t_6); - __Pyx_GOTREF(__pyx_t_7); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":953 - * _import_umath() - * except Exception: - * raise 
ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< - * - * cdef inline int import_ufunc() except -1: - */ - __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 953, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_Raise(__pyx_t_8, 0, 0, 0); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __PYX_ERR(1, 953, __pyx_L5_except_error) - } - goto __pyx_L5_except_error; - __pyx_L5_except_error:; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":950 - * - * cdef inline int import_umath() except -1: - * try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - __Pyx_XGIVEREF(__pyx_t_1); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); - goto __pyx_L1_error; - __pyx_L8_try_end:; - } - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":949 - * raise ImportError("numpy.core.multiarray failed to import") - * - * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< - * try: - * _import_umath() - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":955 - * raise ImportError("numpy.core.umath failed to import") - * - * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< - * try: - * _import_umath() - */ - -static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("import_ufunc", 0); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":956 - * - * cdef inline int import_ufunc() except -1: - * try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_1); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_3); - /*try:*/ { - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":957 - * cdef inline int import_ufunc() except -1: - * try: - * _import_umath() # <<<<<<<<<<<<<< - * except Exception: - * raise ImportError("numpy.core.umath failed to import") - */ - __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L3_error) - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":956 - * - * cdef inline int import_ufunc() except -1: - * try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - } - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - goto __pyx_L8_try_end; - __pyx_L3_error:; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":958 - * try: - * _import_umath() - * except Exception: # <<<<<<<<<<<<<< - * raise ImportError("numpy.core.umath failed to import") - * - */ - __pyx_t_4 
= __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); - if (__pyx_t_4) { - __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 958, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GOTREF(__pyx_t_6); - __Pyx_GOTREF(__pyx_t_7); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":959 - * _import_umath() - * except Exception: - * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< - * - * cdef extern from *: - */ - __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 959, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_Raise(__pyx_t_8, 0, 0, 0); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __PYX_ERR(1, 959, __pyx_L5_except_error) - } - goto __pyx_L5_except_error; - __pyx_L5_except_error:; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":956 - * - * cdef inline int import_ufunc() except -1: - * try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - __Pyx_XGIVEREF(__pyx_t_1); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); - goto __pyx_L1_error; - __pyx_L8_try_end:; - } - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":955 - * raise ImportError("numpy.core.umath failed to import") - * - * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< - * try: - * _import_umath() - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":969 - * - * - * cdef inline bint is_timedelta64_object(object obj): # <<<<<<<<<<<<<< - * """ - * Cython equivalent of `isinstance(obj, np.timedelta64)` - */ - -static CYTHON_INLINE int __pyx_f_5numpy_is_timedelta64_object(PyObject *__pyx_v_obj) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("is_timedelta64_object", 0); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":981 - * bool - * """ - * return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = PyObject_TypeCheck(__pyx_v_obj, (&PyTimedeltaArrType_Type)); - goto __pyx_L0; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":969 - * - * - * cdef inline bint is_timedelta64_object(object obj): # <<<<<<<<<<<<<< - * """ - * Cython equivalent of `isinstance(obj, np.timedelta64)` - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":984 - * - * - * cdef inline bint is_datetime64_object(object obj): # <<<<<<<<<<<<<< - * """ - * Cython equivalent of `isinstance(obj, np.datetime64)` - */ - -static CYTHON_INLINE int __pyx_f_5numpy_is_datetime64_object(PyObject *__pyx_v_obj) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("is_datetime64_object", 0); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":996 - * bool - * """ - * return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) # 
<<<<<<<<<<<<<< - * - * - */ - __pyx_r = PyObject_TypeCheck(__pyx_v_obj, (&PyDatetimeArrType_Type)); - goto __pyx_L0; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":984 - * - * - * cdef inline bint is_datetime64_object(object obj): # <<<<<<<<<<<<<< - * """ - * Cython equivalent of `isinstance(obj, np.datetime64)` - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":999 - * - * - * cdef inline npy_datetime get_datetime64_value(object obj) nogil: # <<<<<<<<<<<<<< - * """ - * returns the int64 value underlying scalar numpy datetime64 object - */ - -static CYTHON_INLINE npy_datetime __pyx_f_5numpy_get_datetime64_value(PyObject *__pyx_v_obj) { - npy_datetime __pyx_r; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1006 - * also needed. That can be found using `get_datetime64_unit`. - * """ - * return (<PyDatetimeScalarObject*>obj).obval # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = ((PyDatetimeScalarObject *)__pyx_v_obj)->obval; - goto __pyx_L0; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":999 - * - * - * cdef inline npy_datetime get_datetime64_value(object obj) nogil: # <<<<<<<<<<<<<< - * """ - * returns the int64 value underlying scalar numpy datetime64 object - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1009 - * - * - * cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: # <<<<<<<<<<<<<< - * """ - * returns the int64 value underlying scalar numpy timedelta64 object - */ - -static CYTHON_INLINE npy_timedelta __pyx_f_5numpy_get_timedelta64_value(PyObject *__pyx_v_obj) { - npy_timedelta __pyx_r; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1013 - * returns the int64 value underlying scalar numpy timedelta64 object - * """ - * return (<PyTimedeltaScalarObject*>obj).obval # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = ((PyTimedeltaScalarObject *)__pyx_v_obj)->obval; - goto __pyx_L0; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1009 - * - * - * cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: # <<<<<<<<<<<<<< - * """ - * returns the int64 value underlying scalar numpy timedelta64 object - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1016 - * - * - * cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: # <<<<<<<<<<<<<< - * """ - * returns the unit part of the dtype for a numpy datetime64 object. - */ - -static CYTHON_INLINE NPY_DATETIMEUNIT __pyx_f_5numpy_get_datetime64_unit(PyObject *__pyx_v_obj) { - NPY_DATETIMEUNIT __pyx_r; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1020 - * returns the unit part of the dtype for a numpy datetime64 object. - * """ - * return <NPY_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base # <<<<<<<<<<<<<< - */ - __pyx_r = ((NPY_DATETIMEUNIT)((PyDatetimeScalarObject *)__pyx_v_obj)->obmeta.base); - goto __pyx_L0; - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1016 - * - * - * cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: # <<<<<<<<<<<<<< - * """ - * returns the unit part of the dtype for a numpy datetime64 object. 
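
Python-level equivalents of the scalar datetime/timedelta helpers above, shown as a sketch (the raw value is the integer count in the dtype's own unit, which is what `obval` holds):

```python
import numpy as np

dt = np.datetime64("2024-01-02T03:04:05")
td = np.timedelta64(90, "s")

print(isinstance(dt, np.datetime64))     # cf. is_datetime64_object
print(isinstance(td, np.timedelta64))    # cf. is_timedelta64_object
print(int(dt.astype(np.int64)))          # cf. get_datetime64_value
print(np.datetime_data(dt.dtype))        # cf. get_datetime64_unit -> ('s', 1)
```
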
- */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":122 - * cdef bint dtype_is_object - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< - * mode="c", bint allocate_buffer=True): - * - */ - -/* Python wrapper */ -static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_shape = 0; - Py_ssize_t __pyx_v_itemsize; - PyObject *__pyx_v_format = 0; - PyObject *__pyx_v_mode = 0; - int __pyx_v_allocate_buffer; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; - PyObject* values[5] = {0,0,0,0,0}; - values[3] = ((PyObject *)__pyx_n_s_c); - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(2, 122, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(2, 122, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); - if (value) { values[3] = value; kw_args--; } - } - CYTHON_FALLTHROUGH; - case 4: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); - if (value) { values[4] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(2, 122, __pyx_L3_error) - } - } else { - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_shape = ((PyObject*)values[0]); - __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 122, __pyx_L3_error) - __pyx_v_format = values[2]; - __pyx_v_mode 
= values[3]; - if (values[4]) { - __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 123, __pyx_L3_error) - } else { - - /* "View.MemoryView":123 - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, - * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< - * - * cdef int idx - */ - __pyx_v_allocate_buffer = ((int)1); - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 122, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(2, 122, __pyx_L1_error) - if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { - PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(2, 122, __pyx_L1_error) - } - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); - - /* "View.MemoryView":122 - * cdef bint dtype_is_object - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< - * mode="c", bint allocate_buffer=True): - * - */ - - /* function exit code */ - goto __pyx_L0; - __pyx_L1_error:; - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { - int __pyx_v_idx; - Py_ssize_t __pyx_v_i; - Py_ssize_t __pyx_v_dim; - PyObject **__pyx_v_p; - char __pyx_v_order; - int __pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - char *__pyx_t_7; - int __pyx_t_8; - Py_ssize_t __pyx_t_9; - PyObject *__pyx_t_10 = NULL; - Py_ssize_t __pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__cinit__", 0); - __Pyx_INCREF(__pyx_v_format); - - /* "View.MemoryView":129 - * cdef PyObject **p - * - * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< - * self.itemsize = itemsize - * - */ - if (unlikely(__pyx_v_shape == Py_None)) { - PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); - __PYX_ERR(2, 129, __pyx_L1_error) - } - __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(2, 129, __pyx_L1_error) - __pyx_v_self->ndim = ((int)__pyx_t_1); - - /* "View.MemoryView":130 - * - * self.ndim = <int> len(shape) - * self.itemsize = itemsize # <<<<<<<<<<<<<< - * - * if not self.ndim: - */ - __pyx_v_self->itemsize = __pyx_v_itemsize; - - /* "View.MemoryView":132 - * self.itemsize = itemsize - * - * if not self.ndim: # <<<<<<<<<<<<<< - * raise ValueError("Empty shape tuple for cython.array") - * - */ - __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":133 - * - * if not self.ndim: - * raise ValueError("Empty shape tuple 
for cython.array") # <<<<<<<<<<<<<< - * - * if itemsize <= 0: - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(2, 133, __pyx_L1_error) - - /* "View.MemoryView":132 - * self.itemsize = itemsize - * - * if not self.ndim: # <<<<<<<<<<<<<< - * raise ValueError("Empty shape tuple for cython.array") - * - */ - } - - /* "View.MemoryView":135 - * raise ValueError("Empty shape tuple for cython.array") - * - * if itemsize <= 0: # <<<<<<<<<<<<<< - * raise ValueError("itemsize <= 0 for cython.array") - * - */ - __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":136 - * - * if itemsize <= 0: - * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< - * - * if not isinstance(format, bytes): - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 136, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(2, 136, __pyx_L1_error) - - /* "View.MemoryView":135 - * raise ValueError("Empty shape tuple for cython.array") - * - * if itemsize <= 0: # <<<<<<<<<<<<<< - * raise ValueError("itemsize <= 0 for cython.array") - * - */ - } - - /* "View.MemoryView":138 - * raise ValueError("itemsize <= 0 for cython.array") - * - * if not isinstance(format, bytes): # <<<<<<<<<<<<<< - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string - */ - __pyx_t_2 = PyBytes_Check(__pyx_v_format); - __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":139 - * - * if not isinstance(format, bytes): - * format = format.encode('ASCII') # <<<<<<<<<<<<<< - * self._format = format # keep a reference to the byte string - * self.format = self._format - */ - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 139, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - } - } - __pyx_t_3 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 139, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":138 - * raise ValueError("itemsize <= 0 for cython.array") - * - * if not isinstance(format, bytes): # <<<<<<<<<<<<<< - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string - */ - } - - /* "View.MemoryView":140 - * if not isinstance(format, bytes): - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< - * self.format = self._format - * - */ - if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(2, 140, __pyx_L1_error) - __pyx_t_3 = __pyx_v_format; - __Pyx_INCREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __Pyx_GOTREF(__pyx_v_self->_format); - __Pyx_DECREF(__pyx_v_self->_format); - __pyx_v_self->_format = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":141 - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string - * self.format = self._format # <<<<<<<<<<<<<< - * - * - */ - if (unlikely(__pyx_v_self->_format == Py_None)) { - PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); - __PYX_ERR(2, 141, __pyx_L1_error) - } - __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(2, 141, __pyx_L1_error) - __pyx_v_self->format = __pyx_t_7; - - /* "View.MemoryView":144 - * - * - * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< - * self._strides = self._shape + self.ndim - * - */ - __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); - - /* "View.MemoryView":145 - * - * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) - * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< - * - * if not self._shape: - */ - __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); - - /* "View.MemoryView":147 - * self._strides = self._shape + self.ndim - * - * if not self._shape: # <<<<<<<<<<<<<< - * raise MemoryError("unable to allocate shape and strides.") - * - */ - __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":148 - * - * if not self._shape: - * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(2, 148, __pyx_L1_error) - - /* "View.MemoryView":147 - * self._strides = self._shape + self.ndim - * - * if not self._shape: # <<<<<<<<<<<<<< - * raise MemoryError("unable to allocate shape and strides.") - * - */ - } - - /* "View.MemoryView":151 - * - * - * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< - * if dim <= 0: - * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) - */ - __pyx_t_8 = 0; - __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; - for (;;) { - if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(2, 151, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 151, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 151, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_dim = __pyx_t_9; - __pyx_v_idx = __pyx_t_8; - __pyx_t_8 = (__pyx_t_8 + 1); - - /* "View.MemoryView":152 - * - * for idx, dim in enumerate(shape): - * if dim <= 0: # <<<<<<<<<<<<<< - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) - * self._shape[idx] = dim - */ - __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":153 - * for idx, dim in enumerate(shape): - * if dim <= 0: - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< - * self._shape[idx] = dim - * - */ - __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); - __pyx_t_5 = 0; - __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_Raise(__pyx_t_10, 0, 0, 0); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __PYX_ERR(2, 153, __pyx_L1_error) - - /* "View.MemoryView":152 - * - * for idx, dim in enumerate(shape): - * if dim <= 0: # <<<<<<<<<<<<<< - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) - * self._shape[idx] = dim - */ - } - - /* "View.MemoryView":154 - * if dim <= 0: - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) - * self._shape[idx] = dim # <<<<<<<<<<<<<< - * - * cdef char order - */ - (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; - - /* "View.MemoryView":151 - * - * - * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< - * if dim <= 0: - * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) - */ - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":157 - * - * cdef char order - * if mode == 'fortran': # <<<<<<<<<<<<<< - * order = b'F' - * self.mode = u'fortran' - */ - __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(2, 157, __pyx_L1_error) - if (__pyx_t_4) { - - /* "View.MemoryView":158 - * cdef char order - * if mode == 'fortran': - * order = b'F' # <<<<<<<<<<<<<< - * self.mode = u'fortran' - * elif mode == 'c': - */ - __pyx_v_order = 'F'; - - /* "View.MemoryView":159 - * if mode == 'fortran': - * order = b'F' - * self.mode = u'fortran' # <<<<<<<<<<<<<< - * elif mode == 'c': - * order = b'C' - */ - __Pyx_INCREF(__pyx_n_u_fortran); - __Pyx_GIVEREF(__pyx_n_u_fortran); - __Pyx_GOTREF(__pyx_v_self->mode); - __Pyx_DECREF(__pyx_v_self->mode); - __pyx_v_self->mode = __pyx_n_u_fortran; - - /* "View.MemoryView":157 - * - * cdef char order - * if mode == 'fortran': # <<<<<<<<<<<<<< - * order = b'F' - * self.mode = u'fortran' - */ - goto __pyx_L10; - } - - /* "View.MemoryView":160 - * order = b'F' - * self.mode = u'fortran' - * elif mode == 'c': # <<<<<<<<<<<<<< - * order = b'C' - * self.mode = u'c' - */ - __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(2, 160, __pyx_L1_error) - if (likely(__pyx_t_4)) { - - /* "View.MemoryView":161 - * self.mode = u'fortran' - * elif mode == 'c': - * order = b'C' # <<<<<<<<<<<<<< - * self.mode = u'c' - * else: - */ - __pyx_v_order = 'C'; - - /* "View.MemoryView":162 - * elif mode == 'c': - * order = b'C' - * self.mode = u'c' # <<<<<<<<<<<<<< - * else: - * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) - */ - __Pyx_INCREF(__pyx_n_u_c); - __Pyx_GIVEREF(__pyx_n_u_c); - __Pyx_GOTREF(__pyx_v_self->mode); - __Pyx_DECREF(__pyx_v_self->mode); - __pyx_v_self->mode = __pyx_n_u_c; - - /* "View.MemoryView":160 - * order = b'F' - * self.mode = u'fortran' - * elif mode == 'c': # <<<<<<<<<<<<<< - * order = b'C' - * self.mode = u'c' - */ - goto __pyx_L10; - } - - /* "View.MemoryView":164 - * self.mode = u'c' - * else: - * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< - * - * self.len = fill_contig_strides_array(self._shape, self._strides, - */ - /*else*/ { - __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 164, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 164, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_10, 0, 0, 0); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __PYX_ERR(2, 164, __pyx_L1_error) - } - __pyx_L10:; - - /* "View.MemoryView":166 - * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) - * - * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< - * itemsize, self.ndim, order) - * - */ - __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); - - /* "View.MemoryView":169 - * itemsize, self.ndim, order) - * - * self.free_data = allocate_buffer # <<<<<<<<<<<<<< - * self.dtype_is_object = format == b'O' - * if allocate_buffer: - */ - __pyx_v_self->free_data = __pyx_v_allocate_buffer; - - /* "View.MemoryView":170 - * - * 
self.free_data = allocate_buffer - * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< - * if allocate_buffer: - * - */ - __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 170, __pyx_L1_error) - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 170, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_v_self->dtype_is_object = __pyx_t_4; - - /* "View.MemoryView":171 - * self.free_data = allocate_buffer - * self.dtype_is_object = format == b'O' - * if allocate_buffer: # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_4 = (__pyx_v_allocate_buffer != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":174 - * - * - * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< - * if not self.data: - * raise MemoryError("unable to allocate array data.") - */ - __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); - - /* "View.MemoryView":175 - * - * self.data = <char *>malloc(self.len) - * if not self.data: # <<<<<<<<<<<<<< - * raise MemoryError("unable to allocate array data.") - * - */ - __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":176 - * self.data = <char *>malloc(self.len) - * if not self.data: - * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< - * - * if self.dtype_is_object: - */ - __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 176, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_Raise(__pyx_t_10, 0, 0, 0); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __PYX_ERR(2, 176, __pyx_L1_error) - - /* "View.MemoryView":175 - * - * self.data = <char *>malloc(self.len) - * if not self.data: # <<<<<<<<<<<<<< - * raise MemoryError("unable to allocate array data.") - * - */ - } - - /* "View.MemoryView":178 - * raise MemoryError("unable to allocate array data.") - * - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * p = <PyObject **> self.data - * for i in range(self.len / itemsize): - */ - __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":179 - * - * if self.dtype_is_object: - * p = <PyObject **> self.data # <<<<<<<<<<<<<< - * for i in range(self.len / itemsize): - * p[i] = Py_None - */ - __pyx_v_p = ((PyObject **)__pyx_v_self->data); - - /* "View.MemoryView":180 - * if self.dtype_is_object: - * p = <PyObject **> self.data - * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< - * p[i] = Py_None - * Py_INCREF(Py_None) - */ - if (unlikely(__pyx_v_itemsize == 0)) { - PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); - __PYX_ERR(2, 180, __pyx_L1_error) - } - else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { - PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); - __PYX_ERR(2, 180, __pyx_L1_error) - } - __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); - __pyx_t_9 = __pyx_t_1; - for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { - __pyx_v_i = __pyx_t_11; - - /* "View.MemoryView":181 - * p = <PyObject **> self.data - * for i in range(self.len / itemsize): - * p[i] = Py_None # <<<<<<<<<<<<<< - * Py_INCREF(Py_None) - * - */ - (__pyx_v_p[__pyx_v_i]) = Py_None; - - /* "View.MemoryView":182 - * for i in range(self.len / itemsize): - * 
p[i] = Py_None - * Py_INCREF(Py_None) # <<<<<<<<<<<<<< - * - * @cname('getbuffer') - */ - Py_INCREF(Py_None); - } - - /* "View.MemoryView":178 - * raise MemoryError("unable to allocate array data.") - * - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * p = <PyObject **> self.data - * for i in range(self.len / itemsize): - */ - } - - /* "View.MemoryView":171 - * self.free_data = allocate_buffer - * self.dtype_is_object = format == b'O' - * if allocate_buffer: # <<<<<<<<<<<<<< - * - * - */ - } - - /* "View.MemoryView":122 - * cdef bint dtype_is_object - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< - * mode="c", bint allocate_buffer=True): - * - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_10); - __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_format); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":185 - * - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< - * cdef int bufmode = -1 - * if self.mode == u"c": - */ - -/* Python wrapper */ -static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_v_bufmode; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - char *__pyx_t_4; - Py_ssize_t __pyx_t_5; - int __pyx_t_6; - Py_ssize_t *__pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - if (__pyx_v_info == NULL) { - PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); - return -1; - } - __Pyx_RefNannySetupContext("__getbuffer__", 0); - __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(__pyx_v_info->obj); - - /* "View.MemoryView":186 - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int bufmode = -1 # <<<<<<<<<<<<<< - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - */ - __pyx_v_bufmode = -1; - - /* "View.MemoryView":187 - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int bufmode = -1 - * if self.mode == u"c": # <<<<<<<<<<<<<< - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": - */ - __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 187, __pyx_L1_error) - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":188 - * cdef int bufmode = -1 - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< - * elif 
self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - */ - __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); - - /* "View.MemoryView":187 - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int bufmode = -1 - * if self.mode == u"c": # <<<<<<<<<<<<<< - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": - */ - goto __pyx_L3; - } - - /* "View.MemoryView":189 - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": # <<<<<<<<<<<<<< - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - */ - __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(2, 189, __pyx_L1_error) - __pyx_t_1 = (__pyx_t_2 != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":190 - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< - * if not (flags & bufmode): - * raise ValueError("Can only create a buffer that is contiguous in memory.") - */ - __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); - - /* "View.MemoryView":189 - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": # <<<<<<<<<<<<<< - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - */ - } - __pyx_L3:; - - /* "View.MemoryView":191 - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): # <<<<<<<<<<<<<< - * raise ValueError("Can only create a buffer that is contiguous in memory.") - * info.buf = self.data - */ - __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":192 - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< - * info.buf = self.data - * info.len = self.len - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 192, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(2, 192, __pyx_L1_error) - - /* "View.MemoryView":191 - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): # <<<<<<<<<<<<<< - * raise ValueError("Can only create a buffer that is contiguous in memory.") - * info.buf = self.data - */ - } - - /* "View.MemoryView":193 - * if not (flags & bufmode): - * raise ValueError("Can only create a buffer that is contiguous in memory.") - * info.buf = self.data # <<<<<<<<<<<<<< - * info.len = self.len - * info.ndim = self.ndim - */ - __pyx_t_4 = __pyx_v_self->data; - __pyx_v_info->buf = __pyx_t_4; - - /* "View.MemoryView":194 - * raise ValueError("Can only create a buffer that is contiguous in memory.") - * info.buf = self.data - * info.len = self.len # <<<<<<<<<<<<<< - * info.ndim = self.ndim - * info.shape = self._shape - */ - __pyx_t_5 = __pyx_v_self->len; - __pyx_v_info->len = __pyx_t_5; - - /* "View.MemoryView":195 - * info.buf = self.data - * info.len = self.len - * info.ndim = self.ndim # <<<<<<<<<<<<<< - * info.shape = self._shape - * info.strides = self._strides - */ - __pyx_t_6 = __pyx_v_self->ndim; - 
__pyx_v_info->ndim = __pyx_t_6; - - /* "View.MemoryView":196 - * info.len = self.len - * info.ndim = self.ndim - * info.shape = self._shape # <<<<<<<<<<<<<< - * info.strides = self._strides - * info.suboffsets = NULL - */ - __pyx_t_7 = __pyx_v_self->_shape; - __pyx_v_info->shape = __pyx_t_7; - - /* "View.MemoryView":197 - * info.ndim = self.ndim - * info.shape = self._shape - * info.strides = self._strides # <<<<<<<<<<<<<< - * info.suboffsets = NULL - * info.itemsize = self.itemsize - */ - __pyx_t_7 = __pyx_v_self->_strides; - __pyx_v_info->strides = __pyx_t_7; - - /* "View.MemoryView":198 - * info.shape = self._shape - * info.strides = self._strides - * info.suboffsets = NULL # <<<<<<<<<<<<<< - * info.itemsize = self.itemsize - * info.readonly = 0 - */ - __pyx_v_info->suboffsets = NULL; - - /* "View.MemoryView":199 - * info.strides = self._strides - * info.suboffsets = NULL - * info.itemsize = self.itemsize # <<<<<<<<<<<<<< - * info.readonly = 0 - * - */ - __pyx_t_5 = __pyx_v_self->itemsize; - __pyx_v_info->itemsize = __pyx_t_5; - - /* "View.MemoryView":200 - * info.suboffsets = NULL - * info.itemsize = self.itemsize - * info.readonly = 0 # <<<<<<<<<<<<<< - * - * if flags & PyBUF_FORMAT: - */ - __pyx_v_info->readonly = 0; - - /* "View.MemoryView":202 - * info.readonly = 0 - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * info.format = self.format - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":203 - * - * if flags & PyBUF_FORMAT: - * info.format = self.format # <<<<<<<<<<<<<< - * else: - * info.format = NULL - */ - __pyx_t_4 = __pyx_v_self->format; - __pyx_v_info->format = __pyx_t_4; - - /* "View.MemoryView":202 - * info.readonly = 0 - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * info.format = self.format - * else: - */ - goto __pyx_L5; - } - - /* "View.MemoryView":205 - * info.format = self.format - * else: - * info.format = NULL # <<<<<<<<<<<<<< - * - * info.obj = self - */ - /*else*/ { - __pyx_v_info->format = NULL; - } - __pyx_L5:; - - /* "View.MemoryView":207 - * info.format = NULL - * - * info.obj = self # <<<<<<<<<<<<<< - * - * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") - */ - __Pyx_INCREF(((PyObject *)__pyx_v_self)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = ((PyObject *)__pyx_v_self); - - /* "View.MemoryView":185 - * - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< - * cdef int bufmode = -1 - * if self.mode == u"c": - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - if (__pyx_v_info->obj != NULL) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - goto __pyx_L2; - __pyx_L0:; - if (__pyx_v_info->obj == Py_None) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - __pyx_L2:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":211 - * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") - * - * def __dealloc__(array self): # <<<<<<<<<<<<<< - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - */ - -/* Python wrapper */ -static void 
__pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ -static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); - __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("__dealloc__", 0); - - /* "View.MemoryView":212 - * - * def __dealloc__(array self): - * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< - * self.callback_free_data(self.data) - * elif self.free_data: - */ - __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":213 - * def __dealloc__(array self): - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) # <<<<<<<<<<<<<< - * elif self.free_data: - * if self.dtype_is_object: - */ - __pyx_v_self->callback_free_data(__pyx_v_self->data); - - /* "View.MemoryView":212 - * - * def __dealloc__(array self): - * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< - * self.callback_free_data(self.data) - * elif self.free_data: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":214 - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - * elif self.free_data: # <<<<<<<<<<<<<< - * if self.dtype_is_object: - * refcount_objects_in_slice(self.data, self._shape, - */ - __pyx_t_1 = (__pyx_v_self->free_data != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":215 - * self.callback_free_data(self.data) - * elif self.free_data: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice(self.data, self._shape, - * self._strides, self.ndim, False) - */ - __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":216 - * elif self.free_data: - * if self.dtype_is_object: - * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< - * self._strides, self.ndim, False) - * free(self.data) - */ - __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); - - /* "View.MemoryView":215 - * self.callback_free_data(self.data) - * elif self.free_data: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice(self.data, self._shape, - * self._strides, self.ndim, False) - */ - } - - /* "View.MemoryView":218 - * refcount_objects_in_slice(self.data, self._shape, - * self._strides, self.ndim, False) - * free(self.data) # <<<<<<<<<<<<<< - * PyObject_Free(self._shape) - * - */ - free(__pyx_v_self->data); - - /* "View.MemoryView":214 - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - * elif self.free_data: # <<<<<<<<<<<<<< - * if self.dtype_is_object: - * refcount_objects_in_slice(self.data, self._shape, - */ - } - __pyx_L3:; - - /* "View.MemoryView":219 - * self._strides, self.ndim, False) - * free(self.data) - * PyObject_Free(self._shape) # <<<<<<<<<<<<<< - * - * @property - */ - PyObject_Free(__pyx_v_self->_shape); - - /* "View.MemoryView":211 - * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") - * - * def __dealloc__(array self): # <<<<<<<<<<<<<< - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); 
-} - -/* "View.MemoryView":222 - * - * @property - * def memview(self): # <<<<<<<<<<<<<< - * return self.get_memview() - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":223 - * @property - * def memview(self): - * return self.get_memview() # <<<<<<<<<<<<<< - * - * @cname('get_memview') - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 223, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":222 - * - * @property - * def memview(self): # <<<<<<<<<<<<<< - * return self.get_memview() - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":226 - * - * @cname('get_memview') - * cdef get_memview(self): # <<<<<<<<<<<<<< - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE - * return memoryview(self, flags, self.dtype_is_object) - */ - -static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { - int __pyx_v_flags; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("get_memview", 0); - - /* "View.MemoryView":227 - * @cname('get_memview') - * cdef get_memview(self): - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< - * return memoryview(self, flags, self.dtype_is_object) - * - */ - __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); - - /* "View.MemoryView":228 - * cdef get_memview(self): - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE - * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< - * - * def __len__(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(((PyObject *)__pyx_v_self)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); - PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject 
*)__pyx_v_self)); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":226 - * - * @cname('get_memview') - * cdef get_memview(self): # <<<<<<<<<<<<<< - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE - * return memoryview(self, flags, self.dtype_is_object) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":230 - * return memoryview(self, flags, self.dtype_is_object) - * - * def __len__(self): # <<<<<<<<<<<<<< - * return self._shape[0] - * - */ - -/* Python wrapper */ -static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ -static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__len__", 0); - - /* "View.MemoryView":231 - * - * def __len__(self): - * return self._shape[0] # <<<<<<<<<<<<<< - * - * def __getattr__(self, attr): - */ - __pyx_r = (__pyx_v_self->_shape[0]); - goto __pyx_L0; - - /* "View.MemoryView":230 - * return memoryview(self, flags, self.dtype_is_object) - * - * def __len__(self): # <<<<<<<<<<<<<< - * return self._shape[0] - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":233 - * return self._shape[0] - * - * def __getattr__(self, attr): # <<<<<<<<<<<<<< - * return getattr(self.memview, attr) - * - */ - -/* Python wrapper */ -static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ -static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getattr__", 0); - - /* "View.MemoryView":234 - * - * def __getattr__(self, attr): - * return 
getattr(self.memview, attr) # <<<<<<<<<<<<<< - * - * def __getitem__(self, item): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 234, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 234, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":233 - * return self._shape[0] - * - * def __getattr__(self, attr): # <<<<<<<<<<<<<< - * return getattr(self.memview, attr) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":236 - * return getattr(self.memview, attr) - * - * def __getitem__(self, item): # <<<<<<<<<<<<<< - * return self.memview[item] - * - */ - -/* Python wrapper */ -static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ -static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getitem__", 0); - - /* "View.MemoryView":237 - * - * def __getitem__(self, item): - * return self.memview[item] # <<<<<<<<<<<<<< - * - * def __setitem__(self, item, value): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 237, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 237, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":236 - * return getattr(self.memview, attr) - * - * def __getitem__(self, item): # <<<<<<<<<<<<<< - * return self.memview[item] - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":239 - * return self.memview[item] - * - * def __setitem__(self, item, value): # <<<<<<<<<<<<<< - * self.memview[item] = value - * - */ - -/* Python wrapper */ -static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ -static int 
__pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setitem__", 0); - - /* "View.MemoryView":240 - * - * def __setitem__(self, item, value): - * self.memview[item] = value # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 240, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(2, 240, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "View.MemoryView":239 - * return self.memview[item] - * - * def __setitem__(self, item, value): # <<<<<<<<<<<<<< - * self.memview[item] = value - * - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(2, 2, __pyx_L1_error) - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial 
__cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ -static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(2, 4, __pyx_L1_error) - - /* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":244 - * - * @cname("__pyx_array_new") - * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< - * char *mode, char *buf): - * cdef array result - */ - -static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { - struct __pyx_array_obj *__pyx_v_result = 0; - struct __pyx_array_obj *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("array_cwrapper", 0); - - /* "View.MemoryView":248 - * cdef array result - * - * if buf == NULL: # 
<<<<<<<<<<<<<< - * result = array(shape, itemsize, format, mode.decode('ASCII')) - * else: - */ - __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":249 - * - * if buf == NULL: - * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< - * else: - * result = array(shape, itemsize, format, mode.decode('ASCII'), - */ - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_v_shape); - __Pyx_GIVEREF(__pyx_v_shape); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); - __pyx_t_2 = 0; - __pyx_t_3 = 0; - __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); - __pyx_t_4 = 0; - - /* "View.MemoryView":248 - * cdef array result - * - * if buf == NULL: # <<<<<<<<<<<<<< - * result = array(shape, itemsize, format, mode.decode('ASCII')) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":251 - * result = array(shape, itemsize, format, mode.decode('ASCII')) - * else: - * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< - * allocate_buffer=False) - * result.data = buf - */ - /*else*/ { - __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_shape); - __Pyx_GIVEREF(__pyx_v_shape); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); - __pyx_t_4 = 0; - __pyx_t_5 = 0; - __pyx_t_3 = 0; - - /* "View.MemoryView":252 - * else: - * result = array(shape, itemsize, format, mode.decode('ASCII'), - * allocate_buffer=False) # <<<<<<<<<<<<<< - * result.data = buf - * - */ - __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 252, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(2, 252, __pyx_L1_error) - - /* "View.MemoryView":251 - * result = array(shape, itemsize, 
format, mode.decode('ASCII')) - * else: - * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< - * allocate_buffer=False) - * result.data = buf - */ - __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); - __pyx_t_5 = 0; - - /* "View.MemoryView":253 - * result = array(shape, itemsize, format, mode.decode('ASCII'), - * allocate_buffer=False) - * result.data = buf # <<<<<<<<<<<<<< - * - * return result - */ - __pyx_v_result->data = __pyx_v_buf; - } - __pyx_L3:; - - /* "View.MemoryView":255 - * result.data = buf - * - * return result # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __Pyx_INCREF(((PyObject *)__pyx_v_result)); - __pyx_r = __pyx_v_result; - goto __pyx_L0; - - /* "View.MemoryView":244 - * - * @cname("__pyx_array_new") - * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< - * char *mode, char *buf): - * cdef array result - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":281 - * cdef class Enum(object): - * cdef object name - * def __init__(self, name): # <<<<<<<<<<<<<< - * self.name = name - * def __repr__(self): - */ - -/* Python wrapper */ -static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_name = 0; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; - PyObject* values[1] = {0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(2, 281, __pyx_L3_error) - } - } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - } - __pyx_v_name = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 281, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - 
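The array_cwrapper (__pyx_array_new) above is the generated C shape of calling a Python type with a tuple of positional arguments plus one keyword argument (allocate_buffer=False). A minimal standalone sketch of that same CPython C-API pattern, using the builtin int type as a stand-in for the generated array type (error checks omitted for brevity):

    #include <Python.h>

    int main(void) {
        Py_Initialize();

        /* positional args: ("ff",) */
        PyObject *args = PyTuple_New(1);
        PyTuple_SET_ITEM(args, 0, PyUnicode_FromString("ff"));  /* SET_ITEM steals the reference */

        /* keyword args: {"base": 16}, playing the role of {"allocate_buffer": False} above */
        PyObject *kwargs = PyDict_New();
        PyObject *base = PyLong_FromLong(16);
        PyDict_SetItemString(kwargs, "base", base);
        Py_DECREF(base);

        /* same call shape as __Pyx_PyObject_Call(type, args, kwargs) in the generated code */
        PyObject *result = PyObject_Call((PyObject *)&PyLong_Type, args, kwargs);
        Py_DECREF(args);
        Py_DECREF(kwargs);

        printf("int('ff', base=16) -> %ld\n", PyLong_AsLong(result));  /* 255 */
        Py_DECREF(result);
        return Py_FinalizeEx();
    }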
__pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__init__", 0); - - /* "View.MemoryView":282 - * cdef object name - * def __init__(self, name): - * self.name = name # <<<<<<<<<<<<<< - * def __repr__(self): - * return self.name - */ - __Pyx_INCREF(__pyx_v_name); - __Pyx_GIVEREF(__pyx_v_name); - __Pyx_GOTREF(__pyx_v_self->name); - __Pyx_DECREF(__pyx_v_self->name); - __pyx_v_self->name = __pyx_v_name; - - /* "View.MemoryView":281 - * cdef class Enum(object): - * cdef object name - * def __init__(self, name): # <<<<<<<<<<<<<< - * self.name = name - * def __repr__(self): - */ - - /* function exit code */ - __pyx_r = 0; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":283 - * def __init__(self, name): - * self.name = name - * def __repr__(self): # <<<<<<<<<<<<<< - * return self.name - * - */ - -/* Python wrapper */ -static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); - __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__repr__", 0); - - /* "View.MemoryView":284 - * self.name = name - * def __repr__(self): - * return self.name # <<<<<<<<<<<<<< - * - * cdef generic = Enum("<strided and direct or indirect>") - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->name); - __pyx_r = __pyx_v_self->name; - goto __pyx_L0; - - /* "View.MemoryView":283 - * def __init__(self, name): - * self.name = name - * def __repr__(self): # <<<<<<<<<<<<<< - * return self.name - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * cdef tuple state - * cdef object _dict - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { - PyObject *__pyx_v_state = 0; - PyObject *__pyx_v__dict = 0; - int __pyx_v_use_setstate; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int 
__pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":5 - * cdef object _dict - * cdef bint use_setstate - * state = (self.name,) # <<<<<<<<<<<<<< - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: - */ - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_v_self->name); - __Pyx_GIVEREF(__pyx_v_self->name); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); - __pyx_v_state = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "(tree fragment)":6 - * cdef bint use_setstate - * state = (self.name,) - * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< - * if _dict is not None: - * state += (_dict,) - */ - __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v__dict = __pyx_t_1; - __pyx_t_1 = 0; - - /* "(tree fragment)":7 - * state = (self.name,) - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: # <<<<<<<<<<<<<< - * state += (_dict,) - * use_setstate = True - */ - __pyx_t_2 = (__pyx_v__dict != Py_None); - __pyx_t_3 = (__pyx_t_2 != 0); - if (__pyx_t_3) { - - /* "(tree fragment)":8 - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: - * state += (_dict,) # <<<<<<<<<<<<<< - * use_setstate = True - * else: - */ - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 8, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_v__dict); - __Pyx_GIVEREF(__pyx_v__dict); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); - __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 8, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); - __pyx_t_4 = 0; - - /* "(tree fragment)":9 - * if _dict is not None: - * state += (_dict,) - * use_setstate = True # <<<<<<<<<<<<<< - * else: - * use_setstate = self.name is not None - */ - __pyx_v_use_setstate = 1; - - /* "(tree fragment)":7 - * state = (self.name,) - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: # <<<<<<<<<<<<<< - * state += (_dict,) - * use_setstate = True - */ - goto __pyx_L3; - } - - /* "(tree fragment)":11 - * use_setstate = True - * else: - * use_setstate = self.name is not None # <<<<<<<<<<<<<< - * if use_setstate: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state - */ - /*else*/ { - __pyx_t_3 = (__pyx_v_self->name != Py_None); - __pyx_v_use_setstate = __pyx_t_3; - } - __pyx_L3:; - - /* "(tree fragment)":12 - * else: - * use_setstate = self.name is not None - * if use_setstate: # <<<<<<<<<<<<<< - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state - * else: - */ - __pyx_t_3 = (__pyx_v_use_setstate != 0); - if (__pyx_t_3) { - - /* "(tree fragment)":13 - * use_setstate = self.name is not None - * if use_setstate: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< - * else: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyTuple_New(3); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(2, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_INCREF(__pyx_int_184977713); - __Pyx_GIVEREF(__pyx_int_184977713); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); - __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); - __Pyx_INCREF(__pyx_v_state); - __Pyx_GIVEREF(__pyx_v_state); - PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); - __pyx_t_4 = 0; - __pyx_t_1 = 0; - __pyx_r = __pyx_t_5; - __pyx_t_5 = 0; - goto __pyx_L0; - - /* "(tree fragment)":12 - * else: - * use_setstate = self.name is not None - * if use_setstate: # <<<<<<<<<<<<<< - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state - * else: - */ - } - - /* "(tree fragment)":15 - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state - * else: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * __pyx_unpickle_Enum__set_state(self, __pyx_state) - */ - /*else*/ { - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_INCREF(__pyx_int_184977713); - __Pyx_GIVEREF(__pyx_int_184977713); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); - __Pyx_INCREF(__pyx_v_state); - __Pyx_GIVEREF(__pyx_v_state); - PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); - __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); - __pyx_t_5 = 0; - __pyx_t_1 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - } - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * cdef tuple state - * cdef object _dict - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_state); - __Pyx_XDECREF(__pyx_v__dict); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":16 - * else: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state(self, __pyx_state) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ -static 
PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":17 - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) - * def __setstate_cython__(self, __pyx_state): - * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< - */ - if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(2, 17, __pyx_L1_error) - __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 17, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "(tree fragment)":16 - * else: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state(self, __pyx_state) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":298 - * - * @cname('__pyx_align_pointer') - * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< - * "Align pointer memory on a given boundary" - * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory - */ - -static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { - Py_intptr_t __pyx_v_aligned_p; - size_t __pyx_v_offset; - void *__pyx_r; - int __pyx_t_1; - - /* "View.MemoryView":300 - * cdef void *align_pointer(void *memory, size_t alignment) nogil: - * "Align pointer memory on a given boundary" - * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< - * cdef size_t offset - * - */ - __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); - - /* "View.MemoryView":304 - * - * with cython.cdivision(True): - * offset = aligned_p % alignment # <<<<<<<<<<<<<< - * - * if offset > 0: - */ - __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); - - /* "View.MemoryView":306 - * offset = aligned_p % alignment - * - * if offset > 0: # <<<<<<<<<<<<<< - * aligned_p += alignment - offset - * - */ - __pyx_t_1 = ((__pyx_v_offset > 0) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":307 - * - * if offset > 0: - * aligned_p += alignment - offset # <<<<<<<<<<<<<< - * - * return <void *> aligned_p - */ - __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); - - /* "View.MemoryView":306 - * offset = aligned_p % alignment - * - * if 
offset > 0: # <<<<<<<<<<<<<< - * aligned_p += alignment - offset - * - */ - } - - /* "View.MemoryView":309 - * aligned_p += alignment - offset - * - * return <void *> aligned_p # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = ((void *)__pyx_v_aligned_p); - goto __pyx_L0; - - /* "View.MemoryView":298 - * - * @cname('__pyx_align_pointer') - * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< - * "Align pointer memory on a given boundary" - * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":345 - * cdef __Pyx_TypeInfo *typeinfo - * - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< - * self.obj = obj - * self.flags = flags - */ - -/* Python wrapper */ -static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_obj = 0; - int __pyx_v_flags; - int __pyx_v_dtype_is_object; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; - PyObject* values[3] = {0,0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(2, 345, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(2, 345, __pyx_L3_error) - } - } else { - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_obj = values[0]; - __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 345, __pyx_L3_error) - if (values[2]) { - __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 345, __pyx_L3_error) - } else { - __pyx_v_dtype_is_object = ((int)0); - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); 
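The __pyx_align_pointer helper above rounds an address up to the next multiple of a given alignment via offset = p % alignment. A standalone sketch of the same arithmetic (align_up is a hypothetical name; the generated code uses Py_intptr_t where this uses uintptr_t):

    #include <stdint.h>
    #include <stdio.h>

    /* Round a pointer up to the next multiple of `alignment`,
       mirroring the offset arithmetic in __pyx_align_pointer. */
    static void *align_up(void *memory, size_t alignment) {
        uintptr_t p = (uintptr_t)memory;
        size_t offset = p % alignment;   /* distance past the previous boundary */
        if (offset > 0)
            p += alignment - offset;     /* bump up to the next boundary */
        return (void *)p;
    }

    int main(void) {
        char buf[64];
        void *p = align_up(buf + 1, 16);
        printf("%p -> %p (16-byte aligned)\n", (void *)(buf + 1), p);
        return 0;
    }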
__PYX_ERR(2, 345, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__cinit__", 0); - - /* "View.MemoryView":346 - * - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): - * self.obj = obj # <<<<<<<<<<<<<< - * self.flags = flags - * if type(self) is memoryview or obj is not None: - */ - __Pyx_INCREF(__pyx_v_obj); - __Pyx_GIVEREF(__pyx_v_obj); - __Pyx_GOTREF(__pyx_v_self->obj); - __Pyx_DECREF(__pyx_v_self->obj); - __pyx_v_self->obj = __pyx_v_obj; - - /* "View.MemoryView":347 - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): - * self.obj = obj - * self.flags = flags # <<<<<<<<<<<<<< - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) - */ - __pyx_v_self->flags = __pyx_v_flags; - - /* "View.MemoryView":348 - * self.obj = obj - * self.flags = flags - * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< - * __Pyx_GetBuffer(obj, &self.view, flags) - * if <PyObject *> self.view.obj == NULL: - */ - __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); - __pyx_t_3 = (__pyx_t_2 != 0); - if (!__pyx_t_3) { - } else { - __pyx_t_1 = __pyx_t_3; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_3 = (__pyx_v_obj != Py_None); - __pyx_t_2 = (__pyx_t_3 != 0); - __pyx_t_1 = __pyx_t_2; - __pyx_L4_bool_binop_done:; - if (__pyx_t_1) { - - /* "View.MemoryView":349 - * self.flags = flags - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< - * if <PyObject *> self.view.obj == NULL: - * (<__pyx_buffer *> &self.view).obj = Py_None - */ - __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 349, __pyx_L1_error) - - /* "View.MemoryView":350 - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) - * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< - * (<__pyx_buffer *> &self.view).obj = Py_None - * Py_INCREF(Py_None) - */ - __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":351 - * __Pyx_GetBuffer(obj, &self.view, flags) - * if <PyObject *> self.view.obj == NULL: - * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< - * Py_INCREF(Py_None) - * - */ - ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; - - /* "View.MemoryView":352 - * if <PyObject *> self.view.obj == NULL: - * (<__pyx_buffer *> &self.view).obj = Py_None - * Py_INCREF(Py_None) # <<<<<<<<<<<<<< - * - * global __pyx_memoryview_thread_locks_used - */ - Py_INCREF(Py_None); - - /* 
"View.MemoryView":350 - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) - * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< - * (<__pyx_buffer *> &self.view).obj = Py_None - * Py_INCREF(Py_None) - */ - } - - /* "View.MemoryView":348 - * self.obj = obj - * self.flags = flags - * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< - * __Pyx_GetBuffer(obj, &self.view, flags) - * if <PyObject *> self.view.obj == NULL: - */ - } - - /* "View.MemoryView":355 - * - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - */ - __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":356 - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: - */ - __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); - - /* "View.MemoryView":357 - * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() - */ - __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); - - /* "View.MemoryView":355 - * - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - */ - } - - /* "View.MemoryView":358 - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: # <<<<<<<<<<<<<< - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: - */ - __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":359 - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< - * if self.lock is NULL: - * raise MemoryError - */ - __pyx_v_self->lock = PyThread_allocate_lock(); - - /* "View.MemoryView":360 - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * - */ - __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":361 - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: - * raise MemoryError # <<<<<<<<<<<<<< - * - * if flags & PyBUF_FORMAT: - */ - PyErr_NoMemory(); __PYX_ERR(2, 361, __pyx_L1_error) - - /* "View.MemoryView":360 - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * - */ - } - - /* "View.MemoryView":358 - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: # <<<<<<<<<<<<<< - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: - */ - } - - /* "View.MemoryView":363 - * raise 
MemoryError - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":364 - * - * if flags & PyBUF_FORMAT: - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< - * else: - * self.dtype_is_object = dtype_is_object - */ - __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L11_bool_binop_done; - } - __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); - __pyx_t_1 = __pyx_t_2; - __pyx_L11_bool_binop_done:; - __pyx_v_self->dtype_is_object = __pyx_t_1; - - /* "View.MemoryView":363 - * raise MemoryError - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') - * else: - */ - goto __pyx_L10; - } - - /* "View.MemoryView":366 - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') - * else: - * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< - * - * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( - */ - /*else*/ { - __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; - } - __pyx_L10:; - - /* "View.MemoryView":368 - * self.dtype_is_object = dtype_is_object - * - * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< - * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) - * self.typeinfo = NULL - */ - __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); - - /* "View.MemoryView":370 - * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( - * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) - * self.typeinfo = NULL # <<<<<<<<<<<<<< - * - * def __dealloc__(memoryview self): - */ - __pyx_v_self->typeinfo = NULL; - - /* "View.MemoryView":345 - * cdef __Pyx_TypeInfo *typeinfo - * - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< - * self.obj = obj - * self.flags = flags - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":372 - * self.typeinfo = NULL - * - * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - */ - -/* Python wrapper */ -static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ -static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); - __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { - int __pyx_v_i; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - PyThread_type_lock __pyx_t_6; - PyThread_type_lock __pyx_t_7; - 
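For context on the lock bookkeeping here: __cinit__ above hands out one of THREAD_LOCKS_PREALLOCATED (8) preallocated locks while any remain, and the __dealloc__ that follows returns a pool lock by swapping it with the last in-use slot, so live entries stay densely packed at the front of the array. A minimal sketch of that pool discipline, with plain malloc/free standing in for PyThread_allocate_lock/PyThread_free_lock and hypothetical take_lock/give_lock names:

    #include <stdio.h>
    #include <stdlib.h>

    #define POOL_SIZE 8

    static void *pool[POOL_SIZE];
    static int pool_used = 0;

    static void pool_init(void) {
        for (int i = 0; i < POOL_SIZE; i++)
            pool[i] = malloc(1);            /* stand-in for a preallocated lock */
    }

    static void *take_lock(void) {
        if (pool_used < POOL_SIZE)
            return pool[pool_used++];       /* reuse a preallocated handle */
        return malloc(1);                   /* pool exhausted: allocate fresh */
    }

    static void give_lock(void *lock) {
        for (int i = 0; i < pool_used; i++) {
            if (pool[i] == lock) {          /* found: swap with the last live slot */
                pool_used--;
                pool[i] = pool[pool_used];
                pool[pool_used] = lock;
                return;
            }
        }
        free(lock);                         /* not from the pool: release it outright */
    }

    int main(void) {
        pool_init();
        void *a = take_lock(), *b = take_lock();
        give_lock(a);                       /* a returns to the pool; b stays packed at slot 0 */
        give_lock(b);
        printf("pool_used = %d\n", pool_used);  /* back to 0 */
        return 0;
    }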
__Pyx_RefNannySetupContext("__dealloc__", 0); - - /* "View.MemoryView":373 - * - * def __dealloc__(memoryview self): - * if self.obj is not None: # <<<<<<<<<<<<<< - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - */ - __pyx_t_1 = (__pyx_v_self->obj != Py_None); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":374 - * def __dealloc__(memoryview self): - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - * - */ - __Pyx_ReleaseBuffer((&__pyx_v_self->view)); - - /* "View.MemoryView":373 - * - * def __dealloc__(memoryview self): - * if self.obj is not None: # <<<<<<<<<<<<<< - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":375 - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< - * - * (<__pyx_buffer *> &self.view).obj = NULL - */ - __pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":377 - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - * - * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< - * Py_DECREF(Py_None) - * - */ - ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; - - /* "View.MemoryView":378 - * - * (<__pyx_buffer *> &self.view).obj = NULL - * Py_DECREF(Py_None) # <<<<<<<<<<<<<< - * - * cdef int i - */ - Py_DECREF(Py_None); - - /* "View.MemoryView":375 - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< - * - * (<__pyx_buffer *> &self.view).obj = NULL - */ - } - __pyx_L3:; - - /* "View.MemoryView":382 - * cdef int i - * global __pyx_memoryview_thread_locks_used - * if self.lock != NULL: # <<<<<<<<<<<<<< - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: - */ - __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":383 - * global __pyx_memoryview_thread_locks_used - * if self.lock != NULL: - * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 - */ - __pyx_t_3 = __pyx_memoryview_thread_locks_used; - __pyx_t_4 = __pyx_t_3; - for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { - __pyx_v_i = __pyx_t_5; - - /* "View.MemoryView":384 - * if self.lock != NULL: - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: - */ - __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":385 - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< - * if i != __pyx_memoryview_thread_locks_used: - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - */ - __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); - - /* "View.MemoryView":386 - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 - * if i != 
__pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - */ - __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":388 - * if i != __pyx_memoryview_thread_locks_used: - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< - * break - * else: - */ - __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); - __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); - - /* "View.MemoryView":387 - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - * break - */ - (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; - (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; - - /* "View.MemoryView":386 - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - */ - } - - /* "View.MemoryView":389 - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - * break # <<<<<<<<<<<<<< - * else: - * PyThread_free_lock(self.lock) - */ - goto __pyx_L6_break; - - /* "View.MemoryView":384 - * if self.lock != NULL: - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: - */ - } - } - /*else*/ { - - /* "View.MemoryView":391 - * break - * else: - * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< - * - * cdef char *get_item_pointer(memoryview self, object index) except NULL: - */ - PyThread_free_lock(__pyx_v_self->lock); - } - __pyx_L6_break:; - - /* "View.MemoryView":382 - * cdef int i - * global __pyx_memoryview_thread_locks_used - * if self.lock != NULL: # <<<<<<<<<<<<<< - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: - */ - } - - /* "View.MemoryView":372 - * self.typeinfo = NULL - * - * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":393 - * PyThread_free_lock(self.lock) - * - * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< - * cdef Py_ssize_t dim - * cdef char *itemp = <char *> self.view.buf - */ - -static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { - Py_ssize_t __pyx_v_dim; - char *__pyx_v_itemp; - 
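The get_item_pointer loop below reduces to strided pointer arithmetic: the address of an element is the buffer base plus, for each dimension, that dimension's index times its stride (pybuffer_index additionally wraps negative indices, bounds-checks, and follows suboffsets, all omitted here). A standalone sketch with a hypothetical element_ptr helper:

    #include <stdio.h>

    /* Walk one stride per dimension, as pybuffer_index does per step. */
    static char *element_ptr(char *buf, const long *index,
                             const long *strides, int ndim) {
        char *itemp = buf;
        for (int dim = 0; dim < ndim; dim++)
            itemp += index[dim] * strides[dim];
        return itemp;
    }

    int main(void) {
        /* a 3x4 C-contiguous int matrix: strides = {4*sizeof(int), sizeof(int)} */
        int m[3][4] = {{0,1,2,3},{4,5,6,7},{8,9,10,11}};
        long index[2]   = {2, 3};
        long strides[2] = {4 * (long)sizeof(int), (long)sizeof(int)};
        int *p = (int *)element_ptr((char *)m, index, strides, 2);
        printf("m[2][3] = %d\n", *p);   /* 11 */
        return 0;
    }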
PyObject *__pyx_v_idx = NULL; - char *__pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - Py_ssize_t __pyx_t_3; - PyObject *(*__pyx_t_4)(PyObject *); - PyObject *__pyx_t_5 = NULL; - Py_ssize_t __pyx_t_6; - char *__pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("get_item_pointer", 0); - - /* "View.MemoryView":395 - * cdef char *get_item_pointer(memoryview self, object index) except NULL: - * cdef Py_ssize_t dim - * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< - * - * for dim, idx in enumerate(index): - */ - __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); - - /* "View.MemoryView":397 - * cdef char *itemp = <char *> self.view.buf - * - * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< - * itemp = pybuffer_index(&self.view, itemp, idx, dim) - * - */ - __pyx_t_1 = 0; - if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { - __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; - __pyx_t_4 = NULL; - } else { - __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 397, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 397, __pyx_L1_error) - } - for (;;) { - if (likely(!__pyx_t_4)) { - if (likely(PyList_CheckExact(__pyx_t_2))) { - if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(2, 397, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 397, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - } else { - if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(2, 397, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 397, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - } - } else { - __pyx_t_5 = __pyx_t_4(__pyx_t_2); - if (unlikely(!__pyx_t_5)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(2, 397, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_5); - } - __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); - __pyx_t_5 = 0; - __pyx_v_dim = __pyx_t_1; - __pyx_t_1 = (__pyx_t_1 + 1); - - /* "View.MemoryView":398 - * - * for dim, idx in enumerate(index): - * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< - * - * return itemp - */ - __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 398, __pyx_L1_error) - __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(2, 398, __pyx_L1_error) - __pyx_v_itemp = __pyx_t_7; - - /* "View.MemoryView":397 - * cdef char *itemp = <char *> self.view.buf - * - * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< - * itemp = pybuffer_index(&self.view, itemp, idx, dim) - * - */ - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* 
"View.MemoryView":400 - * itemp = pybuffer_index(&self.view, itemp, idx, dim) - * - * return itemp # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_itemp; - goto __pyx_L0; - - /* "View.MemoryView":393 - * PyThread_free_lock(self.lock) - * - * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< - * cdef Py_ssize_t dim - * cdef char *itemp = <char *> self.view.buf - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_idx); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":403 - * - * - * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< - * if index is Ellipsis: - * return self - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ -static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { - PyObject *__pyx_v_have_slices = NULL; - PyObject *__pyx_v_indices = NULL; - char *__pyx_v_itemp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - char *__pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getitem__", 0); - - /* "View.MemoryView":404 - * - * def __getitem__(memoryview self, object index): - * if index is Ellipsis: # <<<<<<<<<<<<<< - * return self - * - */ - __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":405 - * def __getitem__(memoryview self, object index): - * if index is Ellipsis: - * return self # <<<<<<<<<<<<<< - * - * have_slices, indices = _unellipsify(index, self.view.ndim) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_self)); - __pyx_r = ((PyObject *)__pyx_v_self); - goto __pyx_L0; - - /* "View.MemoryView":404 - * - * def __getitem__(memoryview self, object index): - * if index is Ellipsis: # <<<<<<<<<<<<<< - * return self - * - */ - } - - /* "View.MemoryView":407 - * return self - * - * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< - * - * cdef char *itemp - */ - __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 407, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (likely(__pyx_t_3 != Py_None)) { - PyObject* sequence = __pyx_t_3; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 2)) { - if (size > 2) __Pyx_RaiseTooManyValuesError(2); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(2, 407, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_4 = 
PyTuple_GET_ITEM(sequence, 0); - __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(__pyx_t_5); - #else - __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 407, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 407, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } else { - __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(2, 407, __pyx_L1_error) - } - __pyx_v_have_slices = __pyx_t_4; - __pyx_t_4 = 0; - __pyx_v_indices = __pyx_t_5; - __pyx_t_5 = 0; - - /* "View.MemoryView":410 - * - * cdef char *itemp - * if have_slices: # <<<<<<<<<<<<<< - * return memview_slice(self, indices) - * else: - */ - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(2, 410, __pyx_L1_error) - if (__pyx_t_2) { - - /* "View.MemoryView":411 - * cdef char *itemp - * if have_slices: - * return memview_slice(self, indices) # <<<<<<<<<<<<<< - * else: - * itemp = self.get_item_pointer(indices) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 411, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "View.MemoryView":410 - * - * cdef char *itemp - * if have_slices: # <<<<<<<<<<<<<< - * return memview_slice(self, indices) - * else: - */ - } - - /* "View.MemoryView":413 - * return memview_slice(self, indices) - * else: - * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< - * return self.convert_item_to_object(itemp) - * - */ - /*else*/ { - __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(2, 413, __pyx_L1_error) - __pyx_v_itemp = __pyx_t_6; - - /* "View.MemoryView":414 - * else: - * itemp = self.get_item_pointer(indices) - * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< - * - * def __setitem__(memoryview self, object index, object value): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 414, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - } - - /* "View.MemoryView":403 - * - * - * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< - * if index is Ellipsis: - * return self - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_have_slices); - __Pyx_XDECREF(__pyx_v_indices); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":416 - * return self.convert_item_to_object(itemp) - * - * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< - * if self.view.readonly: - * raise TypeError("Cannot assign to read-only memoryview") - */ - -/* Python wrapper */ -static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ -static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, 
PyObject *__pyx_v_value) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { - PyObject *__pyx_v_have_slices = NULL; - PyObject *__pyx_v_obj = NULL; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setitem__", 0); - __Pyx_INCREF(__pyx_v_index); - - /* "View.MemoryView":417 - * - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: # <<<<<<<<<<<<<< - * raise TypeError("Cannot assign to read-only memoryview") - * - */ - __pyx_t_1 = (__pyx_v_self->view.readonly != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":418 - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: - * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< - * - * have_slices, index = _unellipsify(index, self.view.ndim) - */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 418, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(2, 418, __pyx_L1_error) - - /* "View.MemoryView":417 - * - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: # <<<<<<<<<<<<<< - * raise TypeError("Cannot assign to read-only memoryview") - * - */ - } - - /* "View.MemoryView":420 - * raise TypeError("Cannot assign to read-only memoryview") - * - * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< - * - * if have_slices: - */ - __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (likely(__pyx_t_2 != Py_None)) { - PyObject* sequence = __pyx_t_2; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 2)) { - if (size > 2) __Pyx_RaiseTooManyValuesError(2); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(2, 420, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - #else - __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - #endif - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } else { - __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(2, 420, __pyx_L1_error) - } - __pyx_v_have_slices = __pyx_t_3; - __pyx_t_3 = 0; - __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); - __pyx_t_4 = 0; - - /* "View.MemoryView":422 - * have_slices, index = _unellipsify(index, self.view.ndim) - * - * if have_slices: # 
<<<<<<<<<<<<<< - * obj = self.is_slice(value) - * if obj: - */ - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 422, __pyx_L1_error) - if (__pyx_t_1) { - - /* "View.MemoryView":423 - * - * if have_slices: - * obj = self.is_slice(value) # <<<<<<<<<<<<<< - * if obj: - * self.setitem_slice_assignment(self[index], obj) - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 423, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_v_obj = __pyx_t_2; - __pyx_t_2 = 0; - - /* "View.MemoryView":424 - * if have_slices: - * obj = self.is_slice(value) - * if obj: # <<<<<<<<<<<<<< - * self.setitem_slice_assignment(self[index], obj) - * else: - */ - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 424, __pyx_L1_error) - if (__pyx_t_1) { - - /* "View.MemoryView":425 - * obj = self.is_slice(value) - * if obj: - * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< - * else: - * self.setitem_slice_assign_scalar(self[index], value) - */ - __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 425, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 425, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "View.MemoryView":424 - * if have_slices: - * obj = self.is_slice(value) - * if obj: # <<<<<<<<<<<<<< - * self.setitem_slice_assignment(self[index], obj) - * else: - */ - goto __pyx_L5; - } - - /* "View.MemoryView":427 - * self.setitem_slice_assignment(self[index], obj) - * else: - * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< - * else: - * self.setitem_indexed(index, value) - */ - /*else*/ { - __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 427, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(2, 427, __pyx_L1_error) - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 427, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_L5:; - - /* "View.MemoryView":422 - * have_slices, index = _unellipsify(index, self.view.ndim) - * - * if have_slices: # <<<<<<<<<<<<<< - * obj = self.is_slice(value) - * if obj: - */ - goto __pyx_L4; - } - - /* "View.MemoryView":429 - * self.setitem_slice_assign_scalar(self[index], value) - * else: - * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< - * - * cdef is_slice(self, obj): - */ - /*else*/ { - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 429, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_L4:; - - /* "View.MemoryView":416 - * return self.convert_item_to_object(itemp) - * - * def __setitem__(memoryview self, object index, 
object value): # <<<<<<<<<<<<<< - * if self.view.readonly: - * raise TypeError("Cannot assign to read-only memoryview") - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_have_slices); - __Pyx_XDECREF(__pyx_v_obj); - __Pyx_XDECREF(__pyx_v_index); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":431 - * self.setitem_indexed(index, value) - * - * cdef is_slice(self, obj): # <<<<<<<<<<<<<< - * if not isinstance(obj, memoryview): - * try: - */ - -static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_slice", 0); - __Pyx_INCREF(__pyx_v_obj); - - /* "View.MemoryView":432 - * - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - */ - __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); - __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":433 - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): - * try: # <<<<<<<<<<<<<< - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); - __Pyx_XGOTREF(__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_4); - __Pyx_XGOTREF(__pyx_t_5); - /*try:*/ { - - /* "View.MemoryView":434 - * if not isinstance(obj, memoryview): - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< - * self.dtype_is_object) - * except TypeError: - */ - __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 434, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_6); - - /* "View.MemoryView":435 - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) # <<<<<<<<<<<<<< - * except TypeError: - * return None - */ - __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 435, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_7); - - /* "View.MemoryView":434 - * if not isinstance(obj, memoryview): - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< - * self.dtype_is_object) - * except TypeError: - */ - __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 434, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_INCREF(__pyx_v_obj); - __Pyx_GIVEREF(__pyx_v_obj); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); - __Pyx_GIVEREF(__pyx_t_7); - PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); - __pyx_t_6 = 0; - __pyx_t_7 = 0; - __pyx_t_7 = 
__Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 434, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); - __pyx_t_7 = 0; - - /* "View.MemoryView":433 - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): - * try: # <<<<<<<<<<<<<< - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - */ - } - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - goto __pyx_L9_try_end; - __pyx_L4_error:; - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - - /* "View.MemoryView":436 - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - * except TypeError: # <<<<<<<<<<<<<< - * return None - * - */ - __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); - if (__pyx_t_9) { - __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(2, 436, __pyx_L6_except_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_GOTREF(__pyx_t_8); - __Pyx_GOTREF(__pyx_t_6); - - /* "View.MemoryView":437 - * self.dtype_is_object) - * except TypeError: - * return None # <<<<<<<<<<<<<< - * - * return obj - */ - __Pyx_XDECREF(__pyx_r); - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - goto __pyx_L7_except_return; - } - goto __pyx_L6_except_error; - __pyx_L6_except_error:; - - /* "View.MemoryView":433 - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): - * try: # <<<<<<<<<<<<<< - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - */ - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_XGIVEREF(__pyx_t_5); - __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); - goto __pyx_L1_error; - __pyx_L7_except_return:; - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_XGIVEREF(__pyx_t_5); - __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); - goto __pyx_L0; - __pyx_L9_try_end:; - } - - /* "View.MemoryView":432 - * - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - */ - } - - /* "View.MemoryView":439 - * return None - * - * return obj # <<<<<<<<<<<<<< - * - * cdef setitem_slice_assignment(self, dst, src): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_obj); - __pyx_r = __pyx_v_obj; - goto __pyx_L0; - - /* "View.MemoryView":431 - * self.setitem_indexed(index, value) - * - * cdef is_slice(self, obj): # <<<<<<<<<<<<<< - * if not isinstance(obj, memoryview): - * try: - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_obj); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":441 - * return obj - * - * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< - * cdef 
__Pyx_memviewslice dst_slice - * cdef __Pyx_memviewslice src_slice - */ - -static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { - __Pyx_memviewslice __pyx_v_dst_slice; - __Pyx_memviewslice __pyx_v_src_slice; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - __Pyx_memviewslice *__pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); - - /* "View.MemoryView":445 - * cdef __Pyx_memviewslice src_slice - * - * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< - * get_slice_from_memview(dst, &dst_slice)[0], - * src.ndim, dst.ndim, self.dtype_is_object) - */ - if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(2, 445, __pyx_L1_error) - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 445, __pyx_L1_error) - - /* "View.MemoryView":446 - * - * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], - * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< - * src.ndim, dst.ndim, self.dtype_is_object) - * - */ - if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(2, 446, __pyx_L1_error) - __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 446, __pyx_L1_error) - - /* "View.MemoryView":447 - * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], - * get_slice_from_memview(dst, &dst_slice)[0], - * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< - * - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 447, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 447, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 447, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 447, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":445 - * cdef __Pyx_memviewslice src_slice - * - * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< - * get_slice_from_memview(dst, &dst_slice)[0], - * src.ndim, dst.ndim, self.dtype_is_object) - */ - __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 445, __pyx_L1_error) - - /* "View.MemoryView":441 - * return obj - * - * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice dst_slice - * cdef __Pyx_memviewslice src_slice - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - 
goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":449 - * src.ndim, dst.ndim, self.dtype_is_object) - * - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< - * cdef int array[128] - * cdef void *tmp = NULL - */ - -static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { - int __pyx_v_array[0x80]; - void *__pyx_v_tmp; - void *__pyx_v_item; - __Pyx_memviewslice *__pyx_v_dst_slice; - __Pyx_memviewslice __pyx_v_tmp_slice; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_t_5; - char const *__pyx_t_6; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyObject *__pyx_t_11 = NULL; - PyObject *__pyx_t_12 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); - - /* "View.MemoryView":451 - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): - * cdef int array[128] - * cdef void *tmp = NULL # <<<<<<<<<<<<<< - * cdef void *item - * - */ - __pyx_v_tmp = NULL; - - /* "View.MemoryView":456 - * cdef __Pyx_memviewslice *dst_slice - * cdef __Pyx_memviewslice tmp_slice - * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< - * - * if <size_t>self.view.itemsize > sizeof(array): - */ - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 456, __pyx_L1_error) - __pyx_v_dst_slice = __pyx_t_1; - - /* "View.MemoryView":458 - * dst_slice = get_slice_from_memview(dst, &tmp_slice) - * - * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: - */ - __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":459 - * - * if <size_t>self.view.itemsize > sizeof(array): - * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< - * if tmp == NULL: - * raise MemoryError - */ - __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); - - /* "View.MemoryView":460 - * if <size_t>self.view.itemsize > sizeof(array): - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * item = tmp - */ - __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":461 - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: - * raise MemoryError # <<<<<<<<<<<<<< - * item = tmp - * else: - */ - PyErr_NoMemory(); __PYX_ERR(2, 461, __pyx_L1_error) - - /* "View.MemoryView":460 - * if <size_t>self.view.itemsize > sizeof(array): - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * item = tmp - */ - } - - /* "View.MemoryView":462 - * if tmp == NULL: - * raise MemoryError - * item = tmp # <<<<<<<<<<<<<< - * else: - * item = <void *> array - */ - __pyx_v_item = __pyx_v_tmp; - - /* "View.MemoryView":458 - * dst_slice = 
get_slice_from_memview(dst, &tmp_slice) - * - * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":464 - * item = tmp - * else: - * item = <void *> array # <<<<<<<<<<<<<< - * - * try: - */ - /*else*/ { - __pyx_v_item = ((void *)__pyx_v_array); - } - __pyx_L3:; - - /* "View.MemoryView":466 - * item = <void *> array - * - * try: # <<<<<<<<<<<<<< - * if self.dtype_is_object: - * (<PyObject **> item)[0] = <PyObject *> value - */ - /*try:*/ { - - /* "View.MemoryView":467 - * - * try: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * (<PyObject **> item)[0] = <PyObject *> value - * else: - */ - __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":468 - * try: - * if self.dtype_is_object: - * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< - * else: - * self.assign_item_from_object(<char *> item, value) - */ - (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); - - /* "View.MemoryView":467 - * - * try: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * (<PyObject **> item)[0] = <PyObject *> value - * else: - */ - goto __pyx_L8; - } - - /* "View.MemoryView":470 - * (<PyObject **> item)[0] = <PyObject *> value - * else: - * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< - * - * - */ - /*else*/ { - __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 470, __pyx_L6_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_L8:; - - /* "View.MemoryView":474 - * - * - * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, - */ - __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":475 - * - * if self.view.suboffsets != NULL: - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, - * item, self.dtype_is_object) - */ - __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 475, __pyx_L6_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":474 - * - * - * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, - */ - } - - /* "View.MemoryView":476 - * if self.view.suboffsets != NULL: - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< - * item, self.dtype_is_object) - * finally: - */ - __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); - } - - /* "View.MemoryView":479 - * item, self.dtype_is_object) - * finally: - * PyMem_Free(tmp) # <<<<<<<<<<<<<< - * - * cdef setitem_indexed(self, index, value): - */ - /*finally:*/ { - /*normal exit:*/{ - PyMem_Free(__pyx_v_tmp); - goto __pyx_L7; - } - __pyx_L6_error:; - /*exception exit:*/{ - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - 
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); - if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); - __Pyx_XGOTREF(__pyx_t_7); - __Pyx_XGOTREF(__pyx_t_8); - __Pyx_XGOTREF(__pyx_t_9); - __Pyx_XGOTREF(__pyx_t_10); - __Pyx_XGOTREF(__pyx_t_11); - __Pyx_XGOTREF(__pyx_t_12); - __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; - { - PyMem_Free(__pyx_v_tmp); - } - if (PY_MAJOR_VERSION >= 3) { - __Pyx_XGIVEREF(__pyx_t_10); - __Pyx_XGIVEREF(__pyx_t_11); - __Pyx_XGIVEREF(__pyx_t_12); - __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); - } - __Pyx_XGIVEREF(__pyx_t_7); - __Pyx_XGIVEREF(__pyx_t_8); - __Pyx_XGIVEREF(__pyx_t_9); - __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); - __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; - __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; - goto __pyx_L1_error; - } - __pyx_L7:; - } - - /* "View.MemoryView":449 - * src.ndim, dst.ndim, self.dtype_is_object) - * - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< - * cdef int array[128] - * cdef void *tmp = NULL - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":481 - * PyMem_Free(tmp) - * - * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< - * cdef char *itemp = self.get_item_pointer(index) - * self.assign_item_from_object(itemp, value) - */ - -static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { - char *__pyx_v_itemp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - char *__pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("setitem_indexed", 0); - - /* "View.MemoryView":482 - * - * cdef setitem_indexed(self, index, value): - * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< - * self.assign_item_from_object(itemp, value) - * - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(2, 482, __pyx_L1_error) - __pyx_v_itemp = __pyx_t_1; - - /* "View.MemoryView":483 - * cdef setitem_indexed(self, index, value): - * cdef char *itemp = self.get_item_pointer(index) - * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< - * - * cdef convert_item_to_object(self, char *itemp): - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 483, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "View.MemoryView":481 - * PyMem_Free(tmp) - * - * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< - * cdef char *itemp = 
self.get_item_pointer(index) - * self.assign_item_from_object(itemp, value) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":485 - * self.assign_item_from_object(itemp, value) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - */ - -static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { - PyObject *__pyx_v_struct = NULL; - PyObject *__pyx_v_bytesitem = 0; - PyObject *__pyx_v_result = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - int __pyx_t_8; - PyObject *__pyx_t_9 = NULL; - size_t __pyx_t_10; - int __pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("convert_item_to_object", 0); - - /* "View.MemoryView":488 - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - * import struct # <<<<<<<<<<<<<< - * cdef bytes bytesitem - * - */ - __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 488, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_struct = __pyx_t_1; - __pyx_t_1 = 0; - - /* "View.MemoryView":491 - * cdef bytes bytesitem - * - * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< - * try: - * result = struct.unpack(self.view.format, bytesitem) - */ - __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 491, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":492 - * - * bytesitem = itemp[:self.view.itemsize] - * try: # <<<<<<<<<<<<<< - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_4); - /*try:*/ { - - /* "View.MemoryView":493 - * bytesitem = itemp[:self.view.itemsize] - * try: - * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< - * except struct.error: - * raise ValueError("Unable to convert item to object") - */ - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 493, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 493, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = NULL; - __pyx_t_8 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, 
function); - __pyx_t_8 = 1; - } - } - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(__pyx_t_5)) { - PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 493, __pyx_L3_error) - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } else - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { - PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 493, __pyx_L3_error) - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } else - #endif - { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 493, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_9); - if (__pyx_t_7) { - __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; - } - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); - __Pyx_INCREF(__pyx_v_bytesitem); - __Pyx_GIVEREF(__pyx_v_bytesitem); - PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); - __pyx_t_6 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 493, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_result = __pyx_t_1; - __pyx_t_1 = 0; - - /* "View.MemoryView":492 - * - * bytesitem = itemp[:self.view.itemsize] - * try: # <<<<<<<<<<<<<< - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - */ - } - - /* "View.MemoryView":497 - * raise ValueError("Unable to convert item to object") - * else: - * if len(self.view.format) == 1: # <<<<<<<<<<<<<< - * return result[0] - * return result - */ - /*else:*/ { - __pyx_t_10 = strlen(__pyx_v_self->view.format); - __pyx_t_11 = ((__pyx_t_10 == 1) != 0); - if (__pyx_t_11) { - - /* "View.MemoryView":498 - * else: - * if len(self.view.format) == 1: - * return result[0] # <<<<<<<<<<<<<< - * return result - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 498, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L6_except_return; - - /* "View.MemoryView":497 - * raise ValueError("Unable to convert item to object") - * else: - * if len(self.view.format) == 1: # <<<<<<<<<<<<<< - * return result[0] - * return result - */ - } - - /* "View.MemoryView":499 - * if len(self.view.format) == 1: - * return result[0] - * return result # <<<<<<<<<<<<<< - * - * cdef assign_item_from_object(self, char *itemp, object value): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_result); - __pyx_r = __pyx_v_result; - goto __pyx_L6_except_return; - } - __pyx_L3_error:; - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; - - /* "View.MemoryView":494 - * try: - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: # <<<<<<<<<<<<<< - * raise ValueError("Unable to convert item to object") - * else: - */ - __Pyx_ErrFetch(&__pyx_t_1, 
&__pyx_t_5, &__pyx_t_9); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 494, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); - __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; - if (__pyx_t_8) { - __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(2, 494, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GOTREF(__pyx_t_1); - - /* "View.MemoryView":495 - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< - * else: - * if len(self.view.format) == 1: - */ - __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 495, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_Raise(__pyx_t_6, 0, 0, 0); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __PYX_ERR(2, 495, __pyx_L5_except_error) - } - goto __pyx_L5_except_error; - __pyx_L5_except_error:; - - /* "View.MemoryView":492 - * - * bytesitem = itemp[:self.view.itemsize] - * try: # <<<<<<<<<<<<<< - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - */ - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); - goto __pyx_L1_error; - __pyx_L6_except_return:; - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); - goto __pyx_L0; - } - - /* "View.MemoryView":485 - * self.assign_item_from_object(itemp, value) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_struct); - __Pyx_XDECREF(__pyx_v_bytesitem); - __Pyx_XDECREF(__pyx_v_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":501 - * return result - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - */ - -static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { - PyObject *__pyx_v_struct = NULL; - char __pyx_v_c; - PyObject *__pyx_v_bytesvalue = 0; - Py_ssize_t __pyx_v_i; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - int __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - Py_ssize_t __pyx_t_9; - PyObject *__pyx_t_10 = NULL; - char *__pyx_t_11; - char *__pyx_t_12; - char *__pyx_t_13; - char 
*__pyx_t_14; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("assign_item_from_object", 0); - - /* "View.MemoryView":504 - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - * import struct # <<<<<<<<<<<<<< - * cdef char c - * cdef bytes bytesvalue - */ - __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 504, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_struct = __pyx_t_1; - __pyx_t_1 = 0; - - /* "View.MemoryView":509 - * cdef Py_ssize_t i - * - * if isinstance(value, tuple): # <<<<<<<<<<<<<< - * bytesvalue = struct.pack(self.view.format, *value) - * else: - */ - __pyx_t_2 = PyTuple_Check(__pyx_v_value); - __pyx_t_3 = (__pyx_t_2 != 0); - if (__pyx_t_3) { - - /* "View.MemoryView":510 - * - * if isinstance(value, tuple): - * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< - * else: - * bytesvalue = struct.pack(self.view.format, value) - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(2, 510, __pyx_L1_error) - __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); - __pyx_t_4 = 0; - - /* "View.MemoryView":509 - * cdef Py_ssize_t i - * - * if isinstance(value, tuple): # <<<<<<<<<<<<<< - * bytesvalue = struct.pack(self.view.format, *value) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":512 - * bytesvalue = struct.pack(self.view.format, *value) - * else: - * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< - * - * for i, c in enumerate(bytesvalue): - */ - /*else*/ { - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 512, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 512, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_6, function); - __pyx_t_7 = 1; - } - } - #if CYTHON_FAST_PYCALL - if 
(PyFunction_Check(__pyx_t_6)) { - PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; - __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 512, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } else - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { - PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; - __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 512, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } else - #endif - { - __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 512, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - if (__pyx_t_5) { - __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; - } - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); - __Pyx_INCREF(__pyx_v_value); - __Pyx_GIVEREF(__pyx_v_value); - PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); - __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 512, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(2, 512, __pyx_L1_error) - __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); - __pyx_t_4 = 0; - } - __pyx_L3:; - - /* "View.MemoryView":514 - * bytesvalue = struct.pack(self.view.format, value) - * - * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< - * itemp[i] = c - * - */ - __pyx_t_9 = 0; - if (unlikely(__pyx_v_bytesvalue == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); - __PYX_ERR(2, 514, __pyx_L1_error) - } - __Pyx_INCREF(__pyx_v_bytesvalue); - __pyx_t_10 = __pyx_v_bytesvalue; - __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); - __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); - for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { - __pyx_t_11 = __pyx_t_14; - __pyx_v_c = (__pyx_t_11[0]); - - /* "View.MemoryView":515 - * - * for i, c in enumerate(bytesvalue): - * itemp[i] = c # <<<<<<<<<<<<<< - * - * @cname('getbuffer') - */ - __pyx_v_i = __pyx_t_9; - - /* "View.MemoryView":514 - * bytesvalue = struct.pack(self.view.format, value) - * - * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< - * itemp[i] = c - * - */ - __pyx_t_9 = (__pyx_t_9 + 1); - - /* "View.MemoryView":515 - * - * for i, c in enumerate(bytesvalue): - * itemp[i] = c # <<<<<<<<<<<<<< - * - * @cname('getbuffer') - */ - (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; - } - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "View.MemoryView":501 - * return result - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_8); - 
__Pyx_XDECREF(__pyx_t_10); - __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_struct); - __Pyx_XDECREF(__pyx_v_bytesvalue); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":518 - * - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< - * if flags & PyBUF_WRITABLE and self.view.readonly: - * raise ValueError("Cannot create writable memory view from read-only memoryview") - */ - -/* Python wrapper */ -static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - Py_ssize_t *__pyx_t_4; - char *__pyx_t_5; - void *__pyx_t_6; - int __pyx_t_7; - Py_ssize_t __pyx_t_8; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - if (__pyx_v_info == NULL) { - PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); - return -1; - } - __Pyx_RefNannySetupContext("__getbuffer__", 0); - __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(__pyx_v_info->obj); - - /* "View.MemoryView":519 - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< - * raise ValueError("Cannot create writable memory view from read-only memoryview") - * - */ - __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_2 = (__pyx_v_self->view.readonly != 0); - __pyx_t_1 = __pyx_t_2; - __pyx_L4_bool_binop_done:; - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":520 - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: - * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< - * - * if flags & PyBUF_ND: - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 520, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(2, 520, __pyx_L1_error) - - /* "View.MemoryView":519 - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< - * raise ValueError("Cannot create writable memory view from read-only memoryview") - * - */ - } - - /* "View.MemoryView":522 - * raise ValueError("Cannot create writable memory view from read-only memoryview") - * - * if flags & PyBUF_ND: # 
<<<<<<<<<<<<<< - * info.shape = self.view.shape - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":523 - * - * if flags & PyBUF_ND: - * info.shape = self.view.shape # <<<<<<<<<<<<<< - * else: - * info.shape = NULL - */ - __pyx_t_4 = __pyx_v_self->view.shape; - __pyx_v_info->shape = __pyx_t_4; - - /* "View.MemoryView":522 - * raise ValueError("Cannot create writable memory view from read-only memoryview") - * - * if flags & PyBUF_ND: # <<<<<<<<<<<<<< - * info.shape = self.view.shape - * else: - */ - goto __pyx_L6; - } - - /* "View.MemoryView":525 - * info.shape = self.view.shape - * else: - * info.shape = NULL # <<<<<<<<<<<<<< - * - * if flags & PyBUF_STRIDES: - */ - /*else*/ { - __pyx_v_info->shape = NULL; - } - __pyx_L6:; - - /* "View.MemoryView":527 - * info.shape = NULL - * - * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< - * info.strides = self.view.strides - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":528 - * - * if flags & PyBUF_STRIDES: - * info.strides = self.view.strides # <<<<<<<<<<<<<< - * else: - * info.strides = NULL - */ - __pyx_t_4 = __pyx_v_self->view.strides; - __pyx_v_info->strides = __pyx_t_4; - - /* "View.MemoryView":527 - * info.shape = NULL - * - * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< - * info.strides = self.view.strides - * else: - */ - goto __pyx_L7; - } - - /* "View.MemoryView":530 - * info.strides = self.view.strides - * else: - * info.strides = NULL # <<<<<<<<<<<<<< - * - * if flags & PyBUF_INDIRECT: - */ - /*else*/ { - __pyx_v_info->strides = NULL; - } - __pyx_L7:; - - /* "View.MemoryView":532 - * info.strides = NULL - * - * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< - * info.suboffsets = self.view.suboffsets - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":533 - * - * if flags & PyBUF_INDIRECT: - * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< - * else: - * info.suboffsets = NULL - */ - __pyx_t_4 = __pyx_v_self->view.suboffsets; - __pyx_v_info->suboffsets = __pyx_t_4; - - /* "View.MemoryView":532 - * info.strides = NULL - * - * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< - * info.suboffsets = self.view.suboffsets - * else: - */ - goto __pyx_L8; - } - - /* "View.MemoryView":535 - * info.suboffsets = self.view.suboffsets - * else: - * info.suboffsets = NULL # <<<<<<<<<<<<<< - * - * if flags & PyBUF_FORMAT: - */ - /*else*/ { - __pyx_v_info->suboffsets = NULL; - } - __pyx_L8:; - - /* "View.MemoryView":537 - * info.suboffsets = NULL - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * info.format = self.view.format - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":538 - * - * if flags & PyBUF_FORMAT: - * info.format = self.view.format # <<<<<<<<<<<<<< - * else: - * info.format = NULL - */ - __pyx_t_5 = __pyx_v_self->view.format; - __pyx_v_info->format = __pyx_t_5; - - /* "View.MemoryView":537 - * info.suboffsets = NULL - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * info.format = self.view.format - * else: - */ - goto __pyx_L9; - } - - /* "View.MemoryView":540 - * info.format = self.view.format - * else: - * info.format = NULL # <<<<<<<<<<<<<< - * - * info.buf = self.view.buf - */ - /*else*/ { - __pyx_v_info->format = NULL; - } - __pyx_L9:; - - /* "View.MemoryView":542 - * info.format = NULL - * - * info.buf = self.view.buf # <<<<<<<<<<<<<< - * info.ndim = self.view.ndim - * info.itemsize = 
self.view.itemsize - */ - __pyx_t_6 = __pyx_v_self->view.buf; - __pyx_v_info->buf = __pyx_t_6; - - /* "View.MemoryView":543 - * - * info.buf = self.view.buf - * info.ndim = self.view.ndim # <<<<<<<<<<<<<< - * info.itemsize = self.view.itemsize - * info.len = self.view.len - */ - __pyx_t_7 = __pyx_v_self->view.ndim; - __pyx_v_info->ndim = __pyx_t_7; - - /* "View.MemoryView":544 - * info.buf = self.view.buf - * info.ndim = self.view.ndim - * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< - * info.len = self.view.len - * info.readonly = self.view.readonly - */ - __pyx_t_8 = __pyx_v_self->view.itemsize; - __pyx_v_info->itemsize = __pyx_t_8; - - /* "View.MemoryView":545 - * info.ndim = self.view.ndim - * info.itemsize = self.view.itemsize - * info.len = self.view.len # <<<<<<<<<<<<<< - * info.readonly = self.view.readonly - * info.obj = self - */ - __pyx_t_8 = __pyx_v_self->view.len; - __pyx_v_info->len = __pyx_t_8; - - /* "View.MemoryView":546 - * info.itemsize = self.view.itemsize - * info.len = self.view.len - * info.readonly = self.view.readonly # <<<<<<<<<<<<<< - * info.obj = self - * - */ - __pyx_t_1 = __pyx_v_self->view.readonly; - __pyx_v_info->readonly = __pyx_t_1; - - /* "View.MemoryView":547 - * info.len = self.view.len - * info.readonly = self.view.readonly - * info.obj = self # <<<<<<<<<<<<<< - * - * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") - */ - __Pyx_INCREF(((PyObject *)__pyx_v_self)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = ((PyObject *)__pyx_v_self); - - /* "View.MemoryView":518 - * - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< - * if flags & PyBUF_WRITABLE and self.view.readonly: - * raise ValueError("Cannot create writable memory view from read-only memoryview") - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - if (__pyx_v_info->obj != NULL) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - goto __pyx_L2; - __pyx_L0:; - if (__pyx_v_info->obj == Py_None) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - __pyx_L2:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":553 - * - * @property - * def T(self): # <<<<<<<<<<<<<< - * cdef _memoryviewslice result = memoryview_copy(self) - * transpose_memslice(&result.from_slice) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_lineno = 0; - const 
char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":554 - * @property - * def T(self): - * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< - * transpose_memslice(&result.from_slice) - * return result - */ - __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 554, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(2, 554, __pyx_L1_error) - __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":555 - * def T(self): - * cdef _memoryviewslice result = memoryview_copy(self) - * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< - * return result - * - */ - __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(2, 555, __pyx_L1_error) - - /* "View.MemoryView":556 - * cdef _memoryviewslice result = memoryview_copy(self) - * transpose_memslice(&result.from_slice) - * return result # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_result)); - __pyx_r = ((PyObject *)__pyx_v_result); - goto __pyx_L0; - - /* "View.MemoryView":553 - * - * @property - * def T(self): # <<<<<<<<<<<<<< - * cdef _memoryviewslice result = memoryview_copy(self) - * transpose_memslice(&result.from_slice) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":559 - * - * @property - * def base(self): # <<<<<<<<<<<<<< - * return self.obj - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":560 - * @property - * def base(self): - * return self.obj # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->obj); - __pyx_r = __pyx_v_self->obj; - goto __pyx_L0; - - /* "View.MemoryView":559 - * - * @property - * def base(self): # <<<<<<<<<<<<<< - * return self.obj - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":563 - * - * @property - * def shape(self): # <<<<<<<<<<<<<< - * return tuple([length for length in self.view.shape[:self.view.ndim]]) - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject 
*__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - Py_ssize_t __pyx_v_length; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - Py_ssize_t *__pyx_t_2; - Py_ssize_t *__pyx_t_3; - Py_ssize_t *__pyx_t_4; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":564 - * @property - * def shape(self): - * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 564, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); - for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { - __pyx_t_2 = __pyx_t_4; - __pyx_v_length = (__pyx_t_2[0]); - __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 564, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(2, 564, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 564, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_5; - __pyx_t_5 = 0; - goto __pyx_L0; - - /* "View.MemoryView":563 - * - * @property - * def shape(self): # <<<<<<<<<<<<<< - * return tuple([length for length in self.view.shape[:self.view.ndim]]) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":567 - * - * @property - * def strides(self): # <<<<<<<<<<<<<< - * if self.view.strides == NULL: - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - Py_ssize_t __pyx_v_stride; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - Py_ssize_t *__pyx_t_3; - Py_ssize_t *__pyx_t_4; - Py_ssize_t *__pyx_t_5; - PyObject *__pyx_t_6 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int 
__pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":568 - * @property - * def strides(self): - * if self.view.strides == NULL: # <<<<<<<<<<<<<< - * - * raise ValueError("Buffer view does not expose strides") - */ - __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":570 - * if self.view.strides == NULL: - * - * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< - * - * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) - */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 570, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(2, 570, __pyx_L1_error) - - /* "View.MemoryView":568 - * @property - * def strides(self): - * if self.view.strides == NULL: # <<<<<<<<<<<<<< - * - * raise ValueError("Buffer view does not expose strides") - */ - } - - /* "View.MemoryView":572 - * raise ValueError("Buffer view does not expose strides") - * - * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 572, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); - for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { - __pyx_t_3 = __pyx_t_5; - __pyx_v_stride = (__pyx_t_3[0]); - __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 572, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(2, 572, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 572, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_6; - __pyx_t_6 = 0; - goto __pyx_L0; - - /* "View.MemoryView":567 - * - * @property - * def strides(self): # <<<<<<<<<<<<<< - * if self.view.strides == NULL: - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":575 - * - * @property - * def suboffsets(self): # <<<<<<<<<<<<<< - * if self.view.suboffsets == NULL: - * return (-1,) * self.view.ndim - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - Py_ssize_t __pyx_v_suboffset; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int 
__pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - Py_ssize_t *__pyx_t_4; - Py_ssize_t *__pyx_t_5; - Py_ssize_t *__pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":576 - * @property - * def suboffsets(self): - * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< - * return (-1,) * self.view.ndim - * - */ - __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":577 - * def suboffsets(self): - * if self.view.suboffsets == NULL: - * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< - * - * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 577, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__15, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 577, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "View.MemoryView":576 - * @property - * def suboffsets(self): - * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< - * return (-1,) * self.view.ndim - * - */ - } - - /* "View.MemoryView":579 - * return (-1,) * self.view.ndim - * - * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 579, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); - for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { - __pyx_t_4 = __pyx_t_6; - __pyx_v_suboffset = (__pyx_t_4[0]); - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 579, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(2, 579, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 579, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":575 - * - * @property - * def suboffsets(self): # <<<<<<<<<<<<<< - * if self.view.suboffsets == NULL: - * return (-1,) * self.view.ndim - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":582 - * - * @property - * def ndim(self): # <<<<<<<<<<<<<< - * return self.view.ndim - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - 
__Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":583 - * @property - * def ndim(self): - * return self.view.ndim # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 583, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":582 - * - * @property - * def ndim(self): # <<<<<<<<<<<<<< - * return self.view.ndim - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":586 - * - * @property - * def itemsize(self): # <<<<<<<<<<<<<< - * return self.view.itemsize - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":587 - * @property - * def itemsize(self): - * return self.view.itemsize # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 587, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":586 - * - * @property - * def itemsize(self): # <<<<<<<<<<<<<< - * return self.view.itemsize - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":590 - * - * @property - * def nbytes(self): # <<<<<<<<<<<<<< - * return self.size * self.view.itemsize - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = 
__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":591 - * @property - * def nbytes(self): - * return self.size * self.view.itemsize # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 591, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 591, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 591, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "View.MemoryView":590 - * - * @property - * def nbytes(self): # <<<<<<<<<<<<<< - * return self.size * self.view.itemsize - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":594 - * - * @property - * def size(self): # <<<<<<<<<<<<<< - * if self._size is None: - * result = 1 - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_v_result = NULL; - PyObject *__pyx_v_length = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - Py_ssize_t *__pyx_t_3; - Py_ssize_t *__pyx_t_4; - Py_ssize_t *__pyx_t_5; - PyObject *__pyx_t_6 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":595 - * @property - * def size(self): - * if self._size is None: # <<<<<<<<<<<<<< - * result = 1 - * - */ - __pyx_t_1 = (__pyx_v_self->_size == Py_None); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":596 - * def size(self): - * if self._size is None: - * result = 1 # <<<<<<<<<<<<<< - * - * for length in self.view.shape[:self.view.ndim]: - */ - __Pyx_INCREF(__pyx_int_1); - __pyx_v_result = __pyx_int_1; - - /* "View.MemoryView":598 - * 
result = 1 - * - * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< - * result *= length - * - */ - __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); - for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { - __pyx_t_3 = __pyx_t_5; - __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 598, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); - __pyx_t_6 = 0; - - /* "View.MemoryView":599 - * - * for length in self.view.shape[:self.view.ndim]: - * result *= length # <<<<<<<<<<<<<< - * - * self._size = result - */ - __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 599, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); - __pyx_t_6 = 0; - } - - /* "View.MemoryView":601 - * result *= length - * - * self._size = result # <<<<<<<<<<<<<< - * - * return self._size - */ - __Pyx_INCREF(__pyx_v_result); - __Pyx_GIVEREF(__pyx_v_result); - __Pyx_GOTREF(__pyx_v_self->_size); - __Pyx_DECREF(__pyx_v_self->_size); - __pyx_v_self->_size = __pyx_v_result; - - /* "View.MemoryView":595 - * @property - * def size(self): - * if self._size is None: # <<<<<<<<<<<<<< - * result = 1 - * - */ - } - - /* "View.MemoryView":603 - * self._size = result - * - * return self._size # <<<<<<<<<<<<<< - * - * def __len__(self): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->_size); - __pyx_r = __pyx_v_self->_size; - goto __pyx_L0; - - /* "View.MemoryView":594 - * - * @property - * def size(self): # <<<<<<<<<<<<<< - * if self._size is None: - * result = 1 - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_result); - __Pyx_XDECREF(__pyx_v_length); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":605 - * return self._size - * - * def __len__(self): # <<<<<<<<<<<<<< - * if self.view.ndim >= 1: - * return self.view.shape[0] - */ - -/* Python wrapper */ -static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ -static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("__len__", 0); - - /* "View.MemoryView":606 - * - * def __len__(self): - * if self.view.ndim >= 1: # <<<<<<<<<<<<<< - * return self.view.shape[0] - * - */ - __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":607 - * def __len__(self): - * if self.view.ndim >= 1: - * return self.view.shape[0] # <<<<<<<<<<<<<< - * - * return 0 - */ - __pyx_r = (__pyx_v_self->view.shape[0]); - goto __pyx_L0; - - /* "View.MemoryView":606 - * - * def __len__(self): - * if self.view.ndim >= 1: # <<<<<<<<<<<<<< - * return self.view.shape[0] - * - */ - } - - /* "View.MemoryView":609 - * return 
self.view.shape[0] - * - * return 0 # <<<<<<<<<<<<<< - * - * def __repr__(self): - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":605 - * return self._size - * - * def __len__(self): # <<<<<<<<<<<<<< - * if self.view.ndim >= 1: - * return self.view.shape[0] - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":611 - * return 0 - * - * def __repr__(self): # <<<<<<<<<<<<<< - * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, - * id(self)) - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__repr__", 0); - - /* "View.MemoryView":612 - * - * def __repr__(self): - * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< - * id(self)) - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "View.MemoryView":613 - * def __repr__(self): - * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, - * id(self)) # <<<<<<<<<<<<<< - * - * def __str__(self): - */ - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 613, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - - /* "View.MemoryView":612 - * - * def __repr__(self): - * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< - * id(self)) - * - */ - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":611 - * return 0 - * - * def __repr__(self): # <<<<<<<<<<<<<< - * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, - * id(self)) - */ - - /* function exit code */ - __pyx_L1_error:; 
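- /* Error path for __repr__: drop any temporaries still held, append a traceback frame naming View.MemoryView.memoryview.__repr__, and fall through to the shared exit label with a NULL result. */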
- __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":615 - * id(self)) - * - * def __str__(self): # <<<<<<<<<<<<<< - * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) - * - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__str__", 0); - - /* "View.MemoryView":616 - * - * def __str__(self): - * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":615 - * id(self)) - * - * def __str__(self): # <<<<<<<<<<<<<< - * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":619 - * - * - * def is_c_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - 
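- /* is_c_contig materialises a __Pyx_memviewslice for this view (via get_slice_from_memview) and asks the shared contiguity checker whether the strides describe C (row-major) order; is_f_contig further down is identical apart from passing 'F' instead of 'C'. */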
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice *__pyx_v_mslice; - __Pyx_memviewslice __pyx_v_tmp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_c_contig", 0); - - /* "View.MemoryView":622 - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< - * return slice_is_contig(mslice[0], 'C', self.view.ndim) - * - */ - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 622, __pyx_L1_error) - __pyx_v_mslice = __pyx_t_1; - - /* "View.MemoryView":623 - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) - * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< - * - * def is_f_contig(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 623, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":619 - * - * - * def is_c_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":625 - * return slice_is_contig(mslice[0], 'C', self.view.ndim) - * - * def is_f_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice *__pyx_v_mslice; - __Pyx_memviewslice __pyx_v_tmp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_f_contig", 0); - - /* "View.MemoryView":628 - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< - * 
return slice_is_contig(mslice[0], 'F', self.view.ndim) - * - */ - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 628, __pyx_L1_error) - __pyx_v_mslice = __pyx_t_1; - - /* "View.MemoryView":629 - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) - * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< - * - * def copy(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 629, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":625 - * return slice_is_contig(mslice[0], 'C', self.view.ndim) - * - * def is_f_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":631 - * return slice_is_contig(mslice[0], 'F', self.view.ndim) - * - * def copy(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice mslice - * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("copy (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice __pyx_v_mslice; - int __pyx_v_flags; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("copy", 0); - - /* "View.MemoryView":633 - * def copy(self): - * cdef __Pyx_memviewslice mslice - * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< - * - * slice_copy(self, &mslice) - */ - __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); - - /* "View.MemoryView":635 - * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS - * - * slice_copy(self, &mslice) # <<<<<<<<<<<<<< - * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, - * self.view.itemsize, - */ - __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); - - /* "View.MemoryView":636 - * - * slice_copy(self, &mslice) - * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< - * self.view.itemsize, - * flags|PyBUF_C_CONTIGUOUS, - */ - __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(2, 636, __pyx_L1_error) - __pyx_v_mslice = __pyx_t_1; - - /* 
"View.MemoryView":641 - * self.dtype_is_object) - * - * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< - * - * def copy_fortran(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 641, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":631 - * return slice_is_contig(mslice[0], 'F', self.view.ndim) - * - * def copy(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice mslice - * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":643 - * return memoryview_copy_from_slice(self, &mslice) - * - * def copy_fortran(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice src, dst - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice __pyx_v_src; - __Pyx_memviewslice __pyx_v_dst; - int __pyx_v_flags; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("copy_fortran", 0); - - /* "View.MemoryView":645 - * def copy_fortran(self): - * cdef __Pyx_memviewslice src, dst - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< - * - * slice_copy(self, &src) - */ - __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); - - /* "View.MemoryView":647 - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS - * - * slice_copy(self, &src) # <<<<<<<<<<<<<< - * dst = slice_copy_contig(&src, "fortran", self.view.ndim, - * self.view.itemsize, - */ - __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); - - /* "View.MemoryView":648 - * - * slice_copy(self, &src) - * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< - * self.view.itemsize, - * flags|PyBUF_F_CONTIGUOUS, - */ - __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(2, 648, __pyx_L1_error) - __pyx_v_dst = __pyx_t_1; - - /* "View.MemoryView":653 - * self.dtype_is_object) - * - * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 
653, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":643 - * return memoryview_copy_from_slice(self, &mslice) - * - * def copy_fortran(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice src, dst - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(2, 2, __pyx_L1_error) - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - __pyx_r = 
__pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(2, 4, __pyx_L1_error) - - /* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":657 - * - * @cname('__pyx_memoryview_new') - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo - */ - -static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { - struct __pyx_memoryview_obj *__pyx_v_result = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); - - /* "View.MemoryView":658 - * @cname('__pyx_memoryview_new') - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): - * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< - * result.typeinfo = typeinfo - * return result - */ - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_o); - __Pyx_GIVEREF(__pyx_v_o); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); 
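- /* The C-level constructor packs (o, flags, dtype_is_object) into an argument tuple and calls the memoryview type object itself, so C callers receive a fully initialised Python-level memoryview before typeinfo is patched in below. */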
if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); - __pyx_t_2 = 0; - - /* "View.MemoryView":659 - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo # <<<<<<<<<<<<<< - * return result - * - */ - __pyx_v_result->typeinfo = __pyx_v_typeinfo; - - /* "View.MemoryView":660 - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo - * return result # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_check') - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_result)); - __pyx_r = ((PyObject *)__pyx_v_result); - goto __pyx_L0; - - /* "View.MemoryView":657 - * - * @cname('__pyx_memoryview_new') - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":663 - * - * @cname('__pyx_memoryview_check') - * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< - * return isinstance(o, memoryview) - * - */ - -static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("memoryview_check", 0); - - /* "View.MemoryView":664 - * @cname('__pyx_memoryview_check') - * cdef inline bint memoryview_check(object o): - * return isinstance(o, memoryview) # <<<<<<<<<<<<<< - * - * cdef tuple _unellipsify(object index, int ndim): - */ - __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); - __pyx_r = __pyx_t_1; - goto __pyx_L0; - - /* "View.MemoryView":663 - * - * @cname('__pyx_memoryview_check') - * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< - * return isinstance(o, memoryview) - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":666 - * return isinstance(o, memoryview) - * - * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< - * """ - * Replace all ellipses with full slices and fill incomplete indices with - */ - -static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { - PyObject *__pyx_v_tup = NULL; - PyObject *__pyx_v_result = NULL; - int __pyx_v_have_slices; - int __pyx_v_seen_ellipsis; - CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; - PyObject *__pyx_v_item = NULL; - Py_ssize_t __pyx_v_nslices; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - Py_ssize_t __pyx_t_5; - PyObject *(*__pyx_t_6)(PyObject *); - PyObject *__pyx_t_7 = NULL; - Py_ssize_t __pyx_t_8; - int __pyx_t_9; - int __pyx_t_10; - PyObject *__pyx_t_11 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("_unellipsify", 0); - - /* 
"View.MemoryView":671 - * full slices. - * """ - * if not isinstance(index, tuple): # <<<<<<<<<<<<<< - * tup = (index,) - * else: - */ - __pyx_t_1 = PyTuple_Check(__pyx_v_index); - __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":672 - * """ - * if not isinstance(index, tuple): - * tup = (index,) # <<<<<<<<<<<<<< - * else: - * tup = index - */ - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 672, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_index); - __Pyx_GIVEREF(__pyx_v_index); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); - __pyx_v_tup = __pyx_t_3; - __pyx_t_3 = 0; - - /* "View.MemoryView":671 - * full slices. - * """ - * if not isinstance(index, tuple): # <<<<<<<<<<<<<< - * tup = (index,) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":674 - * tup = (index,) - * else: - * tup = index # <<<<<<<<<<<<<< - * - * result = [] - */ - /*else*/ { - __Pyx_INCREF(__pyx_v_index); - __pyx_v_tup = __pyx_v_index; - } - __pyx_L3:; - - /* "View.MemoryView":676 - * tup = index - * - * result = [] # <<<<<<<<<<<<<< - * have_slices = False - * seen_ellipsis = False - */ - __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 676, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_v_result = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":677 - * - * result = [] - * have_slices = False # <<<<<<<<<<<<<< - * seen_ellipsis = False - * for idx, item in enumerate(tup): - */ - __pyx_v_have_slices = 0; - - /* "View.MemoryView":678 - * result = [] - * have_slices = False - * seen_ellipsis = False # <<<<<<<<<<<<<< - * for idx, item in enumerate(tup): - * if item is Ellipsis: - */ - __pyx_v_seen_ellipsis = 0; - - /* "View.MemoryView":679 - * have_slices = False - * seen_ellipsis = False - * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< - * if item is Ellipsis: - * if not seen_ellipsis: - */ - __Pyx_INCREF(__pyx_int_0); - __pyx_t_3 = __pyx_int_0; - if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { - __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; - __pyx_t_6 = NULL; - } else { - __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 679, __pyx_L1_error) - } - for (;;) { - if (likely(!__pyx_t_6)) { - if (likely(PyList_CheckExact(__pyx_t_4))) { - if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(2, 679, __pyx_L1_error) - #else - __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - #endif - } else { - if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(2, 679, __pyx_L1_error) - #else - __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - #endif - } - } else { - __pyx_t_7 = __pyx_t_6(__pyx_t_4); - if (unlikely(!__pyx_t_7)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if 
(likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(2, 679, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_7); - } - __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); - __pyx_t_7 = 0; - __Pyx_INCREF(__pyx_t_3); - __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); - __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = __pyx_t_7; - __pyx_t_7 = 0; - - /* "View.MemoryView":680 - * seen_ellipsis = False - * for idx, item in enumerate(tup): - * if item is Ellipsis: # <<<<<<<<<<<<<< - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - */ - __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); - __pyx_t_1 = (__pyx_t_2 != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":681 - * for idx, item in enumerate(tup): - * if item is Ellipsis: - * if not seen_ellipsis: # <<<<<<<<<<<<<< - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - * seen_ellipsis = True - */ - __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":682 - * if item is Ellipsis: - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< - * seen_ellipsis = True - * else: - */ - __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(2, 682, __pyx_L1_error) - __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 682, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - { Py_ssize_t __pyx_temp; - for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { - __Pyx_INCREF(__pyx_slice__18); - __Pyx_GIVEREF(__pyx_slice__18); - PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__18); - } - } - __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 682, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - - /* "View.MemoryView":683 - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - * seen_ellipsis = True # <<<<<<<<<<<<<< - * else: - * result.append(slice(None)) - */ - __pyx_v_seen_ellipsis = 1; - - /* "View.MemoryView":681 - * for idx, item in enumerate(tup): - * if item is Ellipsis: - * if not seen_ellipsis: # <<<<<<<<<<<<<< - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - * seen_ellipsis = True - */ - goto __pyx_L7; - } - - /* "View.MemoryView":685 - * seen_ellipsis = True - * else: - * result.append(slice(None)) # <<<<<<<<<<<<<< - * have_slices = True - * else: - */ - /*else*/ { - __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__18); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 685, __pyx_L1_error) - } - __pyx_L7:; - - /* "View.MemoryView":686 - * else: - * result.append(slice(None)) - * have_slices = True # <<<<<<<<<<<<<< - * else: - * if not isinstance(item, slice) and not PyIndex_Check(item): - */ - __pyx_v_have_slices = 1; - - /* "View.MemoryView":680 - * seen_ellipsis = False - * for idx, item in enumerate(tup): - * if item is Ellipsis: # <<<<<<<<<<<<<< - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - */ - goto __pyx_L6; - } - - /* "View.MemoryView":688 - * have_slices = True - * else: - * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< - * raise TypeError("Cannot index with type '%s'" % type(item)) - * 
- */ - /*else*/ { - __pyx_t_2 = PySlice_Check(__pyx_v_item); - __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); - if (__pyx_t_10) { - } else { - __pyx_t_1 = __pyx_t_10; - goto __pyx_L9_bool_binop_done; - } - __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); - __pyx_t_1 = __pyx_t_10; - __pyx_L9_bool_binop_done:; - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":689 - * else: - * if not isinstance(item, slice) and not PyIndex_Check(item): - * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< - * - * have_slices = have_slices or isinstance(item, slice) - */ - __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 689, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(2, 689, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_Raise(__pyx_t_11, 0, 0, 0); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __PYX_ERR(2, 689, __pyx_L1_error) - - /* "View.MemoryView":688 - * have_slices = True - * else: - * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< - * raise TypeError("Cannot index with type '%s'" % type(item)) - * - */ - } - - /* "View.MemoryView":691 - * raise TypeError("Cannot index with type '%s'" % type(item)) - * - * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< - * result.append(item) - * - */ - __pyx_t_10 = (__pyx_v_have_slices != 0); - if (!__pyx_t_10) { - } else { - __pyx_t_1 = __pyx_t_10; - goto __pyx_L11_bool_binop_done; - } - __pyx_t_10 = PySlice_Check(__pyx_v_item); - __pyx_t_2 = (__pyx_t_10 != 0); - __pyx_t_1 = __pyx_t_2; - __pyx_L11_bool_binop_done:; - __pyx_v_have_slices = __pyx_t_1; - - /* "View.MemoryView":692 - * - * have_slices = have_slices or isinstance(item, slice) - * result.append(item) # <<<<<<<<<<<<<< - * - * nslices = ndim - len(result) - */ - __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 692, __pyx_L1_error) - } - __pyx_L6:; - - /* "View.MemoryView":679 - * have_slices = False - * seen_ellipsis = False - * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< - * if item is Ellipsis: - * if not seen_ellipsis: - */ - } - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":694 - * result.append(item) - * - * nslices = ndim - len(result) # <<<<<<<<<<<<<< - * if nslices: - * result.extend([slice(None)] * nslices) - */ - __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(2, 694, __pyx_L1_error) - __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); - - /* "View.MemoryView":695 - * - * nslices = ndim - len(result) - * if nslices: # <<<<<<<<<<<<<< - * result.extend([slice(None)] * nslices) - * - */ - __pyx_t_1 = (__pyx_v_nslices != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":696 - * nslices = ndim - len(result) - * if nslices: - * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< - * - * return have_slices or nslices, tuple(result) - */ - __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 696, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - { Py_ssize_t __pyx_temp; - for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { - __Pyx_INCREF(__pyx_slice__18); - __Pyx_GIVEREF(__pyx_slice__18); - PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__18); - } - } - __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 696, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":695 - * - * nslices = ndim - len(result) - * if nslices: # <<<<<<<<<<<<<< - * result.extend([slice(None)] * nslices) - * - */ - } - - /* "View.MemoryView":698 - * result.extend([slice(None)] * nslices) - * - * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< - * - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - */ - __Pyx_XDECREF(__pyx_r); - if (!__pyx_v_have_slices) { - } else { - __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L14_bool_binop_done; - } - __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __pyx_t_4; - __pyx_t_4 = 0; - __pyx_L14_bool_binop_done:; - __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(2, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); - __pyx_t_3 = 0; - __pyx_t_4 = 0; - __pyx_r = ((PyObject*)__pyx_t_11); - __pyx_t_11 = 0; - goto __pyx_L0; - - /* "View.MemoryView":666 - * return isinstance(o, memoryview) - * - * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< - * """ - * Replace all ellipses with full slices and fill incomplete indices with - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_11); - __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_tup); - __Pyx_XDECREF(__pyx_v_result); - __Pyx_XDECREF(__pyx_v_idx); - __Pyx_XDECREF(__pyx_v_item); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":700 - * return have_slices or nslices, tuple(result) - * - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - */ - -static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { - Py_ssize_t __pyx_v_suboffset; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - Py_ssize_t *__pyx_t_1; - Py_ssize_t *__pyx_t_2; - Py_ssize_t *__pyx_t_3; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); - - /* "View.MemoryView":701 - * - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< - * if suboffset >= 0: - * raise ValueError("Indirect dimensions not supported") - */ - 
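- /* The loop below walks suboffsets[0:ndim] with raw pointers; any entry >= 0 marks an indirect (pointer-chasing) dimension, which this slicing code does not support and reports as a ValueError. */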
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); - for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { - __pyx_t_1 = __pyx_t_3; - __pyx_v_suboffset = (__pyx_t_1[0]); - - /* "View.MemoryView":702 - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * raise ValueError("Indirect dimensions not supported") - * - */ - __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":703 - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 703, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __PYX_ERR(2, 703, __pyx_L1_error) - - /* "View.MemoryView":702 - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * raise ValueError("Indirect dimensions not supported") - * - */ - } - } - - /* "View.MemoryView":700 - * return have_slices or nslices, tuple(result) - * - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":710 - * - * @cname('__pyx_memview_slice') - * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< - * cdef int new_ndim = 0, suboffset_dim = -1, dim - * cdef bint negative_step - */ - -static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { - int __pyx_v_new_ndim; - int __pyx_v_suboffset_dim; - int __pyx_v_dim; - __Pyx_memviewslice __pyx_v_src; - __Pyx_memviewslice __pyx_v_dst; - __Pyx_memviewslice *__pyx_v_p_src; - struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; - __Pyx_memviewslice *__pyx_v_p_dst; - int *__pyx_v_p_suboffset_dim; - Py_ssize_t __pyx_v_start; - Py_ssize_t __pyx_v_stop; - Py_ssize_t __pyx_v_step; - int __pyx_v_have_start; - int __pyx_v_have_stop; - int __pyx_v_have_step; - PyObject *__pyx_v_index = NULL; - struct __pyx_memoryview_obj *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - struct __pyx_memoryview_obj *__pyx_t_4; - char *__pyx_t_5; - int __pyx_t_6; - Py_ssize_t __pyx_t_7; - PyObject *(*__pyx_t_8)(PyObject *); - PyObject *__pyx_t_9 = NULL; - Py_ssize_t __pyx_t_10; - int __pyx_t_11; - Py_ssize_t __pyx_t_12; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memview_slice", 0); - - /* "View.MemoryView":711 - * @cname('__pyx_memview_slice') - * cdef memoryview memview_slice(memoryview memview, object indices): - * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< - * cdef bint negative_step - * cdef __Pyx_memviewslice src, dst - */ - __pyx_v_new_ndim = 0; - __pyx_v_suboffset_dim = -1; - - /* 
"View.MemoryView":718 - * - * - * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< - * - * cdef _memoryviewslice memviewsliceobj - */ - (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); - - /* "View.MemoryView":722 - * cdef _memoryviewslice memviewsliceobj - * - * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< - * - * if isinstance(memview, _memoryviewslice): - */ - #ifndef CYTHON_WITHOUT_ASSERTIONS - if (unlikely(!Py_OptimizeFlag)) { - if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { - PyErr_SetNone(PyExc_AssertionError); - __PYX_ERR(2, 722, __pyx_L1_error) - } - } - #endif - - /* "View.MemoryView":724 - * assert memview.view.ndim > 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * memviewsliceobj = memview - * p_src = &memviewsliceobj.from_slice - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":725 - * - * if isinstance(memview, _memoryviewslice): - * memviewsliceobj = memview # <<<<<<<<<<<<<< - * p_src = &memviewsliceobj.from_slice - * else: - */ - if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(2, 725, __pyx_L1_error) - __pyx_t_3 = ((PyObject *)__pyx_v_memview); - __Pyx_INCREF(__pyx_t_3); - __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":726 - * if isinstance(memview, _memoryviewslice): - * memviewsliceobj = memview - * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< - * else: - * slice_copy(memview, &src) - */ - __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); - - /* "View.MemoryView":724 - * assert memview.view.ndim > 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * memviewsliceobj = memview - * p_src = &memviewsliceobj.from_slice - */ - goto __pyx_L3; - } - - /* "View.MemoryView":728 - * p_src = &memviewsliceobj.from_slice - * else: - * slice_copy(memview, &src) # <<<<<<<<<<<<<< - * p_src = &src - * - */ - /*else*/ { - __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); - - /* "View.MemoryView":729 - * else: - * slice_copy(memview, &src) - * p_src = &src # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_p_src = (&__pyx_v_src); - } - __pyx_L3:; - - /* "View.MemoryView":735 - * - * - * dst.memview = p_src.memview # <<<<<<<<<<<<<< - * dst.data = p_src.data - * - */ - __pyx_t_4 = __pyx_v_p_src->memview; - __pyx_v_dst.memview = __pyx_t_4; - - /* "View.MemoryView":736 - * - * dst.memview = p_src.memview - * dst.data = p_src.data # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_5 = __pyx_v_p_src->data; - __pyx_v_dst.data = __pyx_t_5; - - /* "View.MemoryView":741 - * - * - * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< - * cdef int *p_suboffset_dim = &suboffset_dim - * cdef Py_ssize_t start, stop, step - */ - __pyx_v_p_dst = (&__pyx_v_dst); - - /* "View.MemoryView":742 - * - * cdef __Pyx_memviewslice *p_dst = &dst - * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< - * cdef Py_ssize_t start, stop, step - * cdef bint have_start, have_stop, have_step - */ - __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); - - /* "View.MemoryView":746 - * cdef bint have_start, have_stop, have_step - * - * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< - * if PyIndex_Check(index): - * slice_memviewslice( - */ - __pyx_t_6 = 0; - if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { - 
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; - __pyx_t_8 = NULL; - } else { - __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 746, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 746, __pyx_L1_error) - } - for (;;) { - if (likely(!__pyx_t_8)) { - if (likely(PyList_CheckExact(__pyx_t_3))) { - if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(2, 746, __pyx_L1_error) - #else - __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 746, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - #endif - } else { - if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(2, 746, __pyx_L1_error) - #else - __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 746, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - #endif - } - } else { - __pyx_t_9 = __pyx_t_8(__pyx_t_3); - if (unlikely(!__pyx_t_9)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(2, 746, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_9); - } - __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); - __pyx_t_9 = 0; - __pyx_v_dim = __pyx_t_6; - __pyx_t_6 = (__pyx_t_6 + 1); - - /* "View.MemoryView":747 - * - * for dim, index in enumerate(indices): - * if PyIndex_Check(index): # <<<<<<<<<<<<<< - * slice_memviewslice( - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - */ - __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":751 - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - * dim, new_ndim, p_suboffset_dim, - * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< - * 0, 0, 0, # have_{start,stop,step} - * False) - */ - __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 751, __pyx_L1_error) - - /* "View.MemoryView":748 - * for dim, index in enumerate(indices): - * if PyIndex_Check(index): - * slice_memviewslice( # <<<<<<<<<<<<<< - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - * dim, new_ndim, p_suboffset_dim, - */ - __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(2, 748, __pyx_L1_error) - - /* "View.MemoryView":747 - * - * for dim, index in enumerate(indices): - * if PyIndex_Check(index): # <<<<<<<<<<<<<< - * slice_memviewslice( - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - */ - goto __pyx_L6; - } - - /* "View.MemoryView":754 - * 0, 0, 0, # have_{start,stop,step} - * False) - * elif index is None: # <<<<<<<<<<<<<< - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 - */ - __pyx_t_2 = (__pyx_v_index == Py_None); - __pyx_t_1 = 
(__pyx_t_2 != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":755 - * False) - * elif index is None: - * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< - * p_dst.strides[new_ndim] = 0 - * p_dst.suboffsets[new_ndim] = -1 - */ - (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; - - /* "View.MemoryView":756 - * elif index is None: - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< - * p_dst.suboffsets[new_ndim] = -1 - * new_ndim += 1 - */ - (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; - - /* "View.MemoryView":757 - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 - * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< - * new_ndim += 1 - * else: - */ - (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; - - /* "View.MemoryView":758 - * p_dst.strides[new_ndim] = 0 - * p_dst.suboffsets[new_ndim] = -1 - * new_ndim += 1 # <<<<<<<<<<<<<< - * else: - * start = index.start or 0 - */ - __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); - - /* "View.MemoryView":754 - * 0, 0, 0, # have_{start,stop,step} - * False) - * elif index is None: # <<<<<<<<<<<<<< - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 - */ - goto __pyx_L6; - } - - /* "View.MemoryView":760 - * new_ndim += 1 - * else: - * start = index.start or 0 # <<<<<<<<<<<<<< - * stop = index.stop or 0 - * step = index.step or 0 - */ - /*else*/ { - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 760, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 760, __pyx_L1_error) - if (!__pyx_t_1) { - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } else { - __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 760, __pyx_L1_error) - __pyx_t_10 = __pyx_t_12; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - goto __pyx_L7_bool_binop_done; - } - __pyx_t_10 = 0; - __pyx_L7_bool_binop_done:; - __pyx_v_start = __pyx_t_10; - - /* "View.MemoryView":761 - * else: - * start = index.start or 0 - * stop = index.stop or 0 # <<<<<<<<<<<<<< - * step = index.step or 0 - * - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 761, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 761, __pyx_L1_error) - if (!__pyx_t_1) { - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } else { - __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 761, __pyx_L1_error) - __pyx_t_10 = __pyx_t_12; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - goto __pyx_L9_bool_binop_done; - } - __pyx_t_10 = 0; - __pyx_L9_bool_binop_done:; - __pyx_v_stop = __pyx_t_10; - - /* "View.MemoryView":762 - * start = index.start or 0 - * stop = index.stop or 0 - * step = index.step or 0 # <<<<<<<<<<<<<< - * - * have_start = index.start is not None - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 762, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 762, __pyx_L1_error) - if (!__pyx_t_1) { - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } else { - __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 762, __pyx_L1_error) - __pyx_t_10 = __pyx_t_12; - 
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - goto __pyx_L11_bool_binop_done; - } - __pyx_t_10 = 0; - __pyx_L11_bool_binop_done:; - __pyx_v_step = __pyx_t_10; - - /* "View.MemoryView":764 - * step = index.step or 0 - * - * have_start = index.start is not None # <<<<<<<<<<<<<< - * have_stop = index.stop is not None - * have_step = index.step is not None - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 764, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = (__pyx_t_9 != Py_None); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_v_have_start = __pyx_t_1; - - /* "View.MemoryView":765 - * - * have_start = index.start is not None - * have_stop = index.stop is not None # <<<<<<<<<<<<<< - * have_step = index.step is not None - * - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 765, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = (__pyx_t_9 != Py_None); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_v_have_stop = __pyx_t_1; - - /* "View.MemoryView":766 - * have_start = index.start is not None - * have_stop = index.stop is not None - * have_step = index.step is not None # <<<<<<<<<<<<<< - * - * slice_memviewslice( - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 766, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = (__pyx_t_9 != Py_None); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_v_have_step = __pyx_t_1; - - /* "View.MemoryView":768 - * have_step = index.step is not None - * - * slice_memviewslice( # <<<<<<<<<<<<<< - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - * dim, new_ndim, p_suboffset_dim, - */ - __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(2, 768, __pyx_L1_error) - - /* "View.MemoryView":774 - * have_start, have_stop, have_step, - * True) - * new_ndim += 1 # <<<<<<<<<<<<<< - * - * if isinstance(memview, _memoryviewslice): - */ - __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); - } - __pyx_L6:; - - /* "View.MemoryView":746 - * cdef bint have_start, have_stop, have_step - * - * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< - * if PyIndex_Check(index): - * slice_memviewslice( - */ - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":776 - * new_ndim += 1 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":777 - * - * if isinstance(memview, _memoryviewslice): - * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< - * memviewsliceobj.to_object_func, - * memviewsliceobj.to_dtype_func, - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - - /* "View.MemoryView":778 - * if isinstance(memview, _memoryviewslice): - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< - * memviewsliceobj.to_dtype_func, - * memview.dtype_is_object) - */ - if (unlikely(!__pyx_v_memviewsliceobj)) 
{ __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(2, 778, __pyx_L1_error) } - - /* "View.MemoryView":779 - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, - * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< - * memview.dtype_is_object) - * else: - */ - if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(2, 779, __pyx_L1_error) } - - /* "View.MemoryView":777 - * - * if isinstance(memview, _memoryviewslice): - * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< - * memviewsliceobj.to_object_func, - * memviewsliceobj.to_dtype_func, - */ - __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 777, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(2, 777, __pyx_L1_error) - __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "View.MemoryView":776 - * new_ndim += 1 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, - */ - } - - /* "View.MemoryView":782 - * memview.dtype_is_object) - * else: - * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< - * memview.dtype_is_object) - * - */ - /*else*/ { - __Pyx_XDECREF(((PyObject *)__pyx_r)); - - /* "View.MemoryView":783 - * else: - * return memoryview_fromslice(dst, new_ndim, NULL, NULL, - * memview.dtype_is_object) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 782, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - - /* "View.MemoryView":782 - * memview.dtype_is_object) - * else: - * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< - * memview.dtype_is_object) - * - */ - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(2, 782, __pyx_L1_error) - __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); - __pyx_t_3 = 0; - goto __pyx_L0; - } - - /* "View.MemoryView":710 - * - * @cname('__pyx_memview_slice') - * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< - * cdef int new_ndim = 0, suboffset_dim = -1, dim - * cdef bint negative_step - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); - __Pyx_XDECREF(__pyx_v_index); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":807 - * - * @cname('__pyx_memoryview_slice_memviewslice') - * cdef int slice_memviewslice( # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, - */ - -static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int 
__pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { - Py_ssize_t __pyx_v_new_shape; - int __pyx_v_negative_step; - int __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - - /* "View.MemoryView":827 - * cdef bint negative_step - * - * if not is_slice: # <<<<<<<<<<<<<< - * - * if start < 0: - */ - __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":829 - * if not is_slice: - * - * if start < 0: # <<<<<<<<<<<<<< - * start += shape - * if not 0 <= start < shape: - */ - __pyx_t_1 = ((__pyx_v_start < 0) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":830 - * - * if start < 0: - * start += shape # <<<<<<<<<<<<<< - * if not 0 <= start < shape: - * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) - */ - __pyx_v_start = (__pyx_v_start + __pyx_v_shape); - - /* "View.MemoryView":829 - * if not is_slice: - * - * if start < 0: # <<<<<<<<<<<<<< - * start += shape - * if not 0 <= start < shape: - */ - } - - /* "View.MemoryView":831 - * if start < 0: - * start += shape - * if not 0 <= start < shape: # <<<<<<<<<<<<<< - * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) - * else: - */ - __pyx_t_1 = (0 <= __pyx_v_start); - if (__pyx_t_1) { - __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); - } - __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":832 - * start += shape - * if not 0 <= start < shape: - * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< - * else: - * - */ - __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 832, __pyx_L1_error) - - /* "View.MemoryView":831 - * if start < 0: - * start += shape - * if not 0 <= start < shape: # <<<<<<<<<<<<<< - * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) - * else: - */ - } - - /* "View.MemoryView":827 - * cdef bint negative_step - * - * if not is_slice: # <<<<<<<<<<<<<< - * - * if start < 0: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":835 - * else: - * - * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< - * - * if have_step and step == 0: - */ - /*else*/ { - __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); - if (__pyx_t_1) { - } else { - __pyx_t_2 = __pyx_t_1; - goto __pyx_L6_bool_binop_done; - } - __pyx_t_1 = ((__pyx_v_step < 0) != 0); - __pyx_t_2 = __pyx_t_1; - __pyx_L6_bool_binop_done:; - __pyx_v_negative_step = __pyx_t_2; - - /* "View.MemoryView":837 - * negative_step = have_step != 0 and step < 0 - * - * if have_step and step == 0: # <<<<<<<<<<<<<< - * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) - * - */ - __pyx_t_1 = (__pyx_v_have_step != 0); - if (__pyx_t_1) { - } else { - __pyx_t_2 = __pyx_t_1; - goto __pyx_L9_bool_binop_done; - } - __pyx_t_1 = ((__pyx_v_step == 0) != 0); - __pyx_t_2 = __pyx_t_1; - __pyx_L9_bool_binop_done:; - if (__pyx_t_2) { - - /* "View.MemoryView":838 - * - * if have_step and step == 0: - * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 838, __pyx_L1_error) - - /* "View.MemoryView":837 - * negative_step = have_step != 0 and step < 0 - * - * if have_step and step == 0: # <<<<<<<<<<<<<< - * _err_dim(ValueError, 
"Step may not be zero (axis %d)", dim) - * - */ - } - - /* "View.MemoryView":841 - * - * - * if have_start: # <<<<<<<<<<<<<< - * if start < 0: - * start += shape - */ - __pyx_t_2 = (__pyx_v_have_start != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":842 - * - * if have_start: - * if start < 0: # <<<<<<<<<<<<<< - * start += shape - * if start < 0: - */ - __pyx_t_2 = ((__pyx_v_start < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":843 - * if have_start: - * if start < 0: - * start += shape # <<<<<<<<<<<<<< - * if start < 0: - * start = 0 - */ - __pyx_v_start = (__pyx_v_start + __pyx_v_shape); - - /* "View.MemoryView":844 - * if start < 0: - * start += shape - * if start < 0: # <<<<<<<<<<<<<< - * start = 0 - * elif start >= shape: - */ - __pyx_t_2 = ((__pyx_v_start < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":845 - * start += shape - * if start < 0: - * start = 0 # <<<<<<<<<<<<<< - * elif start >= shape: - * if negative_step: - */ - __pyx_v_start = 0; - - /* "View.MemoryView":844 - * if start < 0: - * start += shape - * if start < 0: # <<<<<<<<<<<<<< - * start = 0 - * elif start >= shape: - */ - } - - /* "View.MemoryView":842 - * - * if have_start: - * if start < 0: # <<<<<<<<<<<<<< - * start += shape - * if start < 0: - */ - goto __pyx_L12; - } - - /* "View.MemoryView":846 - * if start < 0: - * start = 0 - * elif start >= shape: # <<<<<<<<<<<<<< - * if negative_step: - * start = shape - 1 - */ - __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":847 - * start = 0 - * elif start >= shape: - * if negative_step: # <<<<<<<<<<<<<< - * start = shape - 1 - * else: - */ - __pyx_t_2 = (__pyx_v_negative_step != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":848 - * elif start >= shape: - * if negative_step: - * start = shape - 1 # <<<<<<<<<<<<<< - * else: - * start = shape - */ - __pyx_v_start = (__pyx_v_shape - 1); - - /* "View.MemoryView":847 - * start = 0 - * elif start >= shape: - * if negative_step: # <<<<<<<<<<<<<< - * start = shape - 1 - * else: - */ - goto __pyx_L14; - } - - /* "View.MemoryView":850 - * start = shape - 1 - * else: - * start = shape # <<<<<<<<<<<<<< - * else: - * if negative_step: - */ - /*else*/ { - __pyx_v_start = __pyx_v_shape; - } - __pyx_L14:; - - /* "View.MemoryView":846 - * if start < 0: - * start = 0 - * elif start >= shape: # <<<<<<<<<<<<<< - * if negative_step: - * start = shape - 1 - */ - } - __pyx_L12:; - - /* "View.MemoryView":841 - * - * - * if have_start: # <<<<<<<<<<<<<< - * if start < 0: - * start += shape - */ - goto __pyx_L11; - } - - /* "View.MemoryView":852 - * start = shape - * else: - * if negative_step: # <<<<<<<<<<<<<< - * start = shape - 1 - * else: - */ - /*else*/ { - __pyx_t_2 = (__pyx_v_negative_step != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":853 - * else: - * if negative_step: - * start = shape - 1 # <<<<<<<<<<<<<< - * else: - * start = 0 - */ - __pyx_v_start = (__pyx_v_shape - 1); - - /* "View.MemoryView":852 - * start = shape - * else: - * if negative_step: # <<<<<<<<<<<<<< - * start = shape - 1 - * else: - */ - goto __pyx_L15; - } - - /* "View.MemoryView":855 - * start = shape - 1 - * else: - * start = 0 # <<<<<<<<<<<<<< - * - * if have_stop: - */ - /*else*/ { - __pyx_v_start = 0; - } - __pyx_L15:; - } - __pyx_L11:; - - /* "View.MemoryView":857 - * start = 0 - * - * if have_stop: # <<<<<<<<<<<<<< - * if stop < 0: - * stop += shape - */ - __pyx_t_2 = (__pyx_v_have_stop != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":858 - * - * if have_stop: - * if stop < 0: # 
<<<<<<<<<<<<<< - * stop += shape - * if stop < 0: - */ - __pyx_t_2 = ((__pyx_v_stop < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":859 - * if have_stop: - * if stop < 0: - * stop += shape # <<<<<<<<<<<<<< - * if stop < 0: - * stop = 0 - */ - __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); - - /* "View.MemoryView":860 - * if stop < 0: - * stop += shape - * if stop < 0: # <<<<<<<<<<<<<< - * stop = 0 - * elif stop > shape: - */ - __pyx_t_2 = ((__pyx_v_stop < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":861 - * stop += shape - * if stop < 0: - * stop = 0 # <<<<<<<<<<<<<< - * elif stop > shape: - * stop = shape - */ - __pyx_v_stop = 0; - - /* "View.MemoryView":860 - * if stop < 0: - * stop += shape - * if stop < 0: # <<<<<<<<<<<<<< - * stop = 0 - * elif stop > shape: - */ - } - - /* "View.MemoryView":858 - * - * if have_stop: - * if stop < 0: # <<<<<<<<<<<<<< - * stop += shape - * if stop < 0: - */ - goto __pyx_L17; - } - - /* "View.MemoryView":862 - * if stop < 0: - * stop = 0 - * elif stop > shape: # <<<<<<<<<<<<<< - * stop = shape - * else: - */ - __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":863 - * stop = 0 - * elif stop > shape: - * stop = shape # <<<<<<<<<<<<<< - * else: - * if negative_step: - */ - __pyx_v_stop = __pyx_v_shape; - - /* "View.MemoryView":862 - * if stop < 0: - * stop = 0 - * elif stop > shape: # <<<<<<<<<<<<<< - * stop = shape - * else: - */ - } - __pyx_L17:; - - /* "View.MemoryView":857 - * start = 0 - * - * if have_stop: # <<<<<<<<<<<<<< - * if stop < 0: - * stop += shape - */ - goto __pyx_L16; - } - - /* "View.MemoryView":865 - * stop = shape - * else: - * if negative_step: # <<<<<<<<<<<<<< - * stop = -1 - * else: - */ - /*else*/ { - __pyx_t_2 = (__pyx_v_negative_step != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":866 - * else: - * if negative_step: - * stop = -1 # <<<<<<<<<<<<<< - * else: - * stop = shape - */ - __pyx_v_stop = -1L; - - /* "View.MemoryView":865 - * stop = shape - * else: - * if negative_step: # <<<<<<<<<<<<<< - * stop = -1 - * else: - */ - goto __pyx_L19; - } - - /* "View.MemoryView":868 - * stop = -1 - * else: - * stop = shape # <<<<<<<<<<<<<< - * - * if not have_step: - */ - /*else*/ { - __pyx_v_stop = __pyx_v_shape; - } - __pyx_L19:; - } - __pyx_L16:; - - /* "View.MemoryView":870 - * stop = shape - * - * if not have_step: # <<<<<<<<<<<<<< - * step = 1 - * - */ - __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":871 - * - * if not have_step: - * step = 1 # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_step = 1; - - /* "View.MemoryView":870 - * stop = shape - * - * if not have_step: # <<<<<<<<<<<<<< - * step = 1 - * - */ - } - - /* "View.MemoryView":875 - * - * with cython.cdivision(True): - * new_shape = (stop - start) // step # <<<<<<<<<<<<<< - * - * if (stop - start) - step * new_shape: - */ - __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); - - /* "View.MemoryView":877 - * new_shape = (stop - start) // step - * - * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< - * new_shape += 1 - * - */ - __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":878 - * - * if (stop - start) - step * new_shape: - * new_shape += 1 # <<<<<<<<<<<<<< - * - * if new_shape < 0: - */ - __pyx_v_new_shape = (__pyx_v_new_shape + 1); - - /* "View.MemoryView":877 - * new_shape = (stop - start) // step - * - * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< - * 
new_shape += 1 - * - */ - } - - /* "View.MemoryView":880 - * new_shape += 1 - * - * if new_shape < 0: # <<<<<<<<<<<<<< - * new_shape = 0 - * - */ - __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":881 - * - * if new_shape < 0: - * new_shape = 0 # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_new_shape = 0; - - /* "View.MemoryView":880 - * new_shape += 1 - * - * if new_shape < 0: # <<<<<<<<<<<<<< - * new_shape = 0 - * - */ - } - - /* "View.MemoryView":884 - * - * - * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< - * dst.shape[new_ndim] = new_shape - * dst.suboffsets[new_ndim] = suboffset - */ - (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); - - /* "View.MemoryView":885 - * - * dst.strides[new_ndim] = stride * step - * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< - * dst.suboffsets[new_ndim] = suboffset - * - */ - (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; - - /* "View.MemoryView":886 - * dst.strides[new_ndim] = stride * step - * dst.shape[new_ndim] = new_shape - * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< - * - * - */ - (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; - } - __pyx_L3:; - - /* "View.MemoryView":889 - * - * - * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< - * dst.data += start * stride - * else: - */ - __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":890 - * - * if suboffset_dim[0] < 0: - * dst.data += start * stride # <<<<<<<<<<<<<< - * else: - * dst.suboffsets[suboffset_dim[0]] += start * stride - */ - __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); - - /* "View.MemoryView":889 - * - * - * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< - * dst.data += start * stride - * else: - */ - goto __pyx_L23; - } - - /* "View.MemoryView":892 - * dst.data += start * stride - * else: - * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< - * - * if suboffset >= 0: - */ - /*else*/ { - __pyx_t_3 = (__pyx_v_suboffset_dim[0]); - (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); - } - __pyx_L23:; - - /* "View.MemoryView":894 - * dst.suboffsets[suboffset_dim[0]] += start * stride - * - * if suboffset >= 0: # <<<<<<<<<<<<<< - * if not is_slice: - * if new_ndim == 0: - */ - __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":895 - * - * if suboffset >= 0: - * if not is_slice: # <<<<<<<<<<<<<< - * if new_ndim == 0: - * dst.data = (<char **> dst.data)[0] + suboffset - */ - __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":896 - * if suboffset >= 0: - * if not is_slice: - * if new_ndim == 0: # <<<<<<<<<<<<<< - * dst.data = (<char **> dst.data)[0] + suboffset - * else: - */ - __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":897 - * if not is_slice: - * if new_ndim == 0: - * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< - * else: - * _err_dim(IndexError, "All dimensions preceding dimension %d " - */ - __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); - - /* "View.MemoryView":896 - * if suboffset >= 0: - * if not is_slice: - * if new_ndim == 0: # <<<<<<<<<<<<<< - * dst.data = (<char **> dst.data)[0] + suboffset - * else: - */ - goto __pyx_L26; - } - - /* "View.MemoryView":899 - * dst.data = (<char **> dst.data)[0] + suboffset - * else: - * _err_dim(IndexError, "All dimensions 
preceding dimension %d " # <<<<<<<<<<<<<< - * "must be indexed and not sliced", dim) - * else: - */ - /*else*/ { - - /* "View.MemoryView":900 - * else: - * _err_dim(IndexError, "All dimensions preceding dimension %d " - * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< - * else: - * suboffset_dim[0] = new_ndim - */ - __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 899, __pyx_L1_error) - } - __pyx_L26:; - - /* "View.MemoryView":895 - * - * if suboffset >= 0: - * if not is_slice: # <<<<<<<<<<<<<< - * if new_ndim == 0: - * dst.data = (<char **> dst.data)[0] + suboffset - */ - goto __pyx_L25; - } - - /* "View.MemoryView":902 - * "must be indexed and not sliced", dim) - * else: - * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< - * - * return 0 - */ - /*else*/ { - (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; - } - __pyx_L25:; - - /* "View.MemoryView":894 - * dst.suboffsets[suboffset_dim[0]] += start * stride - * - * if suboffset >= 0: # <<<<<<<<<<<<<< - * if not is_slice: - * if new_ndim == 0: - */ - } - - /* "View.MemoryView":904 - * suboffset_dim[0] = new_ndim - * - * return 0 # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":807 - * - * @cname('__pyx_memoryview_slice_memviewslice') - * cdef int slice_memviewslice( # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, - */ - - /* function exit code */ - __pyx_L1_error:; - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - __pyx_r = -1; - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":910 - * - * @cname('__pyx_pybuffer_index') - * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 - */ - -static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { - Py_ssize_t __pyx_v_shape; - Py_ssize_t __pyx_v_stride; - Py_ssize_t __pyx_v_suboffset; - Py_ssize_t __pyx_v_itemsize; - char *__pyx_v_resultp; - char *__pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("pybuffer_index", 0); - - /* "View.MemoryView":912 - * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< - * cdef Py_ssize_t itemsize = view.itemsize - * cdef char *resultp - */ - __pyx_v_suboffset = -1L; - - /* "View.MemoryView":913 - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 - * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< - * cdef char *resultp - * - */ - __pyx_t_1 = __pyx_v_view->itemsize; - __pyx_v_itemsize = __pyx_t_1; - - /* "View.MemoryView":916 - * cdef char *resultp - * - * if view.ndim == 0: # <<<<<<<<<<<<<< - * shape = view.len / itemsize - * stride = itemsize - */ - __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); - if (__pyx_t_2) 
{ - - /* "View.MemoryView":917 - * - * if view.ndim == 0: - * shape = view.len / itemsize # <<<<<<<<<<<<<< - * stride = itemsize - * else: - */ - if (unlikely(__pyx_v_itemsize == 0)) { - PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); - __PYX_ERR(2, 917, __pyx_L1_error) - } - else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { - PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); - __PYX_ERR(2, 917, __pyx_L1_error) - } - __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); - - /* "View.MemoryView":918 - * if view.ndim == 0: - * shape = view.len / itemsize - * stride = itemsize # <<<<<<<<<<<<<< - * else: - * shape = view.shape[dim] - */ - __pyx_v_stride = __pyx_v_itemsize; - - /* "View.MemoryView":916 - * cdef char *resultp - * - * if view.ndim == 0: # <<<<<<<<<<<<<< - * shape = view.len / itemsize - * stride = itemsize - */ - goto __pyx_L3; - } - - /* "View.MemoryView":920 - * stride = itemsize - * else: - * shape = view.shape[dim] # <<<<<<<<<<<<<< - * stride = view.strides[dim] - * if view.suboffsets != NULL: - */ - /*else*/ { - __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); - - /* "View.MemoryView":921 - * else: - * shape = view.shape[dim] - * stride = view.strides[dim] # <<<<<<<<<<<<<< - * if view.suboffsets != NULL: - * suboffset = view.suboffsets[dim] - */ - __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); - - /* "View.MemoryView":922 - * shape = view.shape[dim] - * stride = view.strides[dim] - * if view.suboffsets != NULL: # <<<<<<<<<<<<<< - * suboffset = view.suboffsets[dim] - * - */ - __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":923 - * stride = view.strides[dim] - * if view.suboffsets != NULL: - * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< - * - * if index < 0: - */ - __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); - - /* "View.MemoryView":922 - * shape = view.shape[dim] - * stride = view.strides[dim] - * if view.suboffsets != NULL: # <<<<<<<<<<<<<< - * suboffset = view.suboffsets[dim] - * - */ - } - } - __pyx_L3:; - - /* "View.MemoryView":925 - * suboffset = view.suboffsets[dim] - * - * if index < 0: # <<<<<<<<<<<<<< - * index += view.shape[dim] - * if index < 0: - */ - __pyx_t_2 = ((__pyx_v_index < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":926 - * - * if index < 0: - * index += view.shape[dim] # <<<<<<<<<<<<<< - * if index < 0: - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - */ - __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); - - /* "View.MemoryView":927 - * if index < 0: - * index += view.shape[dim] - * if index < 0: # <<<<<<<<<<<<<< - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - */ - __pyx_t_2 = ((__pyx_v_index < 0) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":928 - * index += view.shape[dim] - * if index < 0: - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< - * - * if index >= shape: - */ - __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 928, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 928, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - 
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 928, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(2, 928, __pyx_L1_error) - - /* "View.MemoryView":927 - * if index < 0: - * index += view.shape[dim] - * if index < 0: # <<<<<<<<<<<<<< - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - */ - } - - /* "View.MemoryView":925 - * suboffset = view.suboffsets[dim] - * - * if index < 0: # <<<<<<<<<<<<<< - * index += view.shape[dim] - * if index < 0: - */ - } - - /* "View.MemoryView":930 - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - * if index >= shape: # <<<<<<<<<<<<<< - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - */ - __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":931 - * - * if index >= shape: - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< - * - * resultp = bufp + index * stride - */ - __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 931, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 931, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 931, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(2, 931, __pyx_L1_error) - - /* "View.MemoryView":930 - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - * if index >= shape: # <<<<<<<<<<<<<< - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - */ - } - - /* "View.MemoryView":933 - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - * resultp = bufp + index * stride # <<<<<<<<<<<<<< - * if suboffset >= 0: - * resultp = (<char **> resultp)[0] + suboffset - */ - __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); - - /* "View.MemoryView":934 - * - * resultp = bufp + index * stride - * if suboffset >= 0: # <<<<<<<<<<<<<< - * resultp = (<char **> resultp)[0] + suboffset - * - */ - __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":935 - * resultp = bufp + index * stride - * if suboffset >= 0: - * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< - * - * return resultp - */ - __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); - - /* "View.MemoryView":934 - * - * resultp = bufp + index * stride - * if suboffset >= 0: # <<<<<<<<<<<<<< - * resultp = (<char **> resultp)[0] + suboffset - * - */ - } - - /* "View.MemoryView":937 - * resultp = (<char **> resultp)[0] + suboffset - * - * return resultp # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_resultp; - goto __pyx_L0; - - /* "View.MemoryView":910 - * - * @cname('__pyx_pybuffer_index') - * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - 
__Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":943 - * - * @cname('__pyx_memslice_transpose') - * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< - * cdef int ndim = memslice.memview.view.ndim - * - */ - -static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { - int __pyx_v_ndim; - Py_ssize_t *__pyx_v_shape; - Py_ssize_t *__pyx_v_strides; - int __pyx_v_i; - int __pyx_v_j; - int __pyx_r; - int __pyx_t_1; - Py_ssize_t *__pyx_t_2; - long __pyx_t_3; - long __pyx_t_4; - Py_ssize_t __pyx_t_5; - Py_ssize_t __pyx_t_6; - int __pyx_t_7; - int __pyx_t_8; - int __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - - /* "View.MemoryView":944 - * @cname('__pyx_memslice_transpose') - * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: - * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< - * - * cdef Py_ssize_t *shape = memslice.shape - */ - __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; - __pyx_v_ndim = __pyx_t_1; - - /* "View.MemoryView":946 - * cdef int ndim = memslice.memview.view.ndim - * - * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< - * cdef Py_ssize_t *strides = memslice.strides - * - */ - __pyx_t_2 = __pyx_v_memslice->shape; - __pyx_v_shape = __pyx_t_2; - - /* "View.MemoryView":947 - * - * cdef Py_ssize_t *shape = memslice.shape - * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = __pyx_v_memslice->strides; - __pyx_v_strides = __pyx_t_2; - - /* "View.MemoryView":951 - * - * cdef int i, j - * for i in range(ndim / 2): # <<<<<<<<<<<<<< - * j = ndim - 1 - i - * strides[i], strides[j] = strides[j], strides[i] - */ - __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); - __pyx_t_4 = __pyx_t_3; - for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { - __pyx_v_i = __pyx_t_1; - - /* "View.MemoryView":952 - * cdef int i, j - * for i in range(ndim / 2): - * j = ndim - 1 - i # <<<<<<<<<<<<<< - * strides[i], strides[j] = strides[j], strides[i] - * shape[i], shape[j] = shape[j], shape[i] - */ - __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); - - /* "View.MemoryView":953 - * for i in range(ndim / 2): - * j = ndim - 1 - i - * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< - * shape[i], shape[j] = shape[j], shape[i] - * - */ - __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); - __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); - (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; - (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; - - /* "View.MemoryView":954 - * j = ndim - 1 - i - * strides[i], strides[j] = strides[j], strides[i] - * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: - */ - __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); - __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); - (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; - (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; - - /* "View.MemoryView":956 - * shape[i], shape[j] = shape[j], shape[i] - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< - * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") - * - */ - __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); - if (!__pyx_t_8) { - } else { - __pyx_t_7 = __pyx_t_8; - goto __pyx_L6_bool_binop_done; - } - __pyx_t_8 = 
(((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); - __pyx_t_7 = __pyx_t_8; - __pyx_L6_bool_binop_done:; - if (__pyx_t_7) { - - /* "View.MemoryView":957 - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: - * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< - * - * return 1 - */ - __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 957, __pyx_L1_error) - - /* "View.MemoryView":956 - * shape[i], shape[j] = shape[j], shape[i] - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< - * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") - * - */ - } - } - - /* "View.MemoryView":959 - * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") - * - * return 1 # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = 1; - goto __pyx_L0; - - /* "View.MemoryView":943 - * - * @cname('__pyx_memslice_transpose') - * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< - * cdef int ndim = memslice.memview.view.ndim - * - */ - - /* function exit code */ - __pyx_L1_error:; - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - __pyx_r = 0; - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":976 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * def __dealloc__(self): # <<<<<<<<<<<<<< - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) - * - */ - -/* Python wrapper */ -static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ -static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); - __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__", 0); - - /* "View.MemoryView":977 - * - * def __dealloc__(self): - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< - * - * cdef convert_item_to_object(self, char *itemp): - */ - __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); - - /* "View.MemoryView":976 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * def __dealloc__(self): # <<<<<<<<<<<<<< - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) - * - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":979 - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * if self.to_object_func != NULL: - * return self.to_object_func(itemp) - */ - -static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - 
__Pyx_RefNannySetupContext("convert_item_to_object", 0); - - /* "View.MemoryView":980 - * - * cdef convert_item_to_object(self, char *itemp): - * if self.to_object_func != NULL: # <<<<<<<<<<<<<< - * return self.to_object_func(itemp) - * else: - */ - __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":981 - * cdef convert_item_to_object(self, char *itemp): - * if self.to_object_func != NULL: - * return self.to_object_func(itemp) # <<<<<<<<<<<<<< - * else: - * return memoryview.convert_item_to_object(self, itemp) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 981, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":980 - * - * cdef convert_item_to_object(self, char *itemp): - * if self.to_object_func != NULL: # <<<<<<<<<<<<<< - * return self.to_object_func(itemp) - * else: - */ - } - - /* "View.MemoryView":983 - * return self.to_object_func(itemp) - * else: - * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< - * - * cdef assign_item_from_object(self, char *itemp, object value): - */ - /*else*/ { - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 983, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - } - - /* "View.MemoryView":979 - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * if self.to_object_func != NULL: - * return self.to_object_func(itemp) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":985 - * return memoryview.convert_item_to_object(self, itemp) - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * if self.to_dtype_func != NULL: - * self.to_dtype_func(itemp, value) - */ - -static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("assign_item_from_object", 0); - - /* "View.MemoryView":986 - * - * cdef assign_item_from_object(self, char *itemp, object value): - * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< - * self.to_dtype_func(itemp, value) - * else: - */ - __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":987 - * cdef assign_item_from_object(self, char *itemp, object value): - * if self.to_dtype_func != NULL: - * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< - * else: - * memoryview.assign_item_from_object(self, itemp, value) - */ - __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(2, 987, __pyx_L1_error) - - /* "View.MemoryView":986 - * - * cdef assign_item_from_object(self, char *itemp, object value): - * if 
self.to_dtype_func != NULL: # <<<<<<<<<<<<<< - * self.to_dtype_func(itemp, value) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":989 - * self.to_dtype_func(itemp, value) - * else: - * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< - * - * @property - */ - /*else*/ { - __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 989, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_L3:; - - /* "View.MemoryView":985 - * return memoryview.convert_item_to_object(self, itemp) - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * if self.to_dtype_func != NULL: - * self.to_dtype_func(itemp, value) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":992 - * - * @property - * def base(self): # <<<<<<<<<<<<<< - * return self.from_object - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":993 - * @property - * def base(self): - * return self.from_object # <<<<<<<<<<<<<< - * - * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->from_object); - __pyx_r = __pyx_v_self->from_object; - goto __pyx_L0; - - /* "View.MemoryView":992 - * - * @property - * def base(self): # <<<<<<<<<<<<<< - * return self.from_object - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject 
*__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(2, 2, __pyx_L1_error) - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(2, 4, __pyx_L1_error) - - /* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - - /* function exit code 
*/ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":999 - * - * @cname('__pyx_memoryview_fromslice') - * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< - * int ndim, - * object (*to_object_func)(char *), - */ - -static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { - struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; - Py_ssize_t __pyx_v_suboffset; - PyObject *__pyx_v_length = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - __Pyx_TypeInfo *__pyx_t_4; - Py_buffer __pyx_t_5; - Py_ssize_t *__pyx_t_6; - Py_ssize_t *__pyx_t_7; - Py_ssize_t *__pyx_t_8; - Py_ssize_t __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_fromslice", 0); - - /* "View.MemoryView":1007 - * cdef _memoryviewslice result - * - * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< - * return None - * - */ - __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1008 - * - * if <PyObject *> memviewslice.memview == Py_None: - * return None # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - - /* "View.MemoryView":1007 - * cdef _memoryviewslice result - * - * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< - * return None - * - */ - } - - /* "View.MemoryView":1013 - * - * - * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< - * - * result.from_slice = memviewslice - */ - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1013, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1013, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); - __Pyx_INCREF(__pyx_int_0); - __Pyx_GIVEREF(__pyx_int_0); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1013, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); - __pyx_t_2 = 0; - - /* "View.MemoryView":1015 - * result = _memoryviewslice(None, 0, dtype_is_object) - * - * result.from_slice = memviewslice # <<<<<<<<<<<<<< - * __PYX_INC_MEMVIEW(&memviewslice, 1) - * - */ - __pyx_v_result->from_slice = __pyx_v_memviewslice; - - /* "View.MemoryView":1016 - * - * result.from_slice = memviewslice - * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< - * - * result.from_object = (<memoryview> memviewslice.memview).base - */ - __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); - - /* "View.MemoryView":1018 - * __PYX_INC_MEMVIEW(&memviewslice, 1) - * - * result.from_object = (<memoryview> 
memviewslice.memview).base # <<<<<<<<<<<<<< - * result.typeinfo = memviewslice.memview.typeinfo - * - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1018, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __Pyx_GOTREF(__pyx_v_result->from_object); - __Pyx_DECREF(__pyx_v_result->from_object); - __pyx_v_result->from_object = __pyx_t_2; - __pyx_t_2 = 0; - - /* "View.MemoryView":1019 - * - * result.from_object = (<memoryview> memviewslice.memview).base - * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< - * - * result.view = memviewslice.memview.view - */ - __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; - __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; - - /* "View.MemoryView":1021 - * result.typeinfo = memviewslice.memview.typeinfo - * - * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< - * result.view.buf = <void *> memviewslice.data - * result.view.ndim = ndim - */ - __pyx_t_5 = __pyx_v_memviewslice.memview->view; - __pyx_v_result->__pyx_base.view = __pyx_t_5; - - /* "View.MemoryView":1022 - * - * result.view = memviewslice.memview.view - * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< - * result.view.ndim = ndim - * (<__pyx_buffer *> &result.view).obj = Py_None - */ - __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); - - /* "View.MemoryView":1023 - * result.view = memviewslice.memview.view - * result.view.buf = <void *> memviewslice.data - * result.view.ndim = ndim # <<<<<<<<<<<<<< - * (<__pyx_buffer *> &result.view).obj = Py_None - * Py_INCREF(Py_None) - */ - __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; - - /* "View.MemoryView":1024 - * result.view.buf = <void *> memviewslice.data - * result.view.ndim = ndim - * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< - * Py_INCREF(Py_None) - * - */ - ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; - - /* "View.MemoryView":1025 - * result.view.ndim = ndim - * (<__pyx_buffer *> &result.view).obj = Py_None - * Py_INCREF(Py_None) # <<<<<<<<<<<<<< - * - * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: - */ - Py_INCREF(Py_None); - - /* "View.MemoryView":1027 - * Py_INCREF(Py_None) - * - * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< - * result.flags = PyBUF_RECORDS - * else: - */ - __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1028 - * - * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: - * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< - * else: - * result.flags = PyBUF_RECORDS_RO - */ - __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; - - /* "View.MemoryView":1027 - * Py_INCREF(Py_None) - * - * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< - * result.flags = PyBUF_RECORDS - * else: - */ - goto __pyx_L4; - } - - /* "View.MemoryView":1030 - * result.flags = PyBUF_RECORDS - * else: - * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< - * - * result.view.shape = <Py_ssize_t *> result.from_slice.shape - */ - /*else*/ { - __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; - } - __pyx_L4:; - - /* "View.MemoryView":1032 - * result.flags = PyBUF_RECORDS_RO - * - * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< - * result.view.strides = <Py_ssize_t *> result.from_slice.strides - * - */ - 
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); - - /* "View.MemoryView":1033 - * - * result.view.shape = <Py_ssize_t *> result.from_slice.shape - * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); - - /* "View.MemoryView":1036 - * - * - * result.view.suboffsets = NULL # <<<<<<<<<<<<<< - * for suboffset in result.from_slice.suboffsets[:ndim]: - * if suboffset >= 0: - */ - __pyx_v_result->__pyx_base.view.suboffsets = NULL; - - /* "View.MemoryView":1037 - * - * result.view.suboffsets = NULL - * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< - * if suboffset >= 0: - * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets - */ - __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); - for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { - __pyx_t_6 = __pyx_t_8; - __pyx_v_suboffset = (__pyx_t_6[0]); - - /* "View.MemoryView":1038 - * result.view.suboffsets = NULL - * for suboffset in result.from_slice.suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets - * break - */ - __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1039 - * for suboffset in result.from_slice.suboffsets[:ndim]: - * if suboffset >= 0: - * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< - * break - * - */ - __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); - - /* "View.MemoryView":1040 - * if suboffset >= 0: - * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets - * break # <<<<<<<<<<<<<< - * - * result.view.len = result.view.itemsize - */ - goto __pyx_L6_break; - - /* "View.MemoryView":1038 - * result.view.suboffsets = NULL - * for suboffset in result.from_slice.suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets - * break - */ - } - } - __pyx_L6_break:; - - /* "View.MemoryView":1042 - * break - * - * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< - * for length in result.view.shape[:ndim]: - * result.view.len *= length - */ - __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; - __pyx_v_result->__pyx_base.view.len = __pyx_t_9; - - /* "View.MemoryView":1043 - * - * result.view.len = result.view.itemsize - * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< - * result.view.len *= length - * - */ - __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); - for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { - __pyx_t_6 = __pyx_t_8; - __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1043, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); - __pyx_t_2 = 0; - - /* "View.MemoryView":1044 - * result.view.len = result.view.itemsize - * for length in result.view.shape[:ndim]: - * result.view.len *= length # <<<<<<<<<<<<<< - * - * result.to_object_func = to_object_func - */ - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1044, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(2, 1044, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 1044, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_result->__pyx_base.view.len = __pyx_t_9; - } - - /* "View.MemoryView":1046 - * result.view.len *= length - * - * result.to_object_func = to_object_func # <<<<<<<<<<<<<< - * result.to_dtype_func = to_dtype_func - * - */ - __pyx_v_result->to_object_func = __pyx_v_to_object_func; - - /* "View.MemoryView":1047 - * - * result.to_object_func = to_object_func - * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< - * - * return result - */ - __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; - - /* "View.MemoryView":1049 - * result.to_dtype_func = to_dtype_func - * - * return result # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_get_slice_from_memoryview') - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_result)); - __pyx_r = ((PyObject *)__pyx_v_result); - goto __pyx_L0; - - /* "View.MemoryView":999 - * - * @cname('__pyx_memoryview_fromslice') - * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< - * int ndim, - * object (*to_object_func)(char *), - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XDECREF(__pyx_v_length); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1052 - * - * @cname('__pyx_memoryview_get_slice_from_memoryview') - * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - */ - -static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { - struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; - __Pyx_memviewslice *__pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("get_slice_from_memview", 0); - - /* "View.MemoryView":1055 - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * obj = memview - * return &obj.from_slice - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1056 - * cdef _memoryviewslice obj - * if isinstance(memview, _memoryviewslice): - * obj = memview # <<<<<<<<<<<<<< - * return &obj.from_slice - * else: - */ - if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(2, 1056, __pyx_L1_error) - __pyx_t_3 = ((PyObject *)__pyx_v_memview); - __Pyx_INCREF(__pyx_t_3); - __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":1057 - * if isinstance(memview, _memoryviewslice): - * obj = memview - * return &obj.from_slice # <<<<<<<<<<<<<< - * else: - * slice_copy(memview, mslice) - */ - __pyx_r = 
(&__pyx_v_obj->from_slice); - goto __pyx_L0; - - /* "View.MemoryView":1055 - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * obj = memview - * return &obj.from_slice - */ - } - - /* "View.MemoryView":1059 - * return &obj.from_slice - * else: - * slice_copy(memview, mslice) # <<<<<<<<<<<<<< - * return mslice - * - */ - /*else*/ { - __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); - - /* "View.MemoryView":1060 - * else: - * slice_copy(memview, mslice) - * return mslice # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_slice_copy') - */ - __pyx_r = __pyx_v_mslice; - goto __pyx_L0; - } - - /* "View.MemoryView":1052 - * - * @cname('__pyx_memoryview_get_slice_from_memoryview') - * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_obj); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1063 - * - * @cname('__pyx_memoryview_slice_copy') - * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< - * cdef int dim - * cdef (Py_ssize_t*) shape, strides, suboffsets - */ - -static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { - int __pyx_v_dim; - Py_ssize_t *__pyx_v_shape; - Py_ssize_t *__pyx_v_strides; - Py_ssize_t *__pyx_v_suboffsets; - __Pyx_RefNannyDeclarations - Py_ssize_t *__pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - Py_ssize_t __pyx_t_5; - __Pyx_RefNannySetupContext("slice_copy", 0); - - /* "View.MemoryView":1067 - * cdef (Py_ssize_t*) shape, strides, suboffsets - * - * shape = memview.view.shape # <<<<<<<<<<<<<< - * strides = memview.view.strides - * suboffsets = memview.view.suboffsets - */ - __pyx_t_1 = __pyx_v_memview->view.shape; - __pyx_v_shape = __pyx_t_1; - - /* "View.MemoryView":1068 - * - * shape = memview.view.shape - * strides = memview.view.strides # <<<<<<<<<<<<<< - * suboffsets = memview.view.suboffsets - * - */ - __pyx_t_1 = __pyx_v_memview->view.strides; - __pyx_v_strides = __pyx_t_1; - - /* "View.MemoryView":1069 - * shape = memview.view.shape - * strides = memview.view.strides - * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< - * - * dst.memview = <__pyx_memoryview *> memview - */ - __pyx_t_1 = __pyx_v_memview->view.suboffsets; - __pyx_v_suboffsets = __pyx_t_1; - - /* "View.MemoryView":1071 - * suboffsets = memview.view.suboffsets - * - * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< - * dst.data = <char *> memview.view.buf - * - */ - __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); - - /* "View.MemoryView":1072 - * - * dst.memview = <__pyx_memoryview *> memview - * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< - * - * for dim in range(memview.view.ndim): - */ - __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); - - /* "View.MemoryView":1074 - * dst.data = <char *> memview.view.buf - * - * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< - * dst.shape[dim] = shape[dim] - * dst.strides[dim] = strides[dim] - */ - __pyx_t_2 = __pyx_v_memview->view.ndim; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < 
__pyx_t_3; __pyx_t_4+=1) { - __pyx_v_dim = __pyx_t_4; - - /* "View.MemoryView":1075 - * - * for dim in range(memview.view.ndim): - * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< - * dst.strides[dim] = strides[dim] - * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 - */ - (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); - - /* "View.MemoryView":1076 - * for dim in range(memview.view.ndim): - * dst.shape[dim] = shape[dim] - * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< - * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 - * - */ - (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); - - /* "View.MemoryView":1077 - * dst.shape[dim] = shape[dim] - * dst.strides[dim] = strides[dim] - * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_copy_object') - */ - if ((__pyx_v_suboffsets != 0)) { - __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); - } else { - __pyx_t_5 = -1L; - } - (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; - } - - /* "View.MemoryView":1063 - * - * @cname('__pyx_memoryview_slice_copy') - * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< - * cdef int dim - * cdef (Py_ssize_t*) shape, strides, suboffsets - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":1080 - * - * @cname('__pyx_memoryview_copy_object') - * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< - * "Create a new memoryview object" - * cdef __Pyx_memviewslice memviewslice - */ - -static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { - __Pyx_memviewslice __pyx_v_memviewslice; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_copy", 0); - - /* "View.MemoryView":1083 - * "Create a new memoryview object" - * cdef __Pyx_memviewslice memviewslice - * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< - * return memoryview_copy_from_slice(memview, &memviewslice) - * - */ - __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); - - /* "View.MemoryView":1084 - * cdef __Pyx_memviewslice memviewslice - * slice_copy(memview, &memviewslice) - * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_copy_object_from_slice') - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1084, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":1080 - * - * @cname('__pyx_memoryview_copy_object') - * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< - * "Create a new memoryview object" - * cdef __Pyx_memviewslice memviewslice - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1087 - * - * @cname('__pyx_memoryview_copy_object_from_slice') - * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< - * """ - * Create a new memoryview object from a given memoryview object 
and slice. - */ - -static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { - PyObject *(*__pyx_v_to_object_func)(char *); - int (*__pyx_v_to_dtype_func)(char *, PyObject *); - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *(*__pyx_t_3)(char *); - int (*__pyx_t_4)(char *, PyObject *); - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); - - /* "View.MemoryView":1094 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * to_object_func = (<_memoryviewslice> memview).to_object_func - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1095 - * - * if isinstance(memview, _memoryviewslice): - * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func - * else: - */ - __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; - __pyx_v_to_object_func = __pyx_t_3; - - /* "View.MemoryView":1096 - * if isinstance(memview, _memoryviewslice): - * to_object_func = (<_memoryviewslice> memview).to_object_func - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< - * else: - * to_object_func = NULL - */ - __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; - __pyx_v_to_dtype_func = __pyx_t_4; - - /* "View.MemoryView":1094 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * to_object_func = (<_memoryviewslice> memview).to_object_func - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1098 - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func - * else: - * to_object_func = NULL # <<<<<<<<<<<<<< - * to_dtype_func = NULL - * - */ - /*else*/ { - __pyx_v_to_object_func = NULL; - - /* "View.MemoryView":1099 - * else: - * to_object_func = NULL - * to_dtype_func = NULL # <<<<<<<<<<<<<< - * - * return memoryview_fromslice(memviewslice[0], memview.view.ndim, - */ - __pyx_v_to_dtype_func = NULL; - } - __pyx_L3:; - - /* "View.MemoryView":1101 - * to_dtype_func = NULL - * - * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< - * to_object_func, to_dtype_func, - * memview.dtype_is_object) - */ - __Pyx_XDECREF(__pyx_r); - - /* "View.MemoryView":1103 - * return memoryview_fromslice(memviewslice[0], memview.view.ndim, - * to_object_func, to_dtype_func, - * memview.dtype_is_object) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 1101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_r = __pyx_t_5; - __pyx_t_5 = 0; - goto __pyx_L0; - - /* "View.MemoryView":1087 - * - * @cname('__pyx_memoryview_copy_object_from_slice') - * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< - * """ - * Create a new memoryview object from a given 
memoryview object and slice. - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1109 - * - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< - * if arg < 0: - * return -arg - */ - -static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { - Py_ssize_t __pyx_r; - int __pyx_t_1; - - /* "View.MemoryView":1110 - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: - * if arg < 0: # <<<<<<<<<<<<<< - * return -arg - * else: - */ - __pyx_t_1 = ((__pyx_v_arg < 0) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1111 - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: - * if arg < 0: - * return -arg # <<<<<<<<<<<<<< - * else: - * return arg - */ - __pyx_r = (-__pyx_v_arg); - goto __pyx_L0; - - /* "View.MemoryView":1110 - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: - * if arg < 0: # <<<<<<<<<<<<<< - * return -arg - * else: - */ - } - - /* "View.MemoryView":1113 - * return -arg - * else: - * return arg # <<<<<<<<<<<<<< - * - * @cname('__pyx_get_best_slice_order') - */ - /*else*/ { - __pyx_r = __pyx_v_arg; - goto __pyx_L0; - } - - /* "View.MemoryView":1109 - * - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< - * if arg < 0: - * return -arg - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1116 - * - * @cname('__pyx_get_best_slice_order') - * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< - * """ - * Figure out the best memory access order for a given slice. 
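- *
- *     A minimal pure-Python sketch of this heuristic (a hypothetical helper,
- *     not part of the generated module): the stride of the innermost
- *     dimension with extent > 1 is the C-order candidate, the stride of the
- *     outermost such dimension is the Fortran-order candidate, and C order
- *     wins ties:
- *
- *         def best_order(shape, strides):
- *             # innermost significant stride -> C-contiguous candidate
- *             c = next((s for e, s in zip(reversed(shape), reversed(strides)) if e > 1), 0)
- *             # outermost significant stride -> Fortran-contiguous candidate
- *             f = next((s for e, s in zip(shape, strides) if e > 1), 0)
- *             return 'C' if abs(c) <= abs(f) else 'F'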
- */ - -static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { - int __pyx_v_i; - Py_ssize_t __pyx_v_c_stride; - Py_ssize_t __pyx_v_f_stride; - char __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - - /* "View.MemoryView":1121 - * """ - * cdef int i - * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< - * cdef Py_ssize_t f_stride = 0 - * - */ - __pyx_v_c_stride = 0; - - /* "View.MemoryView":1122 - * cdef int i - * cdef Py_ssize_t c_stride = 0 - * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< - * - * for i in range(ndim - 1, -1, -1): - */ - __pyx_v_f_stride = 0; - - /* "View.MemoryView":1124 - * cdef Py_ssize_t f_stride = 0 - * - * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< - * if mslice.shape[i] > 1: - * c_stride = mslice.strides[i] - */ - for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { - __pyx_v_i = __pyx_t_1; - - /* "View.MemoryView":1125 - * - * for i in range(ndim - 1, -1, -1): - * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< - * c_stride = mslice.strides[i] - * break - */ - __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1126 - * for i in range(ndim - 1, -1, -1): - * if mslice.shape[i] > 1: - * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< - * break - * - */ - __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); - - /* "View.MemoryView":1127 - * if mslice.shape[i] > 1: - * c_stride = mslice.strides[i] - * break # <<<<<<<<<<<<<< - * - * for i in range(ndim): - */ - goto __pyx_L4_break; - - /* "View.MemoryView":1125 - * - * for i in range(ndim - 1, -1, -1): - * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< - * c_stride = mslice.strides[i] - * break - */ - } - } - __pyx_L4_break:; - - /* "View.MemoryView":1129 - * break - * - * for i in range(ndim): # <<<<<<<<<<<<<< - * if mslice.shape[i] > 1: - * f_stride = mslice.strides[i] - */ - __pyx_t_1 = __pyx_v_ndim; - __pyx_t_3 = __pyx_t_1; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1130 - * - * for i in range(ndim): - * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< - * f_stride = mslice.strides[i] - * break - */ - __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1131 - * for i in range(ndim): - * if mslice.shape[i] > 1: - * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< - * break - * - */ - __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); - - /* "View.MemoryView":1132 - * if mslice.shape[i] > 1: - * f_stride = mslice.strides[i] - * break # <<<<<<<<<<<<<< - * - * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): - */ - goto __pyx_L7_break; - - /* "View.MemoryView":1130 - * - * for i in range(ndim): - * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< - * f_stride = mslice.strides[i] - * break - */ - } - } - __pyx_L7_break:; - - /* "View.MemoryView":1134 - * break - * - * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< - * return 'C' - * else: - */ - __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1135 - * - * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): - * return 'C' # <<<<<<<<<<<<<< - * else: - * return 'F' - */ - __pyx_r = 'C'; - goto __pyx_L0; - - /* "View.MemoryView":1134 - * break - * - * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< - * return 'C' - * else: - */ - } - - /* "View.MemoryView":1137 - * return 'C' - * else: - 
* return 'F' # <<<<<<<<<<<<<< - * - * @cython.cdivision(True) - */ - /*else*/ { - __pyx_r = 'F'; - goto __pyx_L0; - } - - /* "View.MemoryView":1116 - * - * @cname('__pyx_get_best_slice_order') - * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< - * """ - * Figure out the best memory access order for a given slice. - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1140 - * - * @cython.cdivision(True) - * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< - * char *dst_data, Py_ssize_t *dst_strides, - * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, - */ - -static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { - CYTHON_UNUSED Py_ssize_t __pyx_v_i; - CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; - Py_ssize_t __pyx_v_dst_extent; - Py_ssize_t __pyx_v_src_stride; - Py_ssize_t __pyx_v_dst_stride; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - Py_ssize_t __pyx_t_4; - Py_ssize_t __pyx_t_5; - Py_ssize_t __pyx_t_6; - - /* "View.MemoryView":1147 - * - * cdef Py_ssize_t i - * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t dst_extent = dst_shape[0] - * cdef Py_ssize_t src_stride = src_strides[0] - */ - __pyx_v_src_extent = (__pyx_v_src_shape[0]); - - /* "View.MemoryView":1148 - * cdef Py_ssize_t i - * cdef Py_ssize_t src_extent = src_shape[0] - * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t src_stride = src_strides[0] - * cdef Py_ssize_t dst_stride = dst_strides[0] - */ - __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); - - /* "View.MemoryView":1149 - * cdef Py_ssize_t src_extent = src_shape[0] - * cdef Py_ssize_t dst_extent = dst_shape[0] - * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t dst_stride = dst_strides[0] - * - */ - __pyx_v_src_stride = (__pyx_v_src_strides[0]); - - /* "View.MemoryView":1150 - * cdef Py_ssize_t dst_extent = dst_shape[0] - * cdef Py_ssize_t src_stride = src_strides[0] - * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< - * - * if ndim == 1: - */ - __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); - - /* "View.MemoryView":1152 - * cdef Py_ssize_t dst_stride = dst_strides[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * if (src_stride > 0 and dst_stride > 0 and - * <size_t> src_stride == itemsize == <size_t> dst_stride): - */ - __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1153 - * - * if ndim == 1: - * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< - * <size_t> src_stride == itemsize == <size_t> dst_stride): - * memcpy(dst_data, src_data, itemsize * dst_extent) - */ - __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L5_bool_binop_done; - } - __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L5_bool_binop_done; - } - - /* "View.MemoryView":1154 - * if ndim == 1: - * if (src_stride > 0 and dst_stride > 0 and - * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< - * memcpy(dst_data, src_data, itemsize * dst_extent) - * else: - */ - __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); - if (__pyx_t_2) { - __pyx_t_2 = (__pyx_v_itemsize == 
((size_t)__pyx_v_dst_stride)); - } - __pyx_t_3 = (__pyx_t_2 != 0); - __pyx_t_1 = __pyx_t_3; - __pyx_L5_bool_binop_done:; - - /* "View.MemoryView":1153 - * - * if ndim == 1: - * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< - * <size_t> src_stride == itemsize == <size_t> dst_stride): - * memcpy(dst_data, src_data, itemsize * dst_extent) - */ - if (__pyx_t_1) { - - /* "View.MemoryView":1155 - * if (src_stride > 0 and dst_stride > 0 and - * <size_t> src_stride == itemsize == <size_t> dst_stride): - * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< - * else: - * for i in range(dst_extent): - */ - (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); - - /* "View.MemoryView":1153 - * - * if ndim == 1: - * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< - * <size_t> src_stride == itemsize == <size_t> dst_stride): - * memcpy(dst_data, src_data, itemsize * dst_extent) - */ - goto __pyx_L4; - } - - /* "View.MemoryView":1157 - * memcpy(dst_data, src_data, itemsize * dst_extent) - * else: - * for i in range(dst_extent): # <<<<<<<<<<<<<< - * memcpy(dst_data, src_data, itemsize) - * src_data += src_stride - */ - /*else*/ { - __pyx_t_4 = __pyx_v_dst_extent; - __pyx_t_5 = __pyx_t_4; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "View.MemoryView":1158 - * else: - * for i in range(dst_extent): - * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< - * src_data += src_stride - * dst_data += dst_stride - */ - (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); - - /* "View.MemoryView":1159 - * for i in range(dst_extent): - * memcpy(dst_data, src_data, itemsize) - * src_data += src_stride # <<<<<<<<<<<<<< - * dst_data += dst_stride - * else: - */ - __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); - - /* "View.MemoryView":1160 - * memcpy(dst_data, src_data, itemsize) - * src_data += src_stride - * dst_data += dst_stride # <<<<<<<<<<<<<< - * else: - * for i in range(dst_extent): - */ - __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); - } - } - __pyx_L4:; - - /* "View.MemoryView":1152 - * cdef Py_ssize_t dst_stride = dst_strides[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * if (src_stride > 0 and dst_stride > 0 and - * <size_t> src_stride == itemsize == <size_t> dst_stride): - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1162 - * dst_data += dst_stride - * else: - * for i in range(dst_extent): # <<<<<<<<<<<<<< - * _copy_strided_to_strided(src_data, src_strides + 1, - * dst_data, dst_strides + 1, - */ - /*else*/ { - __pyx_t_4 = __pyx_v_dst_extent; - __pyx_t_5 = __pyx_t_4; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "View.MemoryView":1163 - * else: - * for i in range(dst_extent): - * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< - * dst_data, dst_strides + 1, - * src_shape + 1, dst_shape + 1, - */ - _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); - - /* "View.MemoryView":1167 - * src_shape + 1, dst_shape + 1, - * ndim - 1, itemsize) - * src_data += src_stride # <<<<<<<<<<<<<< - * dst_data += dst_stride - * - */ - __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); - - /* "View.MemoryView":1168 - * ndim - 1, itemsize) - * src_data += src_stride - * dst_data += dst_stride # <<<<<<<<<<<<<< - * - * cdef void 
copy_strided_to_strided(__Pyx_memviewslice *src, - */ - __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); - } - } - __pyx_L3:; - - /* "View.MemoryView":1140 - * - * @cython.cdivision(True) - * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< - * char *dst_data, Py_ssize_t *dst_strides, - * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, - */ - - /* function exit code */ -} - -/* "View.MemoryView":1170 - * dst_data += dst_stride - * - * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * int ndim, size_t itemsize) nogil: - */ - -static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { - - /* "View.MemoryView":1173 - * __Pyx_memviewslice *dst, - * int ndim, size_t itemsize) nogil: - * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< - * src.shape, dst.shape, ndim, itemsize) - * - */ - _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); - - /* "View.MemoryView":1170 - * dst_data += dst_stride - * - * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * int ndim, size_t itemsize) nogil: - */ - - /* function exit code */ -} - -/* "View.MemoryView":1177 - * - * @cname('__pyx_memoryview_slice_get_size') - * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< - * "Return the size of the memory occupied by the slice in number of bytes" - * cdef Py_ssize_t shape, size = src.memview.view.itemsize - */ - -static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { - Py_ssize_t __pyx_v_shape; - Py_ssize_t __pyx_v_size; - Py_ssize_t __pyx_r; - Py_ssize_t __pyx_t_1; - Py_ssize_t *__pyx_t_2; - Py_ssize_t *__pyx_t_3; - Py_ssize_t *__pyx_t_4; - - /* "View.MemoryView":1179 - * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: - * "Return the size of the memory occupied by the slice in number of bytes" - * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< - * - * for shape in src.shape[:ndim]: - */ - __pyx_t_1 = __pyx_v_src->memview->view.itemsize; - __pyx_v_size = __pyx_t_1; - - /* "View.MemoryView":1181 - * cdef Py_ssize_t shape, size = src.memview.view.itemsize - * - * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< - * size *= shape - * - */ - __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); - for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { - __pyx_t_2 = __pyx_t_4; - __pyx_v_shape = (__pyx_t_2[0]); - - /* "View.MemoryView":1182 - * - * for shape in src.shape[:ndim]: - * size *= shape # <<<<<<<<<<<<<< - * - * return size - */ - __pyx_v_size = (__pyx_v_size * __pyx_v_shape); - } - - /* "View.MemoryView":1184 - * size *= shape - * - * return size # <<<<<<<<<<<<<< - * - * @cname('__pyx_fill_contig_strides_array') - */ - __pyx_r = __pyx_v_size; - goto __pyx_L0; - - /* "View.MemoryView":1177 - * - * @cname('__pyx_memoryview_slice_get_size') - * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< - * "Return the size of the memory occupied by the slice in number of bytes" - * cdef Py_ssize_t shape, size = src.memview.view.itemsize - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* 
"View.MemoryView":1187 - * - * @cname('__pyx_fill_contig_strides_array') - * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< - * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, - * int ndim, char order) nogil: - */ - -static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { - int __pyx_v_idx; - Py_ssize_t __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - - /* "View.MemoryView":1196 - * cdef int idx - * - * if order == 'F': # <<<<<<<<<<<<<< - * for idx in range(ndim): - * strides[idx] = stride - */ - __pyx_t_1 = ((__pyx_v_order == 'F') != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1197 - * - * if order == 'F': - * for idx in range(ndim): # <<<<<<<<<<<<<< - * strides[idx] = stride - * stride *= shape[idx] - */ - __pyx_t_2 = __pyx_v_ndim; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_idx = __pyx_t_4; - - /* "View.MemoryView":1198 - * if order == 'F': - * for idx in range(ndim): - * strides[idx] = stride # <<<<<<<<<<<<<< - * stride *= shape[idx] - * else: - */ - (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; - - /* "View.MemoryView":1199 - * for idx in range(ndim): - * strides[idx] = stride - * stride *= shape[idx] # <<<<<<<<<<<<<< - * else: - * for idx in range(ndim - 1, -1, -1): - */ - __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); - } - - /* "View.MemoryView":1196 - * cdef int idx - * - * if order == 'F': # <<<<<<<<<<<<<< - * for idx in range(ndim): - * strides[idx] = stride - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1201 - * stride *= shape[idx] - * else: - * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< - * strides[idx] = stride - * stride *= shape[idx] - */ - /*else*/ { - for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { - __pyx_v_idx = __pyx_t_2; - - /* "View.MemoryView":1202 - * else: - * for idx in range(ndim - 1, -1, -1): - * strides[idx] = stride # <<<<<<<<<<<<<< - * stride *= shape[idx] - * - */ - (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; - - /* "View.MemoryView":1203 - * for idx in range(ndim - 1, -1, -1): - * strides[idx] = stride - * stride *= shape[idx] # <<<<<<<<<<<<<< - * - * return stride - */ - __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); - } - } - __pyx_L3:; - - /* "View.MemoryView":1205 - * stride *= shape[idx] - * - * return stride # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_copy_data_to_temp') - */ - __pyx_r = __pyx_v_stride; - goto __pyx_L0; - - /* "View.MemoryView":1187 - * - * @cname('__pyx_fill_contig_strides_array') - * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< - * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, - * int ndim, char order) nogil: - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1208 - * - * @cname('__pyx_memoryview_copy_data_to_temp') - * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *tmpslice, - * char order, - */ - -static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { - int __pyx_v_i; - void *__pyx_v_result; - size_t __pyx_v_itemsize; - size_t __pyx_v_size; - void *__pyx_r; - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - struct __pyx_memoryview_obj *__pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - int 
__pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - - /* "View.MemoryView":1219 - * cdef void *result - * - * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< - * cdef size_t size = slice_get_size(src, ndim) - * - */ - __pyx_t_1 = __pyx_v_src->memview->view.itemsize; - __pyx_v_itemsize = __pyx_t_1; - - /* "View.MemoryView":1220 - * - * cdef size_t itemsize = src.memview.view.itemsize - * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< - * - * result = malloc(size) - */ - __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); - - /* "View.MemoryView":1222 - * cdef size_t size = slice_get_size(src, ndim) - * - * result = malloc(size) # <<<<<<<<<<<<<< - * if not result: - * _err(MemoryError, NULL) - */ - __pyx_v_result = malloc(__pyx_v_size); - - /* "View.MemoryView":1223 - * - * result = malloc(size) - * if not result: # <<<<<<<<<<<<<< - * _err(MemoryError, NULL) - * - */ - __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1224 - * result = malloc(size) - * if not result: - * _err(MemoryError, NULL) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 1224, __pyx_L1_error) - - /* "View.MemoryView":1223 - * - * result = malloc(size) - * if not result: # <<<<<<<<<<<<<< - * _err(MemoryError, NULL) - * - */ - } - - /* "View.MemoryView":1227 - * - * - * tmpslice.data = <char *> result # <<<<<<<<<<<<<< - * tmpslice.memview = src.memview - * for i in range(ndim): - */ - __pyx_v_tmpslice->data = ((char *)__pyx_v_result); - - /* "View.MemoryView":1228 - * - * tmpslice.data = <char *> result - * tmpslice.memview = src.memview # <<<<<<<<<<<<<< - * for i in range(ndim): - * tmpslice.shape[i] = src.shape[i] - */ - __pyx_t_4 = __pyx_v_src->memview; - __pyx_v_tmpslice->memview = __pyx_t_4; - - /* "View.MemoryView":1229 - * tmpslice.data = <char *> result - * tmpslice.memview = src.memview - * for i in range(ndim): # <<<<<<<<<<<<<< - * tmpslice.shape[i] = src.shape[i] - * tmpslice.suboffsets[i] = -1 - */ - __pyx_t_3 = __pyx_v_ndim; - __pyx_t_5 = __pyx_t_3; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "View.MemoryView":1230 - * tmpslice.memview = src.memview - * for i in range(ndim): - * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< - * tmpslice.suboffsets[i] = -1 - * - */ - (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); - - /* "View.MemoryView":1231 - * for i in range(ndim): - * tmpslice.shape[i] = src.shape[i] - * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< - * - * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, - */ - (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; - } - - /* "View.MemoryView":1233 - * tmpslice.suboffsets[i] = -1 - * - * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< - * ndim, order) - * - */ - (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); - - /* "View.MemoryView":1237 - * - * - * for i in range(ndim): # <<<<<<<<<<<<<< - * if tmpslice.shape[i] == 1: - * tmpslice.strides[i] = 0 - */ - __pyx_t_3 = __pyx_v_ndim; - __pyx_t_5 = __pyx_t_3; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "View.MemoryView":1238 - * - * for i in range(ndim): - * if tmpslice.shape[i] == 1: # 
<<<<<<<<<<<<<< - * tmpslice.strides[i] = 0 - * - */ - __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1239 - * for i in range(ndim): - * if tmpslice.shape[i] == 1: - * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< - * - * if slice_is_contig(src[0], order, ndim): - */ - (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; - - /* "View.MemoryView":1238 - * - * for i in range(ndim): - * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< - * tmpslice.strides[i] = 0 - * - */ - } - } - - /* "View.MemoryView":1241 - * tmpslice.strides[i] = 0 - * - * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< - * memcpy(result, src.data, size) - * else: - */ - __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1242 - * - * if slice_is_contig(src[0], order, ndim): - * memcpy(result, src.data, size) # <<<<<<<<<<<<<< - * else: - * copy_strided_to_strided(src, tmpslice, ndim, itemsize) - */ - (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); - - /* "View.MemoryView":1241 - * tmpslice.strides[i] = 0 - * - * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< - * memcpy(result, src.data, size) - * else: - */ - goto __pyx_L9; - } - - /* "View.MemoryView":1244 - * memcpy(result, src.data, size) - * else: - * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< - * - * return result - */ - /*else*/ { - copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); - } - __pyx_L9:; - - /* "View.MemoryView":1246 - * copy_strided_to_strided(src, tmpslice, ndim, itemsize) - * - * return result # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_result; - goto __pyx_L0; - - /* "View.MemoryView":1208 - * - * @cname('__pyx_memoryview_copy_data_to_temp') - * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *tmpslice, - * char order, - */ - - /* function exit code */ - __pyx_L1_error:; - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - __pyx_r = NULL; - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1251 - * - * @cname('__pyx_memoryview_err_extents') - * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< - * Py_ssize_t extent2) except -1 with gil: - * raise ValueError("got differing extents in dimension %d (got %d and %d)" % - */ - -static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("_err_extents", 0); - - /* "View.MemoryView":1254 - * Py_ssize_t extent2) except -1 with gil: - * raise ValueError("got differing extents in dimension %d (got %d and %d)" % - * (i, extent1, extent2)) # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_err_dim') - */ - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1254, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = 
PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1254, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1254, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1254, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_3 = 0; - - /* "View.MemoryView":1253 - * cdef int _err_extents(int i, Py_ssize_t extent1, - * Py_ssize_t extent2) except -1 with gil: - * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< - * (i, extent1, extent2)) - * - */ - __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1253, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1253, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(2, 1253, __pyx_L1_error) - - /* "View.MemoryView":1251 - * - * @cname('__pyx_memoryview_err_extents') - * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< - * Py_ssize_t extent2) except -1 with gil: - * raise ValueError("got differing extents in dimension %d (got %d and %d)" % - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - return __pyx_r; -} - -/* "View.MemoryView":1257 - * - * @cname('__pyx_memoryview_err_dim') - * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< - * raise error(msg.decode('ascii') % dim) - * - */ - -static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("_err_dim", 0); - __Pyx_INCREF(__pyx_v_error); - - /* "View.MemoryView":1258 - * @cname('__pyx_memoryview_err_dim') - * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: - * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_err') - */ - __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) 
__PYX_ERR(2, 1258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_INCREF(__pyx_v_error); - __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - } - } - __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(2, 1258, __pyx_L1_error) - - /* "View.MemoryView":1257 - * - * @cname('__pyx_memoryview_err_dim') - * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< - * raise error(msg.decode('ascii') % dim) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __Pyx_XDECREF(__pyx_v_error); - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - return __pyx_r; -} - -/* "View.MemoryView":1261 - * - * @cname('__pyx_memoryview_err') - * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< - * if msg != NULL: - * raise error(msg.decode('ascii')) - */ - -static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("_err", 0); - __Pyx_INCREF(__pyx_v_error); - - /* "View.MemoryView":1262 - * @cname('__pyx_memoryview_err') - * cdef int _err(object error, char *msg) except -1 with gil: - * if msg != NULL: # <<<<<<<<<<<<<< - * raise error(msg.decode('ascii')) - * else: - */ - __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":1263 - * cdef int _err(object error, char *msg) except -1 with gil: - * if msg != NULL: - * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< - * else: - * raise error - */ - __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1263, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_error); - __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - } - } - __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1263, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(2, 1263, __pyx_L1_error) - - /* "View.MemoryView":1262 - * @cname('__pyx_memoryview_err') - * cdef int _err(object error, char *msg) except -1 with gil: - * if msg != NULL: # <<<<<<<<<<<<<< - * raise error(msg.decode('ascii')) - * else: - */ - } - - /* "View.MemoryView":1265 - * raise error(msg.decode('ascii')) - * else: - * raise error # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_copy_contents') - */ - /*else*/ { - __Pyx_Raise(__pyx_v_error, 0, 0, 0); - __PYX_ERR(2, 1265, __pyx_L1_error) - } - - /* "View.MemoryView":1261 - * - * @cname('__pyx_memoryview_err') - * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< - * if msg != NULL: - * raise error(msg.decode('ascii')) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __Pyx_XDECREF(__pyx_v_error); - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - return __pyx_r; -} - -/* "View.MemoryView":1268 - * - * @cname('__pyx_memoryview_copy_contents') - * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice dst, - * int src_ndim, int dst_ndim, - */ - -static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { - void *__pyx_v_tmpdata; - size_t __pyx_v_itemsize; - int __pyx_v_i; - char __pyx_v_order; - int __pyx_v_broadcasting; - int __pyx_v_direct_copy; - __Pyx_memviewslice __pyx_v_tmp; - int __pyx_v_ndim; - int __pyx_r; - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - void *__pyx_t_7; - int __pyx_t_8; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - - /* "View.MemoryView":1276 - * Check for overlapping memory and verify the shapes. 
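- *
- *     A short pure-Python sketch of the extent check below (hypothetical
- *     names, not generated output): a source extent of 1 may broadcast
- *     against any destination extent by zeroing its stride, while any other
- *     mismatch raises the "differing extents" ValueError built by
- *     _err_extents above:
- *
- *         for i, (s, d) in enumerate(zip(src_shape, dst_shape)):
- *             if s != d:
- *                 if s == 1:
- *                     broadcasting = True   # stretch this axis via stride 0
- *                     src_strides[i] = 0
- *                 else:
- *                     raise ValueError("got differing extents in dimension %d (got %d and %d)" % (i, d, s))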
- * """ - * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< - * cdef size_t itemsize = src.memview.view.itemsize - * cdef int i - */ - __pyx_v_tmpdata = NULL; - - /* "View.MemoryView":1277 - * """ - * cdef void *tmpdata = NULL - * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< - * cdef int i - * cdef char order = get_best_order(&src, src_ndim) - */ - __pyx_t_1 = __pyx_v_src.memview->view.itemsize; - __pyx_v_itemsize = __pyx_t_1; - - /* "View.MemoryView":1279 - * cdef size_t itemsize = src.memview.view.itemsize - * cdef int i - * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< - * cdef bint broadcasting = False - * cdef bint direct_copy = False - */ - __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); - - /* "View.MemoryView":1280 - * cdef int i - * cdef char order = get_best_order(&src, src_ndim) - * cdef bint broadcasting = False # <<<<<<<<<<<<<< - * cdef bint direct_copy = False - * cdef __Pyx_memviewslice tmp - */ - __pyx_v_broadcasting = 0; - - /* "View.MemoryView":1281 - * cdef char order = get_best_order(&src, src_ndim) - * cdef bint broadcasting = False - * cdef bint direct_copy = False # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice tmp - * - */ - __pyx_v_direct_copy = 0; - - /* "View.MemoryView":1284 - * cdef __Pyx_memviewslice tmp - * - * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: - */ - __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1285 - * - * if src_ndim < dst_ndim: - * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< - * elif dst_ndim < src_ndim: - * broadcast_leading(&dst, dst_ndim, src_ndim) - */ - __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); - - /* "View.MemoryView":1284 - * cdef __Pyx_memviewslice tmp - * - * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1286 - * if src_ndim < dst_ndim: - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&dst, dst_ndim, src_ndim) - * - */ - __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1287 - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: - * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< - * - * cdef int ndim = max(src_ndim, dst_ndim) - */ - __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); - - /* "View.MemoryView":1286 - * if src_ndim < dst_ndim: - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&dst, dst_ndim, src_ndim) - * - */ - } - __pyx_L3:; - - /* "View.MemoryView":1289 - * broadcast_leading(&dst, dst_ndim, src_ndim) - * - * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< - * - * for i in range(ndim): - */ - __pyx_t_3 = __pyx_v_dst_ndim; - __pyx_t_4 = __pyx_v_src_ndim; - if (((__pyx_t_3 > __pyx_t_4) != 0)) { - __pyx_t_5 = __pyx_t_3; - } else { - __pyx_t_5 = __pyx_t_4; - } - __pyx_v_ndim = __pyx_t_5; - - /* "View.MemoryView":1291 - * cdef int ndim = max(src_ndim, dst_ndim) - * - * for i in range(ndim): # <<<<<<<<<<<<<< - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: - */ - __pyx_t_5 = __pyx_v_ndim; - __pyx_t_3 = __pyx_t_5; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - 
__pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1292 - * - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< - * if src.shape[i] == 1: - * broadcasting = True - */ - __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1293 - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: # <<<<<<<<<<<<<< - * broadcasting = True - * src.strides[i] = 0 - */ - __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1294 - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: - * broadcasting = True # <<<<<<<<<<<<<< - * src.strides[i] = 0 - * else: - */ - __pyx_v_broadcasting = 1; - - /* "View.MemoryView":1295 - * if src.shape[i] == 1: - * broadcasting = True - * src.strides[i] = 0 # <<<<<<<<<<<<<< - * else: - * _err_extents(i, dst.shape[i], src.shape[i]) - */ - (__pyx_v_src.strides[__pyx_v_i]) = 0; - - /* "View.MemoryView":1293 - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: # <<<<<<<<<<<<<< - * broadcasting = True - * src.strides[i] = 0 - */ - goto __pyx_L7; - } - - /* "View.MemoryView":1297 - * src.strides[i] = 0 - * else: - * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< - * - * if src.suboffsets[i] >= 0: - */ - /*else*/ { - __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 1297, __pyx_L1_error) - } - __pyx_L7:; - - /* "View.MemoryView":1292 - * - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< - * if src.shape[i] == 1: - * broadcasting = True - */ - } - - /* "View.MemoryView":1299 - * _err_extents(i, dst.shape[i], src.shape[i]) - * - * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< - * _err_dim(ValueError, "Dimension %d is not direct", i) - * - */ - __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1300 - * - * if src.suboffsets[i] >= 0: - * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< - * - * if slices_overlap(&src, &dst, ndim, itemsize): - */ - __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 1300, __pyx_L1_error) - - /* "View.MemoryView":1299 - * _err_extents(i, dst.shape[i], src.shape[i]) - * - * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< - * _err_dim(ValueError, "Dimension %d is not direct", i) - * - */ - } - } - - /* "View.MemoryView":1302 - * _err_dim(ValueError, "Dimension %d is not direct", i) - * - * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< - * - * if not slice_is_contig(src, order, ndim): - */ - __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1304 - * if slices_overlap(&src, &dst, ndim, itemsize): - * - * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< - * order = get_best_order(&dst, ndim) - * - */ - __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1305 - * - * if not slice_is_contig(src, order, ndim): - * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< - * - * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) - */ - __pyx_v_order = 
__pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); - - /* "View.MemoryView":1304 - * if slices_overlap(&src, &dst, ndim, itemsize): - * - * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< - * order = get_best_order(&dst, ndim) - * - */ - } - - /* "View.MemoryView":1307 - * order = get_best_order(&dst, ndim) - * - * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< - * src = tmp - * - */ - __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(2, 1307, __pyx_L1_error) - __pyx_v_tmpdata = __pyx_t_7; - - /* "View.MemoryView":1308 - * - * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) - * src = tmp # <<<<<<<<<<<<<< - * - * if not broadcasting: - */ - __pyx_v_src = __pyx_v_tmp; - - /* "View.MemoryView":1302 - * _err_dim(ValueError, "Dimension %d is not direct", i) - * - * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< - * - * if not slice_is_contig(src, order, ndim): - */ - } - - /* "View.MemoryView":1310 - * src = tmp - * - * if not broadcasting: # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1313 - * - * - * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): - */ - __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1314 - * - * if slice_is_contig(src, 'C', ndim): - * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< - * elif slice_is_contig(src, 'F', ndim): - * direct_copy = slice_is_contig(dst, 'F', ndim) - */ - __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); - - /* "View.MemoryView":1313 - * - * - * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): - */ - goto __pyx_L12; - } - - /* "View.MemoryView":1315 - * if slice_is_contig(src, 'C', ndim): - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - */ - __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1316 - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): - * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< - * - * if direct_copy: - */ - __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); - - /* "View.MemoryView":1315 - * if slice_is_contig(src, 'C', ndim): - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - */ - } - __pyx_L12:; - - /* "View.MemoryView":1318 - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - * if direct_copy: # <<<<<<<<<<<<<< - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - */ - __pyx_t_2 = (__pyx_v_direct_copy != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1320 - * if direct_copy: - * - * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) - * refcount_copying(&dst, dtype_is_object, ndim, True) - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); - - 
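/* Direct-copy path: when src and dst share the same contiguity order, the
   whole copy is a single memcpy. For object dtypes, the inc=0
   refcount_copying call above first drops the references held by dst's
   current items; the memcpy below then duplicates the raw PyObject*
   pointers, and the inc=1 pass afterwards increfs them so src and dst each
   own a reference. For non-object dtypes both refcount passes are no-ops
   and this is a plain byte copy. */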
/* "View.MemoryView":1321 - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< - * refcount_copying(&dst, dtype_is_object, ndim, True) - * free(tmpdata) - */ - (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); - - /* "View.MemoryView":1322 - * refcount_copying(&dst, dtype_is_object, ndim, False) - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) - * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< - * free(tmpdata) - * return 0 - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); - - /* "View.MemoryView":1323 - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) - * refcount_copying(&dst, dtype_is_object, ndim, True) - * free(tmpdata) # <<<<<<<<<<<<<< - * return 0 - * - */ - free(__pyx_v_tmpdata); - - /* "View.MemoryView":1324 - * refcount_copying(&dst, dtype_is_object, ndim, True) - * free(tmpdata) - * return 0 # <<<<<<<<<<<<<< - * - * if order == 'F' == get_best_order(&dst, ndim): - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":1318 - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - * if direct_copy: # <<<<<<<<<<<<<< - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - */ - } - - /* "View.MemoryView":1310 - * src = tmp - * - * if not broadcasting: # <<<<<<<<<<<<<< - * - * - */ - } - - /* "View.MemoryView":1326 - * return 0 - * - * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = (__pyx_v_order == 'F'); - if (__pyx_t_2) { - __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); - } - __pyx_t_8 = (__pyx_t_2 != 0); - if (__pyx_t_8) { - - /* "View.MemoryView":1329 - * - * - * transpose_memslice(&src) # <<<<<<<<<<<<<< - * transpose_memslice(&dst) - * - */ - __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(2, 1329, __pyx_L1_error) - - /* "View.MemoryView":1330 - * - * transpose_memslice(&src) - * transpose_memslice(&dst) # <<<<<<<<<<<<<< - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - */ - __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(2, 1330, __pyx_L1_error) - - /* "View.MemoryView":1326 - * return 0 - * - * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< - * - * - */ - } - - /* "View.MemoryView":1332 - * transpose_memslice(&dst) - * - * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< - * copy_strided_to_strided(&src, &dst, ndim, itemsize) - * refcount_copying(&dst, dtype_is_object, ndim, True) - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); - - /* "View.MemoryView":1333 - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< - * refcount_copying(&dst, dtype_is_object, ndim, True) - * - */ - copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); - - /* "View.MemoryView":1334 - * refcount_copying(&dst, dtype_is_object, ndim, False) - * copy_strided_to_strided(&src, &dst, ndim, itemsize) - * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< - * - * free(tmpdata) - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); - - /* "View.MemoryView":1336 - * refcount_copying(&dst, dtype_is_object, ndim, 
True) - * - * free(tmpdata) # <<<<<<<<<<<<<< - * return 0 - * - */ - free(__pyx_v_tmpdata); - - /* "View.MemoryView":1337 - * - * free(tmpdata) - * return 0 # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_broadcast_leading') - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":1268 - * - * @cname('__pyx_memoryview_copy_contents') - * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice dst, - * int src_ndim, int dst_ndim, - */ - - /* function exit code */ - __pyx_L1_error:; - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - __pyx_r = -1; - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1340 - * - * @cname('__pyx_memoryview_broadcast_leading') - * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< - * int ndim, - * int ndim_other) nogil: - */ - -static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { - int __pyx_v_i; - int __pyx_v_offset; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - - /* "View.MemoryView":1344 - * int ndim_other) nogil: - * cdef int i - * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< - * - * for i in range(ndim - 1, -1, -1): - */ - __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); - - /* "View.MemoryView":1346 - * cdef int offset = ndim_other - ndim - * - * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< - * mslice.shape[i + offset] = mslice.shape[i] - * mslice.strides[i + offset] = mslice.strides[i] - */ - for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { - __pyx_v_i = __pyx_t_1; - - /* "View.MemoryView":1347 - * - * for i in range(ndim - 1, -1, -1): - * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< - * mslice.strides[i + offset] = mslice.strides[i] - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] - */ - (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); - - /* "View.MemoryView":1348 - * for i in range(ndim - 1, -1, -1): - * mslice.shape[i + offset] = mslice.shape[i] - * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] - * - */ - (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); - - /* "View.MemoryView":1349 - * mslice.shape[i + offset] = mslice.shape[i] - * mslice.strides[i + offset] = mslice.strides[i] - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< - * - * for i in range(offset): - */ - (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); - } - - /* "View.MemoryView":1351 - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] - * - * for i in range(offset): # <<<<<<<<<<<<<< - * mslice.shape[i] = 1 - * mslice.strides[i] = mslice.strides[0] - */ - __pyx_t_1 = __pyx_v_offset; - __pyx_t_2 = __pyx_t_1; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "View.MemoryView":1352 - * - * for i in range(offset): - * mslice.shape[i] = 1 # <<<<<<<<<<<<<< - * mslice.strides[i] = mslice.strides[0] - * mslice.suboffsets[i] = -1 - */ - (__pyx_v_mslice->shape[__pyx_v_i]) = 1; - - /* "View.MemoryView":1353 - * for i in range(offset): - * mslice.shape[i] = 1 
- * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< - * mslice.suboffsets[i] = -1 - * - */ - (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); - - /* "View.MemoryView":1354 - * mslice.shape[i] = 1 - * mslice.strides[i] = mslice.strides[0] - * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< - * - * - */ - (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; - } - - /* "View.MemoryView":1340 - * - * @cname('__pyx_memoryview_broadcast_leading') - * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< - * int ndim, - * int ndim_other) nogil: - */ - - /* function exit code */ -} - -/* "View.MemoryView":1362 - * - * @cname('__pyx_memoryview_refcount_copying') - * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< - * int ndim, bint inc) nogil: - * - */ - -static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { - int __pyx_t_1; - - /* "View.MemoryView":1366 - * - * - * if dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice_with_gil(dst.data, dst.shape, - * dst.strides, ndim, inc) - */ - __pyx_t_1 = (__pyx_v_dtype_is_object != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1367 - * - * if dtype_is_object: - * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< - * dst.strides, ndim, inc) - * - */ - __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); - - /* "View.MemoryView":1366 - * - * - * if dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice_with_gil(dst.data, dst.shape, - * dst.strides, ndim, inc) - */ - } - - /* "View.MemoryView":1362 - * - * @cname('__pyx_memoryview_refcount_copying') - * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< - * int ndim, bint inc) nogil: - * - */ - - /* function exit code */ -} - -/* "View.MemoryView":1371 - * - * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') - * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * bint inc) with gil: - */ - -static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { - __Pyx_RefNannyDeclarations - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); - - /* "View.MemoryView":1374 - * Py_ssize_t *strides, int ndim, - * bint inc) with gil: - * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_refcount_objects_in_slice') - */ - __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); - - /* "View.MemoryView":1371 - * - * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') - * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * bint inc) with gil: - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif -} - -/* "View.MemoryView":1377 - * - * @cname('__pyx_memoryview_refcount_objects_in_slice') - * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * 
Py_ssize_t *strides, int ndim, bint inc): - * cdef Py_ssize_t i - */ - -static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { - CYTHON_UNUSED Py_ssize_t __pyx_v_i; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - Py_ssize_t __pyx_t_2; - Py_ssize_t __pyx_t_3; - int __pyx_t_4; - __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); - - /* "View.MemoryView":1381 - * cdef Py_ssize_t i - * - * for i in range(shape[0]): # <<<<<<<<<<<<<< - * if ndim == 1: - * if inc: - */ - __pyx_t_1 = (__pyx_v_shape[0]); - __pyx_t_2 = __pyx_t_1; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "View.MemoryView":1382 - * - * for i in range(shape[0]): - * if ndim == 1: # <<<<<<<<<<<<<< - * if inc: - * Py_INCREF((<PyObject **> data)[0]) - */ - __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":1383 - * for i in range(shape[0]): - * if ndim == 1: - * if inc: # <<<<<<<<<<<<<< - * Py_INCREF((<PyObject **> data)[0]) - * else: - */ - __pyx_t_4 = (__pyx_v_inc != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":1384 - * if ndim == 1: - * if inc: - * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< - * else: - * Py_DECREF((<PyObject **> data)[0]) - */ - Py_INCREF((((PyObject **)__pyx_v_data)[0])); - - /* "View.MemoryView":1383 - * for i in range(shape[0]): - * if ndim == 1: - * if inc: # <<<<<<<<<<<<<< - * Py_INCREF((<PyObject **> data)[0]) - * else: - */ - goto __pyx_L6; - } - - /* "View.MemoryView":1386 - * Py_INCREF((<PyObject **> data)[0]) - * else: - * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< - * else: - * refcount_objects_in_slice(data, shape + 1, strides + 1, - */ - /*else*/ { - Py_DECREF((((PyObject **)__pyx_v_data)[0])); - } - __pyx_L6:; - - /* "View.MemoryView":1382 - * - * for i in range(shape[0]): - * if ndim == 1: # <<<<<<<<<<<<<< - * if inc: - * Py_INCREF((<PyObject **> data)[0]) - */ - goto __pyx_L5; - } - - /* "View.MemoryView":1388 - * Py_DECREF((<PyObject **> data)[0]) - * else: - * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< - * ndim - 1, inc) - * - */ - /*else*/ { - - /* "View.MemoryView":1389 - * else: - * refcount_objects_in_slice(data, shape + 1, strides + 1, - * ndim - 1, inc) # <<<<<<<<<<<<<< - * - * data += strides[0] - */ - __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); - } - __pyx_L5:; - - /* "View.MemoryView":1391 - * ndim - 1, inc) - * - * data += strides[0] # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); - } - - /* "View.MemoryView":1377 - * - * @cname('__pyx_memoryview_refcount_objects_in_slice') - * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, bint inc): - * cdef Py_ssize_t i - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":1397 - * - * @cname('__pyx_memoryview_slice_assign_scalar') - * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< - * size_t itemsize, void *item, - * bint dtype_is_object) nogil: - */ - -static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { - - /* "View.MemoryView":1400 - * size_t itemsize, void *item, - * bint dtype_is_object) nogil: - * 
refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< - * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, - * itemsize, item) - */ - __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); - - /* "View.MemoryView":1401 - * bint dtype_is_object) nogil: - * refcount_copying(dst, dtype_is_object, ndim, False) - * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< - * itemsize, item) - * refcount_copying(dst, dtype_is_object, ndim, True) - */ - __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); - - /* "View.MemoryView":1403 - * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, - * itemsize, item) - * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< - * - * - */ - __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); - - /* "View.MemoryView":1397 - * - * @cname('__pyx_memoryview_slice_assign_scalar') - * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< - * size_t itemsize, void *item, - * bint dtype_is_object) nogil: - */ - - /* function exit code */ -} - -/* "View.MemoryView":1407 - * - * @cname('__pyx_memoryview__slice_assign_scalar') - * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * size_t itemsize, void *item) nogil: - */ - -static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { - CYTHON_UNUSED Py_ssize_t __pyx_v_i; - Py_ssize_t __pyx_v_stride; - Py_ssize_t __pyx_v_extent; - int __pyx_t_1; - Py_ssize_t __pyx_t_2; - Py_ssize_t __pyx_t_3; - Py_ssize_t __pyx_t_4; - - /* "View.MemoryView":1411 - * size_t itemsize, void *item) nogil: - * cdef Py_ssize_t i - * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t extent = shape[0] - * - */ - __pyx_v_stride = (__pyx_v_strides[0]); - - /* "View.MemoryView":1412 - * cdef Py_ssize_t i - * cdef Py_ssize_t stride = strides[0] - * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< - * - * if ndim == 1: - */ - __pyx_v_extent = (__pyx_v_shape[0]); - - /* "View.MemoryView":1414 - * cdef Py_ssize_t extent = shape[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * for i in range(extent): - * memcpy(data, item, itemsize) - */ - __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1415 - * - * if ndim == 1: - * for i in range(extent): # <<<<<<<<<<<<<< - * memcpy(data, item, itemsize) - * data += stride - */ - __pyx_t_2 = __pyx_v_extent; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1416 - * if ndim == 1: - * for i in range(extent): - * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< - * data += stride - * else: - */ - (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); - - /* "View.MemoryView":1417 - * for i in range(extent): - * memcpy(data, item, itemsize) - * data += stride # <<<<<<<<<<<<<< - * else: - * for i in range(extent): - */ - __pyx_v_data = (__pyx_v_data + __pyx_v_stride); - } - - /* "View.MemoryView":1414 - * cdef Py_ssize_t extent = shape[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * for i in range(extent): - * memcpy(data, item, itemsize) - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1419 - * data += stride - * else: - 
* for i in range(extent): # <<<<<<<<<<<<<< - * _slice_assign_scalar(data, shape + 1, strides + 1, - * ndim - 1, itemsize, item) - */ - /*else*/ { - __pyx_t_2 = __pyx_v_extent; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1420 - * else: - * for i in range(extent): - * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< - * ndim - 1, itemsize, item) - * data += stride - */ - __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); - - /* "View.MemoryView":1422 - * _slice_assign_scalar(data, shape + 1, strides + 1, - * ndim - 1, itemsize, item) - * data += stride # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_data = (__pyx_v_data + __pyx_v_stride); - } - } - __pyx_L3:; - - /* "View.MemoryView":1407 - * - * @cname('__pyx_memoryview__slice_assign_scalar') - * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * size_t itemsize, void *item) nogil: - */ - - /* function exit code */ -} - -/* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v___pyx_type = 0; - long __pyx_v___pyx_checksum; - PyObject *__pyx_v___pyx_state = 0; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; - PyObject* values[3] = {0,0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(2, 1, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(2, 1, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, 
__pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(2, 1, __pyx_L3_error) - } - } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - } - __pyx_v___pyx_type = values[0]; - __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(2, 1, __pyx_L3_error) - __pyx_v___pyx_state = values[2]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 1, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_v___pyx_PickleError = 0; - PyObject *__pyx_v___pyx_result = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); - - /* "(tree fragment)":4 - * cdef object __pyx_PickleError - * cdef object __pyx_result - * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - */ - __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); - if (__pyx_t_1) { - - /* "(tree fragment)":5 - * cdef object __pyx_result - * if __pyx_checksum != 0xb068931: - * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - * __pyx_result = Enum.__new__(__pyx_type) - */ - __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_n_s_PickleError); - __Pyx_GIVEREF(__pyx_n_s_PickleError); - PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); - __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_t_2); - __pyx_v___pyx_PickleError = __pyx_t_2; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "(tree fragment)":6 - * if __pyx_checksum != 0xb068931: - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< - * __pyx_result = Enum.__new__(__pyx_type) - * if 
__pyx_state is not None: - */ - __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_INCREF(__pyx_v___pyx_PickleError); - __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - } - } - __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(2, 6, __pyx_L1_error) - - /* "(tree fragment)":4 - * cdef object __pyx_PickleError - * cdef object __pyx_result - * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - */ - } - - /* "(tree fragment)":7 - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< - * if __pyx_state is not None: - * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 7, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - } - } - __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 7, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v___pyx_result = __pyx_t_3; - __pyx_t_3 = 0; - - /* "(tree fragment)":8 - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) - * return __pyx_result - */ - __pyx_t_1 = (__pyx_v___pyx_state != Py_None); - __pyx_t_6 = (__pyx_t_1 != 0); - if (__pyx_t_6) { - - /* "(tree fragment)":9 - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: - * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - */ - if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(2, 9, __pyx_L1_error) - __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 9, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "(tree fragment)":8 - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) - * return __pyx_result - */ - } - - /* "(tree fragment)":10 - * if __pyx_state is not None: - * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) - * return __pyx_result # <<<<<<<<<<<<<< - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v___pyx_result); - __pyx_r = __pyx_v___pyx_result; - goto __pyx_L0; - - /* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v___pyx_PickleError); - __Pyx_XDECREF(__pyx_v___pyx_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":11 - * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - */ - -static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - Py_ssize_t __pyx_t_3; - int __pyx_t_4; - 
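/* __set_state restores the Enum from the pickle tuple built by
   __reduce_cython__: state[0] becomes .name, and, when the tuple carries a
   second entry and the instance exposes a __dict__, state[1] is merged in
   via __dict__.update(state[1]). The __pyx_t_* slots declared here are
   Cython's anonymous temporaries. */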
int __pyx_t_5; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); - - /* "(tree fragment)":12 - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - * __pyx_result.__dict__.update(__pyx_state[1]) - */ - if (unlikely(__pyx_v___pyx_state == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); - __PYX_ERR(2, 12, __pyx_L1_error) - } - __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 12, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __Pyx_GOTREF(__pyx_v___pyx_result->name); - __Pyx_DECREF(__pyx_v___pyx_result->name); - __pyx_v___pyx_result->name = __pyx_t_1; - __pyx_t_1 = 0; - - /* "(tree fragment)":13 - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< - * __pyx_result.__dict__.update(__pyx_state[1]) - */ - if (unlikely(__pyx_v___pyx_state == Py_None)) { - PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); - __PYX_ERR(2, 13, __pyx_L1_error) - } - __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(2, 13, __pyx_L1_error) - __pyx_t_4 = ((__pyx_t_3 > 1) != 0); - if (__pyx_t_4) { - } else { - __pyx_t_2 = __pyx_t_4; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 13, __pyx_L1_error) - __pyx_t_5 = (__pyx_t_4 != 0); - __pyx_t_2 = __pyx_t_5; - __pyx_L4_bool_binop_done:; - if (__pyx_t_2) { - - /* "(tree fragment)":14 - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< - */ - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(__pyx_v___pyx_state == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); - __PYX_ERR(2, 14, __pyx_L1_error) - } - __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_8 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { - __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); - if (likely(__pyx_t_8)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); - __Pyx_INCREF(__pyx_t_8); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_7, function); - } - } - __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "(tree fragment)":13 - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< - * __pyx_result.__dict__.update(__pyx_state[1]) - */ - } - - /* "(tree fragment)":11 - * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} -static struct __pyx_vtabstruct_array __pyx_vtable_array; - -static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_array_obj *p; - PyObject *o; - if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { - o = (*t->tp_alloc)(t, 0); - } else { - o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); - } - if (unlikely(!o)) return 0; - p = ((struct __pyx_array_obj *)o); - p->__pyx_vtab = __pyx_vtabptr_array; - p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); - p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); - if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; - return o; - bad: - Py_DECREF(o); o = 0; - return NULL; -} - -static void __pyx_tp_dealloc_array(PyObject *o) { - struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - #endif - { - PyObject *etype, *eval, *etb; - PyErr_Fetch(&etype, &eval, &etb); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); - __pyx_array___dealloc__(o); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); - PyErr_Restore(etype, eval, etb); - } - Py_CLEAR(p->mode); - Py_CLEAR(p->_format); - (*Py_TYPE(o)->tp_free)(o); -} -static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { - PyObject *r; - PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; - r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); - Py_DECREF(x); - return r; -} - -static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { - if (v) { - return __pyx_array___setitem__(o, i, v); - } - else { - PyErr_Format(PyExc_NotImplementedError, - "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); - return -1; - } -} - -static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { - PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); - if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); - v = __pyx_array___getattr__(o, n); - } - return v; -} - 
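For reference, a minimal standalone sketch of the fallback-getattr pattern that __pyx_tp_getattro_array above implements, written against the public CPython API with a hypothetical function name: generic MRO lookup runs first, and the class-level __getattr__ hook is consulted only when that lookup raises AttributeError.

#include <Python.h>

static PyObject *getattro_with_fallback(PyObject *o, PyObject *name) {
    PyObject *v = PyObject_GenericGetAttr(o, name);     /* normal MRO lookup */
    if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
        PyErr_Clear();                                  /* swallow AttributeError */
        v = PyObject_CallMethod(o, "__getattr__", "O", name);  /* user hook */
    }
    return v;                                           /* NULL propagates other errors */
}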
-static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); -} - -static PyMethodDef __pyx_methods_array[] = { - {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, - {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, - {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, - {0, 0, 0, 0} -}; - -static struct PyGetSetDef __pyx_getsets_array[] = { - {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, - {0, 0, 0, 0, 0} -}; - -static PySequenceMethods __pyx_tp_as_sequence_array = { - __pyx_array___len__, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - __pyx_sq_item_array, /*sq_item*/ - 0, /*sq_slice*/ - 0, /*sq_ass_item*/ - 0, /*sq_ass_slice*/ - 0, /*sq_contains*/ - 0, /*sq_inplace_concat*/ - 0, /*sq_inplace_repeat*/ -}; - -static PyMappingMethods __pyx_tp_as_mapping_array = { - __pyx_array___len__, /*mp_length*/ - __pyx_array___getitem__, /*mp_subscript*/ - __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ -}; - -static PyBufferProcs __pyx_tp_as_buffer_array = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - __pyx_array_getbuffer, /*bf_getbuffer*/ - 0, /*bf_releasebuffer*/ -}; - -static PyTypeObject __pyx_type___pyx_array = { - PyVarObject_HEAD_INIT(0, 0) - "madmom.features.beats_crf.array", /*tp_name*/ - sizeof(struct __pyx_array_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_array, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ - &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - __pyx_tp_getattro_array, /*tp_getattro*/ - 0, /*tp_setattro*/ - &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ - 0, /*tp_doc*/ - 0, /*tp_traverse*/ - 0, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_array, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets_array, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_array, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ - #endif - #if PY_VERSION_HEX >= 0x030800b1 - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif -}; - -static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { - struct __pyx_MemviewEnum_obj *p; - PyObject *o; - if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { - o = (*t->tp_alloc)(t, 0); - } else { 
- o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); - } - if (unlikely(!o)) return 0; - p = ((struct __pyx_MemviewEnum_obj *)o); - p->name = Py_None; Py_INCREF(Py_None); - return o; -} - -static void __pyx_tp_dealloc_Enum(PyObject *o) { - struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - #endif - PyObject_GC_UnTrack(o); - Py_CLEAR(p->name); - (*Py_TYPE(o)->tp_free)(o); -} - -static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; - if (p->name) { - e = (*v)(p->name, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_Enum(PyObject *o) { - PyObject* tmp; - struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; - tmp = ((PyObject*)p->name); - p->name = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - return 0; -} - -static PyMethodDef __pyx_methods_Enum[] = { - {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, - {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, - {0, 0, 0, 0} -}; - -static PyTypeObject __pyx_type___pyx_MemviewEnum = { - PyVarObject_HEAD_INIT(0, 0) - "madmom.features.beats_crf.Enum", /*tp_name*/ - sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_Enum, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - __pyx_MemviewEnum___repr__, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_Enum, /*tp_traverse*/ - __pyx_tp_clear_Enum, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_Enum, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - __pyx_MemviewEnum___init__, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_Enum, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ - #endif - #if PY_VERSION_HEX >= 0x030800b1 - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif -}; -static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; - -static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_memoryview_obj *p; - PyObject *o; - if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { - o = (*t->tp_alloc)(t, 0); - } else { - o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); - } - if (unlikely(!o)) return 0; - p = 
((struct __pyx_memoryview_obj *)o); - p->__pyx_vtab = __pyx_vtabptr_memoryview; - p->obj = Py_None; Py_INCREF(Py_None); - p->_size = Py_None; Py_INCREF(Py_None); - p->_array_interface = Py_None; Py_INCREF(Py_None); - p->view.obj = NULL; - if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; - return o; - bad: - Py_DECREF(o); o = 0; - return NULL; -} - -static void __pyx_tp_dealloc_memoryview(PyObject *o) { - struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - #endif - PyObject_GC_UnTrack(o); - { - PyObject *etype, *eval, *etb; - PyErr_Fetch(&etype, &eval, &etb); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); - __pyx_memoryview___dealloc__(o); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); - PyErr_Restore(etype, eval, etb); - } - Py_CLEAR(p->obj); - Py_CLEAR(p->_size); - Py_CLEAR(p->_array_interface); - (*Py_TYPE(o)->tp_free)(o); -} - -static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; - if (p->obj) { - e = (*v)(p->obj, a); if (e) return e; - } - if (p->_size) { - e = (*v)(p->_size, a); if (e) return e; - } - if (p->_array_interface) { - e = (*v)(p->_array_interface, a); if (e) return e; - } - if (p->view.obj) { - e = (*v)(p->view.obj, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_memoryview(PyObject *o) { - PyObject* tmp; - struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; - tmp = ((PyObject*)p->obj); - p->obj = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->_size); - p->_size = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->_array_interface); - p->_array_interface = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - Py_CLEAR(p->view.obj); - return 0; -} -static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { - PyObject *r; - PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; - r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); - Py_DECREF(x); - return r; -} - -static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { - if (v) { - return __pyx_memoryview___setitem__(o, i, v); - } - else { - PyErr_Format(PyExc_NotImplementedError, - "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); - return -1; - } -} - -static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); -} - 
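The __pyx_getprop_* thunks above and below, together with the PyGetSetDef table that follows them, implement read-only computed attributes; a minimal self-contained sketch of that pattern, with hypothetical names and assuming CPython headers:

#include <Python.h>

static PyObject *get_answer(PyObject *self, void *closure) {
    (void)self; (void)closure;           /* unused in this toy getter */
    return PyLong_FromLong(42);          /* value is computed on every access */
}

static PyGetSetDef example_getsets[] = {
    {"answer", get_answer, NULL, "read-only demo attribute", NULL},
    {NULL, NULL, NULL, NULL, NULL}       /* sentinel entry */
};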
-static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); -} - -static PyMethodDef __pyx_methods_memoryview[] = { - {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, - {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, - {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, - {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, - {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, - {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, - {0, 0, 0, 0} -}; - -static struct PyGetSetDef __pyx_getsets_memoryview[] = { - {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, - {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, - {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, - {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, - {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, - {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, - {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, - {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, - {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, - {0, 0, 0, 0, 0} -}; - -static PySequenceMethods __pyx_tp_as_sequence_memoryview = { - __pyx_memoryview___len__, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - __pyx_sq_item_memoryview, /*sq_item*/ - 0, /*sq_slice*/ - 0, /*sq_ass_item*/ - 0, /*sq_ass_slice*/ - 0, /*sq_contains*/ - 0, /*sq_inplace_concat*/ - 0, /*sq_inplace_repeat*/ -}; - -static PyMappingMethods __pyx_tp_as_mapping_memoryview = { - __pyx_memoryview___len__, /*mp_length*/ - __pyx_memoryview___getitem__, /*mp_subscript*/ - __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ -}; - -static PyBufferProcs __pyx_tp_as_buffer_memoryview = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - __pyx_memoryview_getbuffer, /*bf_getbuffer*/ - 0, /*bf_releasebuffer*/ -}; - -static PyTypeObject __pyx_type___pyx_memoryview = { - PyVarObject_HEAD_INIT(0, 0) - "madmom.features.beats_crf.memoryview", /*tp_name*/ - sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - __pyx_memoryview___repr__, /*tp_repr*/ - 0, /*tp_as_number*/ - &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ - &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 
__pyx_memoryview___str__, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_memoryview, /*tp_traverse*/ - __pyx_tp_clear_memoryview, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_memoryview, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets_memoryview, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_memoryview, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ - #endif - #if PY_VERSION_HEX >= 0x030800b1 - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif -}; -static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; - -static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_memoryviewslice_obj *p; - PyObject *o = __pyx_tp_new_memoryview(t, a, k); - if (unlikely(!o)) return 0; - p = ((struct __pyx_memoryviewslice_obj *)o); - p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; - p->from_object = Py_None; Py_INCREF(Py_None); - p->from_slice.memview = NULL; - return o; -} - -static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { - struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - #endif - PyObject_GC_UnTrack(o); - { - PyObject *etype, *eval, *etb; - PyErr_Fetch(&etype, &eval, &etb); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); - __pyx_memoryviewslice___dealloc__(o); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); - PyErr_Restore(etype, eval, etb); - } - Py_CLEAR(p->from_object); - PyObject_GC_Track(o); - __pyx_tp_dealloc_memoryview(o); -} - -static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; - e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; - if (p->from_object) { - e = (*v)(p->from_object, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear__memoryviewslice(PyObject *o) { - PyObject* tmp; - struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; - __pyx_tp_clear_memoryview(o); - tmp = ((PyObject*)p->from_object); - p->from_object = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - __PYX_XDEC_MEMVIEW(&p->from_slice, 1); - return 0; -} - -static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); -} - -static PyMethodDef __pyx_methods__memoryviewslice[] = { - {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, - {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, - {0, 0, 0, 0} -}; - -static 
struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { - {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, - {0, 0, 0, 0, 0} -}; - -static PyTypeObject __pyx_type___pyx_memoryviewslice = { - PyVarObject_HEAD_INIT(0, 0) - "madmom.features.beats_crf._memoryviewslice", /*tp_name*/ - sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - #if CYTHON_COMPILING_IN_PYPY - __pyx_memoryview___repr__, /*tp_repr*/ - #else - 0, /*tp_repr*/ - #endif - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - #if CYTHON_COMPILING_IN_PYPY - __pyx_memoryview___str__, /*tp_str*/ - #else - 0, /*tp_str*/ - #endif - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - "Internal class for passing memoryview slices to Python", /*tp_doc*/ - __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ - __pyx_tp_clear__memoryviewslice, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods__memoryviewslice, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets__memoryviewslice, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new__memoryviewslice, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ - #endif - #if PY_VERSION_HEX >= 0x030800b1 - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif -}; - -static PyMethodDef __pyx_methods[] = { - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -#if CYTHON_PEP489_MULTI_PHASE_INIT -static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ -static int __pyx_pymod_exec_beats_crf(PyObject* module); /*proto*/ -static PyModuleDef_Slot __pyx_moduledef_slots[] = { - {Py_mod_create, (void*)__pyx_pymod_create}, - {Py_mod_exec, (void*)__pyx_pymod_exec_beats_crf}, - {0, NULL} -}; -#endif - -static struct PyModuleDef __pyx_moduledef = { - PyModuleDef_HEAD_INIT, - "beats_crf", - __pyx_k_This_module_contains_the_speed, /* m_doc */ - #if CYTHON_PEP489_MULTI_PHASE_INIT - 0, /* m_size */ - #else - -1, /* m_size */ - #endif - __pyx_methods /* m_methods */, - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_moduledef_slots, /* m_slots */ - #else - NULL, /* m_reload */ - #endif - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ -}; -#endif -#ifndef CYTHON_SMALL_CODE -#if defined(__clang__) - #define CYTHON_SMALL_CODE -#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) - #define CYTHON_SMALL_CODE __attribute__((cold)) -#else - #define CYTHON_SMALL_CODE -#endif -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, - 
{&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, - {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, - {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, - {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, - {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, - {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, - {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, - {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, - {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, - {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, - {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, - {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, - {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, - {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, - {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, - {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, - {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, - {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, - {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, - {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, - {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, - {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, - {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, - {&__pyx_n_s_activations, __pyx_k_activations, sizeof(__pyx_k_activations), 0, 0, 1, 1}, - {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, - {&__pyx_n_s_arange, __pyx_k_arange, sizeof(__pyx_k_arange), 0, 0, 1, 1}, - {&__pyx_n_s_asarray, __pyx_k_asarray, sizeof(__pyx_k_asarray), 0, 0, 1, 1}, - {&__pyx_n_s_astype, __pyx_k_astype, sizeof(__pyx_k_astype), 0, 0, 1, 1}, - {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, - {&__pyx_n_s_best_sequence, __pyx_k_best_sequence, sizeof(__pyx_k_best_sequence), 0, 0, 1, 1}, - {&__pyx_n_s_bps, __pyx_k_bps, sizeof(__pyx_k_bps), 0, 0, 1, 1}, - {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, - {&__pyx_n_u_c, 
__pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, - {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, - {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, - {&__pyx_n_s_constant, __pyx_k_constant, sizeof(__pyx_k_constant), 0, 0, 1, 1}, - {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, - {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, - {&__pyx_n_s_correlate1d, __pyx_k_correlate1d, sizeof(__pyx_k_correlate1d), 0, 0, 1, 1}, - {&__pyx_n_s_cval, __pyx_k_cval, sizeof(__pyx_k_cval), 0, 0, 1, 1}, - {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, - {&__pyx_n_s_divide, __pyx_k_divide, sizeof(__pyx_k_divide), 0, 0, 1, 1}, - {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, - {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, - {&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1}, - {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, - {&__pyx_n_s_enter, __pyx_k_enter, sizeof(__pyx_k_enter), 0, 0, 1, 1}, - {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, - {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, - {&__pyx_n_s_errstate, __pyx_k_errstate, sizeof(__pyx_k_errstate), 0, 0, 1, 1}, - {&__pyx_n_s_exit, __pyx_k_exit, sizeof(__pyx_k_exit), 0, 0, 1, 1}, - {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, - {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1}, - {&__pyx_n_s_float32, __pyx_k_float32, sizeof(__pyx_k_float32), 0, 0, 1, 1}, - {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, - {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, - {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, - {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, - {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, - {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, - {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, - {&__pyx_n_s_ignore, __pyx_k_ignore, sizeof(__pyx_k_ignore), 0, 0, 1, 1}, - {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, - {&__pyx_n_s_init, __pyx_k_init, sizeof(__pyx_k_init), 0, 0, 1, 1}, - {&__pyx_n_s_init_dist, __pyx_k_init_dist, sizeof(__pyx_k_init_dist), 0, 0, 1, 1}, - {&__pyx_n_s_initial_distribution, __pyx_k_initial_distribution, sizeof(__pyx_k_initial_distribution), 0, 0, 1, 1}, - {&__pyx_n_s_int, __pyx_k_int, sizeof(__pyx_k_int), 0, 0, 1, 1}, - {&__pyx_n_s_interval, __pyx_k_interval, sizeof(__pyx_k_interval), 0, 0, 1, 1}, - {&__pyx_n_s_interval_sigma, __pyx_k_interval_sigma, sizeof(__pyx_k_interval_sigma), 0, 0, 1, 1}, - {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, - {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, - {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, - {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, - {&__pyx_n_s_loc, __pyx_k_loc, sizeof(__pyx_k_loc), 0, 0, 1, 1}, - {&__pyx_n_s_log, __pyx_k_log, sizeof(__pyx_k_log), 0, 0, 1, 1}, - {&__pyx_n_s_log2, __pyx_k_log2, 
sizeof(__pyx_k_log2), 0, 0, 1, 1}, - {&__pyx_n_s_log_act, __pyx_k_log_act, sizeof(__pyx_k_log_act), 0, 0, 1, 1}, - {&__pyx_n_s_madmom_features_beats_crf, __pyx_k_madmom_features_beats_crf, sizeof(__pyx_k_madmom_features_beats_crf), 0, 0, 1, 1}, - {&__pyx_kp_s_madmom_features_beats_crf_pyx, __pyx_k_madmom_features_beats_crf_pyx, sizeof(__pyx_k_madmom_features_beats_crf_pyx), 0, 0, 1, 0}, - {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, - {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, - {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, - {&__pyx_n_s_move_range, __pyx_k_move_range, sizeof(__pyx_k_move_range), 0, 0, 1, 1}, - {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, - {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, - {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, - {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, - {&__pyx_n_s_new_prob, __pyx_k_new_prob, sizeof(__pyx_k_new_prob), 0, 0, 1, 1}, - {&__pyx_n_s_next_state, __pyx_k_next_state, sizeof(__pyx_k_next_state), 0, 0, 1, 1}, - {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, - {&__pyx_n_s_norm, __pyx_k_norm, sizeof(__pyx_k_norm), 0, 0, 1, 1}, - {&__pyx_n_s_norm_fact, __pyx_k_norm_fact, sizeof(__pyx_k_norm_fact), 0, 0, 1, 1}, - {&__pyx_n_s_norm_factor, __pyx_k_norm_factor, sizeof(__pyx_k_norm_factor), 0, 0, 1, 1}, - {&__pyx_n_s_normalisation_factors, __pyx_k_normalisation_factors, sizeof(__pyx_k_normalisation_factors), 0, 0, 1, 1}, - {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, - {&__pyx_n_s_num_st, __pyx_k_num_st, sizeof(__pyx_k_num_st), 0, 0, 1, 1}, - {&__pyx_n_s_num_states, __pyx_k_num_states, sizeof(__pyx_k_num_states), 0, 0, 1, 1}, - {&__pyx_n_s_num_tr, __pyx_k_num_tr, sizeof(__pyx_k_num_tr), 0, 0, 1, 1}, - {&__pyx_n_s_num_x, __pyx_k_num_x, sizeof(__pyx_k_num_x), 0, 0, 1, 1}, - {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, - {&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0}, - {&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0}, - {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, - {&__pyx_n_s_ones, __pyx_k_ones, sizeof(__pyx_k_ones), 0, 0, 1, 1}, - {&__pyx_n_s_origin, __pyx_k_origin, sizeof(__pyx_k_origin), 0, 0, 1, 1}, - {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, - {&__pyx_n_s_path, __pyx_k_path, sizeof(__pyx_k_path), 0, 0, 1, 1}, - {&__pyx_n_s_path_prob, __pyx_k_path_prob, sizeof(__pyx_k_path_prob), 0, 0, 1, 1}, - {&__pyx_n_s_pdf, __pyx_k_pdf, sizeof(__pyx_k_pdf), 0, 0, 1, 1}, - {&__pyx_n_s_pi, __pyx_k_pi, sizeof(__pyx_k_pi), 0, 0, 1, 1}, - {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, 
sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, - {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, - {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, - {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, - {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, - {&__pyx_n_s_scale, __pyx_k_scale, sizeof(__pyx_k_scale), 0, 0, 1, 1}, - {&__pyx_n_s_scipy_ndimage_filters, __pyx_k_scipy_ndimage_filters, sizeof(__pyx_k_scipy_ndimage_filters), 0, 0, 1, 1}, - {&__pyx_n_s_scipy_stats, __pyx_k_scipy_stats, sizeof(__pyx_k_scipy_stats), 0, 0, 1, 1}, - {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, - {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, - {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, - {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, - {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, - {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, - {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, - {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, - {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, - {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, - {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, - {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, - {&__pyx_n_s_sum, __pyx_k_sum, sizeof(__pyx_k_sum), 0, 0, 1, 1}, - {&__pyx_n_s_tau, __pyx_k_tau, sizeof(__pyx_k_tau), 0, 0, 1, 1}, - {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, - {&__pyx_n_s_trans, __pyx_k_trans, sizeof(__pyx_k_trans), 0, 0, 1, 1}, - {&__pyx_n_s_trans_dist, __pyx_k_trans_dist, sizeof(__pyx_k_trans_dist), 0, 0, 1, 1}, - {&__pyx_n_s_transition, __pyx_k_transition, sizeof(__pyx_k_transition), 0, 0, 1, 1}, - {&__pyx_n_s_transition_distribution, __pyx_k_transition_distribution, sizeof(__pyx_k_transition_distribution), 0, 0, 1, 1}, - {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, - {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, - {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, - {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, - {&__pyx_n_s_v_c, __pyx_k_v_c, sizeof(__pyx_k_v_c), 0, 0, 1, 1}, - {&__pyx_n_s_v_p, __pyx_k_v_p, sizeof(__pyx_k_v_p), 0, 0, 1, 1}, - {&__pyx_n_s_viterbi, __pyx_k_viterbi, sizeof(__pyx_k_viterbi), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 194, __pyx_L1_error) - __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 947, __pyx_L1_error) - __pyx_builtin_ValueError = 
__Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(2, 133, __pyx_L1_error) - __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(2, 148, __pyx_L1_error) - __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(2, 151, __pyx_L1_error) - __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(2, 2, __pyx_L1_error) - __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(2, 404, __pyx_L1_error) - __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(2, 613, __pyx_L1_error) - __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(2, 832, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); - - /* "madmom/features/beats_crf.pyx":134 - * # ignore division by zero warnings when taking the logarithm of 0.0, - * # the result -inf is fine anyways! - * with np.errstate(divide='ignore'): # <<<<<<<<<<<<<< - * init = np.log(init) - * trans = np.log(trans) - */ - __pyx_tuple_ = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 134, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple_); - __Pyx_GIVEREF(__pyx_tuple_); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":947 - * __pyx_import_array() - * except Exception: - * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< - * - * cdef inline int import_umath() except -1: - */ - __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 947, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__2); - __Pyx_GIVEREF(__pyx_tuple__2); - - /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":953 - * _import_umath() - * except Exception: - * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< - * - * cdef inline int import_ufunc() except -1: - */ - __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 953, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__3); - __Pyx_GIVEREF(__pyx_tuple__3); - - /* "View.MemoryView":133 - * - * if not self.ndim: - * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< - * - * if itemsize <= 0: - */ - __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(2, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__4); - __Pyx_GIVEREF(__pyx_tuple__4); - - /* "View.MemoryView":136 - * - * if itemsize <= 0: - * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< - * - * if not isinstance(format, bytes): - */ - __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(2, 136, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__5); - __Pyx_GIVEREF(__pyx_tuple__5); - - /* "View.MemoryView":148 - * - * if not self._shape: - * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(2, 148, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_tuple__6); - __Pyx_GIVEREF(__pyx_tuple__6); - - /* "View.MemoryView":176 - * self.data = <char *>malloc(self.len) - * if not self.data: - * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< - * - * if self.dtype_is_object: - */ - __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(2, 176, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__7); - __Pyx_GIVEREF(__pyx_tuple__7); - - /* "View.MemoryView":192 - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< - * info.buf = self.data - * info.len = self.len - */ - __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(2, 192, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__8); - __Pyx_GIVEREF(__pyx_tuple__8); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(2, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__9); - __Pyx_GIVEREF(__pyx_tuple__9); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(2, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__10); - __Pyx_GIVEREF(__pyx_tuple__10); - - /* "View.MemoryView":418 - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: - * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< - * - * have_slices, index = _unellipsify(index, self.view.ndim) - */ - __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(2, 418, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__11); - __Pyx_GIVEREF(__pyx_tuple__11); - - /* "View.MemoryView":495 - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< - * else: - * if len(self.view.format) == 1: - */ - __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(2, 495, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__12); - __Pyx_GIVEREF(__pyx_tuple__12); - - /* "View.MemoryView":520 - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: - * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< - * - * if flags & PyBUF_ND: - */ - __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(2, 520, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__13); - __Pyx_GIVEREF(__pyx_tuple__13); - - /* "View.MemoryView":570 - * if self.view.strides == NULL: - * - * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< - * - * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) - */ - 
__pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(2, 570, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__14); - __Pyx_GIVEREF(__pyx_tuple__14); - - /* "View.MemoryView":577 - * def suboffsets(self): - * if self.view.suboffsets == NULL: - * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< - * - * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) - */ - __pyx_tuple__15 = PyTuple_New(1); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(2, 577, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__15); - __Pyx_INCREF(__pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_int_neg_1); - PyTuple_SET_ITEM(__pyx_tuple__15, 0, __pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_tuple__15); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(2, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__16); - __Pyx_GIVEREF(__pyx_tuple__16); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(2, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__17); - __Pyx_GIVEREF(__pyx_tuple__17); - - /* "View.MemoryView":682 - * if item is Ellipsis: - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< - * seen_ellipsis = True - * else: - */ - __pyx_slice__18 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__18)) __PYX_ERR(2, 682, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__18); - __Pyx_GIVEREF(__pyx_slice__18); - - /* "View.MemoryView":703 - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(2, 703, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__19); - __Pyx_GIVEREF(__pyx_tuple__19); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(2, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__20); - __Pyx_GIVEREF(__pyx_tuple__20); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(2, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__21); - __Pyx_GIVEREF(__pyx_tuple__21); - - /* "madmom/features/beats_crf.pyx":28 - * - * - * def initial_distribution(num_states, interval): # <<<<<<<<<<<<<< - * """ - * 
Compute the initial distribution. - */ - __pyx_tuple__22 = PyTuple_Pack(3, __pyx_n_s_num_states, __pyx_n_s_interval, __pyx_n_s_init_dist); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__22); - __Pyx_GIVEREF(__pyx_tuple__22); - __pyx_codeobj__23 = (PyObject*)__Pyx_PyCode_New(2, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__22, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_features_beats_crf_pyx, __pyx_n_s_initial_distribution, 28, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__23)) __PYX_ERR(0, 28, __pyx_L1_error) - - /* "madmom/features/beats_crf.pyx":53 - * - * - * def transition_distribution(interval, interval_sigma): # <<<<<<<<<<<<<< - * """ - * Compute the transition distribution between beats. - */ - __pyx_tuple__24 = PyTuple_Pack(5, __pyx_n_s_interval, __pyx_n_s_interval_sigma, __pyx_n_s_norm, __pyx_n_s_move_range, __pyx_n_s_trans_dist); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(0, 53, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__24); - __Pyx_GIVEREF(__pyx_tuple__24); - __pyx_codeobj__25 = (PyObject*)__Pyx_PyCode_New(2, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__24, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_features_beats_crf_pyx, __pyx_n_s_transition_distribution, 53, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__25)) __PYX_ERR(0, 53, __pyx_L1_error) - - /* "madmom/features/beats_crf.pyx":83 - * - * - * def normalisation_factors(activations, transition_distribution): # <<<<<<<<<<<<<< - * """ - * Compute normalisation factors for model. - */ - __pyx_tuple__26 = PyTuple_Pack(3, __pyx_n_s_activations, __pyx_n_s_transition_distribution, __pyx_n_s_correlate1d); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(0, 83, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__26); - __Pyx_GIVEREF(__pyx_tuple__26); - __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(2, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_features_beats_crf_pyx, __pyx_n_s_normalisation_factors, 83, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) __PYX_ERR(0, 83, __pyx_L1_error) - - /* "madmom/features/beats_crf.pyx":106 - * - * - * def best_sequence(activations, interval, interval_sigma): # <<<<<<<<<<<<<< - * """ - * Extract the best beat sequence for a piece with the Viterbi algorithm. 
- */ - __pyx_tuple__28 = PyTuple_Pack(7, __pyx_n_s_activations, __pyx_n_s_interval, __pyx_n_s_interval_sigma, __pyx_n_s_init, __pyx_n_s_trans, __pyx_n_s_norm_fact, __pyx_n_s_log_act); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__28); - __Pyx_GIVEREF(__pyx_tuple__28); - __pyx_codeobj__29 = (PyObject*)__Pyx_PyCode_New(3, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__28, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_features_beats_crf_pyx, __pyx_n_s_best_sequence, 106, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__29)) __PYX_ERR(0, 106, __pyx_L1_error) - - /* "madmom/features/beats_crf.pyx":146 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * def viterbi(float [::1] pi, float[::1] transition, float[::1] norm_factor, # <<<<<<<<<<<<<< - * float [::1] activations, int tau): - * """ - */ - __pyx_tuple__30 = PyTuple_Pack(18, __pyx_n_s_pi, __pyx_n_s_transition, __pyx_n_s_norm_factor, __pyx_n_s_activations, __pyx_n_s_tau, __pyx_n_s_num_st, __pyx_n_s_num_tr, __pyx_n_s_num_x, __pyx_n_s_v_c, __pyx_n_s_v_p, __pyx_n_s_bps, __pyx_n_s_path, __pyx_n_s_k, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_next_state, __pyx_n_s_new_prob, __pyx_n_s_path_prob); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(0, 146, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__30); - __Pyx_GIVEREF(__pyx_tuple__30); - __pyx_codeobj__31 = (PyObject*)__Pyx_PyCode_New(5, 0, 18, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__30, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_features_beats_crf_pyx, __pyx_n_s_viterbi, 146, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__31)) __PYX_ERR(0, 146, __pyx_L1_error) - - /* "View.MemoryView":286 - * return self.name - * - * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< - * cdef strided = Enum("<strided and direct>") # default - * cdef indirect = Enum("<strided and indirect>") - */ - __pyx_tuple__32 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__32)) __PYX_ERR(2, 286, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__32); - __Pyx_GIVEREF(__pyx_tuple__32); - - /* "View.MemoryView":287 - * - * cdef generic = Enum("<strided and direct or indirect>") - * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< - * cdef indirect = Enum("<strided and indirect>") - * - */ - __pyx_tuple__33 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__33)) __PYX_ERR(2, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__33); - __Pyx_GIVEREF(__pyx_tuple__33); - - /* "View.MemoryView":288 - * cdef generic = Enum("<strided and direct or indirect>") - * cdef strided = Enum("<strided and direct>") # default - * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__34 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__34)) __PYX_ERR(2, 288, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__34); - __Pyx_GIVEREF(__pyx_tuple__34); - - /* "View.MemoryView":291 - * - * - * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< - * cdef indirect_contiguous = Enum("<contiguous and indirect>") - * - */ - __pyx_tuple__35 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__35)) __PYX_ERR(2, 291, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__35); - __Pyx_GIVEREF(__pyx_tuple__35); - - /* "View.MemoryView":292 - * - * cdef contiguous = Enum("<contiguous and 
direct>") - * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__36 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__36)) __PYX_ERR(2, 292, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__36); - __Pyx_GIVEREF(__pyx_tuple__36); - - /* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - __pyx_tuple__37 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__37)) __PYX_ERR(2, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__37); - __Pyx_GIVEREF(__pyx_tuple__37); - __pyx_codeobj__38 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__37, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__38)) __PYX_ERR(2, 1, __pyx_L1_error) - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { - if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - __pyx_float_0_000001 = PyFloat_FromDouble(0.000001); if (unlikely(!__pyx_float_0_000001)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ - -static int __Pyx_modinit_global_init_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); - /*--- Global init code ---*/ - generic = Py_None; Py_INCREF(Py_None); - strided = Py_None; Py_INCREF(Py_None); - indirect = Py_None; Py_INCREF(Py_None); - contiguous = Py_None; Py_INCREF(Py_None); - indirect_contiguous = Py_None; Py_INCREF(Py_None); - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); - /*--- Variable export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); - /*--- Function export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - 
-static int __Pyx_modinit_type_init_code(void) { - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); - /*--- Type init code ---*/ - __pyx_vtabptr_array = &__pyx_vtable_array; - __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; - if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error) - #if PY_VERSION_HEX < 0x030800B1 - __pyx_type___pyx_array.tp_print = 0; - #endif - if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error) - if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error) - __pyx_array_type = &__pyx_type___pyx_array; - if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(2, 279, __pyx_L1_error) - #if PY_VERSION_HEX < 0x030800B1 - __pyx_type___pyx_MemviewEnum.tp_print = 0; - #endif - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { - __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; - } - if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(2, 279, __pyx_L1_error) - __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; - __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; - __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; - __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; - __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; - __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; - __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; - __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; - __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; - if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error) - #if PY_VERSION_HEX < 0x030800B1 - __pyx_type___pyx_memoryview.tp_print = 0; - #endif - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { - __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; - } - if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error) - if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error) - __pyx_memoryview_type = &__pyx_type___pyx_memoryview; - __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; - __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; - __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char 
*))__pyx_memoryviewslice_convert_item_to_object; - __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; - __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; - if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(2, 965, __pyx_L1_error) - #if PY_VERSION_HEX < 0x030800B1 - __pyx_type___pyx_memoryviewslice.tp_print = 0; - #endif - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { - __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; - } - if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(2, 965, __pyx_L1_error) - if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(2, 965, __pyx_L1_error) - __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_modinit_type_import_code(void) { - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); - /*--- Type import code ---*/ - __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 9, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", - #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 - sizeof(PyTypeObject), - #else - sizeof(PyHeapTypeObject), - #endif - __Pyx_ImportType_CheckSize_Warn); - if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(3, 9, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 200, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore); - if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(1, 200, __pyx_L1_error) - __pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Ignore); - if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(1, 223, __pyx_L1_error) - __pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Ignore); - if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(1, 227, __pyx_L1_error) - __pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore); - if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(1, 239, __pyx_L1_error) - __pyx_ptype_5numpy_generic = __Pyx_ImportType(__pyx_t_1, "numpy", "generic", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn); - if (!__pyx_ptype_5numpy_generic) __PYX_ERR(1, 771, __pyx_L1_error) - __pyx_ptype_5numpy_number = __Pyx_ImportType(__pyx_t_1, "numpy", "number", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn); - if (!__pyx_ptype_5numpy_number) __PYX_ERR(1, 773, __pyx_L1_error) - __pyx_ptype_5numpy_integer = __Pyx_ImportType(__pyx_t_1, "numpy", "integer", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn); - if (!__pyx_ptype_5numpy_integer) 
__PYX_ERR(1, 775, __pyx_L1_error) - __pyx_ptype_5numpy_signedinteger = __Pyx_ImportType(__pyx_t_1, "numpy", "signedinteger", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn); - if (!__pyx_ptype_5numpy_signedinteger) __PYX_ERR(1, 777, __pyx_L1_error) - __pyx_ptype_5numpy_unsignedinteger = __Pyx_ImportType(__pyx_t_1, "numpy", "unsignedinteger", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn); - if (!__pyx_ptype_5numpy_unsignedinteger) __PYX_ERR(1, 779, __pyx_L1_error) - __pyx_ptype_5numpy_inexact = __Pyx_ImportType(__pyx_t_1, "numpy", "inexact", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn); - if (!__pyx_ptype_5numpy_inexact) __PYX_ERR(1, 781, __pyx_L1_error) - __pyx_ptype_5numpy_floating = __Pyx_ImportType(__pyx_t_1, "numpy", "floating", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn); - if (!__pyx_ptype_5numpy_floating) __PYX_ERR(1, 783, __pyx_L1_error) - __pyx_ptype_5numpy_complexfloating = __Pyx_ImportType(__pyx_t_1, "numpy", "complexfloating", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn); - if (!__pyx_ptype_5numpy_complexfloating) __PYX_ERR(1, 785, __pyx_L1_error) - __pyx_ptype_5numpy_flexible = __Pyx_ImportType(__pyx_t_1, "numpy", "flexible", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn); - if (!__pyx_ptype_5numpy_flexible) __PYX_ERR(1, 787, __pyx_L1_error) - __pyx_ptype_5numpy_character = __Pyx_ImportType(__pyx_t_1, "numpy", "character", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn); - if (!__pyx_ptype_5numpy_character) __PYX_ERR(1, 789, __pyx_L1_error) - __pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Ignore); - if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(1, 827, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_modinit_variable_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); - /*--- Variable import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); - /*--- Function import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - - -#ifndef CYTHON_NO_PYINIT_EXPORT -#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC -#elif PY_MAJOR_VERSION < 3 -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" void -#else -#define __Pyx_PyMODINIT_FUNC void -#endif -#else -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * -#else -#define __Pyx_PyMODINIT_FUNC PyObject * -#endif -#endif - - -#if PY_MAJOR_VERSION < 3 -__Pyx_PyMODINIT_FUNC initbeats_crf(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC initbeats_crf(void) -#else -__Pyx_PyMODINIT_FUNC PyInit_beats_crf(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC PyInit_beats_crf(void) -#if CYTHON_PEP489_MULTI_PHASE_INIT -{ - return PyModuleDef_Init(&__pyx_moduledef); -} -static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { - #if PY_VERSION_HEX >= 0x030700A1 - static PY_INT64_T main_interpreter_id = -1; - PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); - if (main_interpreter_id == -1) { - main_interpreter_id = current_id; - return (unlikely(current_id == -1)) ? 
-1 : 0; - } else if (unlikely(main_interpreter_id != current_id)) - #else - static PyInterpreterState *main_interpreter = NULL; - PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; - if (!main_interpreter) { - main_interpreter = current_interpreter; - } else if (unlikely(main_interpreter != current_interpreter)) - #endif - { - PyErr_SetString( - PyExc_ImportError, - "Interpreter change detected - this module can only be loaded into one interpreter per process."); - return -1; - } - return 0; -} -static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { - PyObject *value = PyObject_GetAttrString(spec, from_name); - int result = 0; - if (likely(value)) { - if (allow_none || value != Py_None) { - result = PyDict_SetItemString(moddict, to_name, value); - } - Py_DECREF(value); - } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); - } else { - result = -1; - } - return result; -} -static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { - PyObject *module = NULL, *moddict, *modname; - if (__Pyx_check_single_interpreter()) - return NULL; - if (__pyx_m) - return __Pyx_NewRef(__pyx_m); - modname = PyObject_GetAttrString(spec, "name"); - if (unlikely(!modname)) goto bad; - module = PyModule_NewObject(modname); - Py_DECREF(modname); - if (unlikely(!module)) goto bad; - moddict = PyModule_GetDict(module); - if (unlikely(!moddict)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; - return module; -bad: - Py_XDECREF(module); - return NULL; -} - - -static CYTHON_SMALL_CODE int __pyx_pymod_exec_beats_crf(PyObject *__pyx_pyinit_module) -#endif -#endif -{ - PyObject *__pyx_t_1 = NULL; - static PyThread_type_lock __pyx_t_2[8]; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannyDeclarations - #if CYTHON_PEP489_MULTI_PHASE_INIT - if (__pyx_m) { - if (__pyx_m == __pyx_pyinit_module) return 0; - PyErr_SetString(PyExc_RuntimeError, "Module 'beats_crf' has already been imported. 
Re-initialisation is not supported."); - return -1; - } - #elif PY_MAJOR_VERSION >= 3 - if (__pyx_m) return __Pyx_NewRef(__pyx_m); - #endif - #if CYTHON_REFNANNY -__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); -if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); -} -#endif - __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_beats_crf(void)", 0); - if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pxy_PyFrame_Initialize_Offsets - __Pxy_PyFrame_Initialize_Offsets(); - #endif - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pyx_CyFunction_USED - if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_FusedFunction_USED - if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Coroutine_USED - if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Generator_USED - if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_AsyncGen_USED - if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_StopAsyncIteration_USED - if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - #ifdef WITH_THREAD /* Python build with threading support? */ - PyEval_InitThreads(); - #endif - #endif - /*--- Module creation code ---*/ - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_m = __pyx_pyinit_module; - Py_INCREF(__pyx_m); - #else - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4("beats_crf", __pyx_methods, __pyx_k_This_module_contains_the_speed, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_d); - __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_b); - __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_cython_runtime); - if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - /*--- Initialize various global constants etc. 
---*/ - if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) - if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - if (__pyx_module_is_main_madmom__features__beats_crf) { - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - } - #if PY_MAJOR_VERSION >= 3 - { - PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) - if (!PyDict_GetItemString(modules, "madmom.features.beats_crf")) { - if (unlikely(PyDict_SetItemString(modules, "madmom.features.beats_crf", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) - } - } - #endif - /*--- Builtin init code ---*/ - if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Constants init code ---*/ - if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Global type/function init code ---*/ - (void)__Pyx_modinit_global_init_code(); - (void)__Pyx_modinit_variable_export_code(); - (void)__Pyx_modinit_function_export_code(); - if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) - if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) - (void)__Pyx_modinit_variable_import_code(); - (void)__Pyx_modinit_function_import_code(); - /*--- Execution code ---*/ - #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - - /* "madmom/features/beats_crf.pyx":20 - * from __future__ import absolute_import, division, print_function - * - * import numpy as np # <<<<<<<<<<<<<< - * - * cimport numpy as np - */ - __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "madmom/features/beats_crf.pyx":28 - * - * - * def initial_distribution(num_states, interval): # <<<<<<<<<<<<<< - * """ - * Compute the initial distribution. - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_6madmom_8features_9beats_crf_1initial_distribution, NULL, __pyx_n_s_madmom_features_beats_crf); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_initial_distribution, __pyx_t_1) < 0) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "madmom/features/beats_crf.pyx":53 - * - * - * def transition_distribution(interval, interval_sigma): # <<<<<<<<<<<<<< - * """ - * Compute the transition distribution between beats. - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_6madmom_8features_9beats_crf_3transition_distribution, NULL, __pyx_n_s_madmom_features_beats_crf); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 53, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_transition_distribution, __pyx_t_1) < 0) __PYX_ERR(0, 53, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "madmom/features/beats_crf.pyx":83 - * - * - * def normalisation_factors(activations, transition_distribution): # <<<<<<<<<<<<<< - * """ - * Compute normalisation factors for model. 
- */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_6madmom_8features_9beats_crf_5normalisation_factors, NULL, __pyx_n_s_madmom_features_beats_crf); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 83, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_normalisation_factors, __pyx_t_1) < 0) __PYX_ERR(0, 83, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "madmom/features/beats_crf.pyx":106 - * - * - * def best_sequence(activations, interval, interval_sigma): # <<<<<<<<<<<<<< - * """ - * Extract the best beat sequence for a piece with the Viterbi algorithm. - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_6madmom_8features_9beats_crf_7best_sequence, NULL, __pyx_n_s_madmom_features_beats_crf); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_best_sequence, __pyx_t_1) < 0) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "madmom/features/beats_crf.pyx":146 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * def viterbi(float [::1] pi, float[::1] transition, float[::1] norm_factor, # <<<<<<<<<<<<<< - * float [::1] activations, int tau): - * """ - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_6madmom_8features_9beats_crf_9viterbi, NULL, __pyx_n_s_madmom_features_beats_crf); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 146, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_viterbi, __pyx_t_1) < 0) __PYX_ERR(0, 146, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "madmom/features/beats_crf.pyx":1 - * # encoding: utf-8 # <<<<<<<<<<<<<< - * # cython: embedsignature=True - * """ - */ - __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "View.MemoryView":209 - * info.obj = self - * - * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< - * - * def __dealloc__(array self): - */ - __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 209, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(2, 209, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - PyType_Modified(__pyx_array_type); - - /* "View.MemoryView":286 - * return self.name - * - * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< - * cdef strided = Enum("<strided and direct>") # default - * cdef indirect = Enum("<strided and indirect>") - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__32, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 286, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(generic); - __Pyx_DECREF_SET(generic, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":287 - * - * cdef generic = Enum("<strided and direct or indirect>") - * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< - * cdef indirect = Enum("<strided and indirect>") - * - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__33, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - 
__Pyx_XGOTREF(strided); - __Pyx_DECREF_SET(strided, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":288 - * cdef generic = Enum("<strided and direct or indirect>") - * cdef strided = Enum("<strided and direct>") # default - * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__34, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 288, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(indirect); - __Pyx_DECREF_SET(indirect, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":291 - * - * - * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< - * cdef indirect_contiguous = Enum("<contiguous and indirect>") - * - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__35, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 291, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(contiguous); - __Pyx_DECREF_SET(contiguous, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":292 - * - * cdef contiguous = Enum("<contiguous and direct>") - * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__36, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 292, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(indirect_contiguous); - __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":316 - * - * DEF THREAD_LOCKS_PREALLOCATED = 8 - * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< - * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ - * PyThread_allocate_lock(), - */ - __pyx_memoryview_thread_locks_used = 0; - - /* "View.MemoryView":317 - * DEF THREAD_LOCKS_PREALLOCATED = 8 - * cdef int __pyx_memoryview_thread_locks_used = 0 - * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< - * PyThread_allocate_lock(), - * PyThread_allocate_lock(), - */ - __pyx_t_2[0] = PyThread_allocate_lock(); - __pyx_t_2[1] = PyThread_allocate_lock(); - __pyx_t_2[2] = PyThread_allocate_lock(); - __pyx_t_2[3] = PyThread_allocate_lock(); - __pyx_t_2[4] = PyThread_allocate_lock(); - __pyx_t_2[5] = PyThread_allocate_lock(); - __pyx_t_2[6] = PyThread_allocate_lock(); - __pyx_t_2[7] = PyThread_allocate_lock(); - memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); - - /* "View.MemoryView":549 - * info.obj = self - * - * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 549, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(2, 549, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - PyType_Modified(__pyx_memoryview_type); - - /* "View.MemoryView":995 - * return self.from_object - * - * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char 
*)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 995, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(2, 995, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - PyType_Modified(__pyx_memoryviewslice_type); - - /* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(2, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "(tree fragment)":11 - * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - */ - - /*--- Wrapped vars code ---*/ - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - if (__pyx_m) { - if (__pyx_d) { - __Pyx_AddTraceback("init madmom.features.beats_crf", __pyx_clineno, __pyx_lineno, __pyx_filename); - } - Py_CLEAR(__pyx_m); - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init madmom.features.beats_crf"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if CYTHON_PEP489_MULTI_PHASE_INIT - return (__pyx_m != NULL) ? 0 : -1; - #elif PY_MAJOR_VERSION >= 3 - return __pyx_m; - #else - return; - #endif -} - -/* --- Runtime support code --- */ -/* Refnanny */ -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule(modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, "RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif - -/* PyObjectGetAttrStr */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro)) - return tp->tp_getattro(obj, attr_name); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_getattr)) - return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); -#endif - return PyObject_GetAttr(obj, attr_name); -} -#endif - -/* GetBuiltinName */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name) { - PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); - if (unlikely(!result)) { - PyErr_Format(PyExc_NameError, -#if PY_MAJOR_VERSION >= 3 - "name '%U' is not defined", name); -#else - "name '%.200s' is not defined", PyString_AS_STRING(name)); -#endif - } - return result; -} - -/* RaiseArgTupleInvalid */ -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%.200s() takes %.8s %" 
CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? "" : "s", num_found); -} - -/* RaiseDoubleKeywords */ -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AsString(kw_name)); - #endif -} - -/* ParseKeywords */ -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - continue; - } - name = first_kw_arg; - #if PY_MAJOR_VERSION < 3 - if (likely(PyString_Check(key))) { - while (*name) { - if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) - && _PyString_Eq(**name, key)) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - if ((**argname == key) || ( - (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) - && _PyString_Eq(**argname, key))) { - goto arg_passed_twice; - } - argname++; - } - } - } else - #endif - if (likely(PyUnicode_Check(key))) { - while (*name) { - int cmp = (**name == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : - #endif - PyUnicode_Compare(**name, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - int cmp = (**argname == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : - #endif - PyUnicode_Compare(**argname, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) goto arg_passed_twice; - argname++; - } - } - } else - goto invalid_keyword_type; - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, key); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%.200s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%.200s() got an unexpected keyword argument '%.200s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -/* PyDictVersioning */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - return likely(dict) ? 
__PYX_GET_DICT_VERSION(dict) : 0; -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { - PyObject **dictptr = NULL; - Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; - if (offset) { -#if CYTHON_COMPILING_IN_CPYTHON - dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); -#else - dictptr = _PyObject_GetDictPtr(obj); -#endif - } - return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; -} -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) - return 0; - return obj_dict_version == __Pyx_get_object_dict_version(obj); -} -#endif - -/* GetModuleGlobalName */ -#if CYTHON_USE_DICT_VERSIONS -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) -#else -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) -#endif -{ - PyObject *result; -#if !CYTHON_AVOID_BORROWED_REFS -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 - result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } else if (unlikely(PyErr_Occurred())) { - return NULL; - } -#else - result = PyDict_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } -#endif -#else - result = PyObject_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } - PyErr_Clear(); -#endif - return __Pyx_GetBuiltinName(name); -} - -/* PyObjectCall */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { - PyObject *result; - ternaryfunc call = func->ob_type->tp_call; - if (unlikely(!call)) - return PyObject_Call(func, arg, kw); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = (*call)(func, arg, kw); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* SliceObject */ -static CYTHON_INLINE int __Pyx_PyObject_SetSlice(PyObject* obj, PyObject* value, - Py_ssize_t cstart, Py_ssize_t cstop, - PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, - int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) { -#if CYTHON_USE_TYPE_SLOTS - PyMappingMethods* mp; -#if PY_MAJOR_VERSION < 3 - PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; - if (likely(ms && ms->sq_ass_slice)) { - if (!has_cstart) { - if (_py_start && (*_py_start != Py_None)) { - cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); - if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; - } else - cstart = 0; - } - if (!has_cstop) { - if (_py_stop && (*_py_stop != Py_None)) { - cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); - if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; - } else - cstop = PY_SSIZE_T_MAX; - } - if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { - Py_ssize_t l = ms->sq_length(obj); - if 
(likely(l >= 0)) { - if (cstop < 0) { - cstop += l; - if (cstop < 0) cstop = 0; - } - if (cstart < 0) { - cstart += l; - if (cstart < 0) cstart = 0; - } - } else { - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - goto bad; - PyErr_Clear(); - } - } - return ms->sq_ass_slice(obj, cstart, cstop, value); - } -#endif - mp = Py_TYPE(obj)->tp_as_mapping; - if (likely(mp && mp->mp_ass_subscript)) -#endif - { - int result; - PyObject *py_slice, *py_start, *py_stop; - if (_py_slice) { - py_slice = *_py_slice; - } else { - PyObject* owned_start = NULL; - PyObject* owned_stop = NULL; - if (_py_start) { - py_start = *_py_start; - } else { - if (has_cstart) { - owned_start = py_start = PyInt_FromSsize_t(cstart); - if (unlikely(!py_start)) goto bad; - } else - py_start = Py_None; - } - if (_py_stop) { - py_stop = *_py_stop; - } else { - if (has_cstop) { - owned_stop = py_stop = PyInt_FromSsize_t(cstop); - if (unlikely(!py_stop)) { - Py_XDECREF(owned_start); - goto bad; - } - } else - py_stop = Py_None; - } - py_slice = PySlice_New(py_start, py_stop, Py_None); - Py_XDECREF(owned_start); - Py_XDECREF(owned_stop); - if (unlikely(!py_slice)) goto bad; - } -#if CYTHON_USE_TYPE_SLOTS - result = mp->mp_ass_subscript(obj, py_slice, value); -#else - result = value ? PyObject_SetItem(obj, py_slice, value) : PyObject_DelItem(obj, py_slice); -#endif - if (!_py_slice) { - Py_DECREF(py_slice); - } - return result; - } - PyErr_Format(PyExc_TypeError, - "'%.200s' object does not support slice %.10s", - Py_TYPE(obj)->tp_name, value ? "assignment" : "deletion"); -bad: - return -1; -} - -/* Import */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - #if PY_MAJOR_VERSION < 3 - PyObject *py_import; - py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); - if (!py_import) - goto bad; - #endif - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - { - #if PY_MAJOR_VERSION >= 3 - if (level == -1) { - if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { - module = PyImport_ImportModuleLevelObject( - name, global_dict, empty_dict, list, 1); - if (!module) { - if (!PyErr_ExceptionMatches(PyExc_ImportError)) - goto bad; - PyErr_Clear(); - } - } - level = 0; - } - #endif - if (!module) { - #if PY_MAJOR_VERSION < 3 - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); - Py_DECREF(py_level); - #else - module = PyImport_ImportModuleLevelObject( - name, global_dict, empty_dict, list, level); - #endif - } - } -bad: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(py_import); - #endif - Py_XDECREF(empty_list); - Py_XDECREF(empty_dict); - return module; -} - -/* ImportFrom */ -static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { - PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); - if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Format(PyExc_ImportError, - #if PY_MAJOR_VERSION < 3 - "cannot import name %.230s", PyString_AS_STRING(name)); - #else - "cannot import name %S", name); - #endif - } - return value; -} - -/* SetItemInt */ -static int __Pyx_SetItemInt_Generic(PyObject 
*o, PyObject *j, PyObject *v) { - int r; - if (!j) return -1; - r = PyObject_SetItem(o, j, v); - Py_DECREF(j); - return r; -} -static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, - CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS - if (is_list || PyList_CheckExact(o)) { - Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o)); - if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o)))) { - PyObject* old = PyList_GET_ITEM(o, n); - Py_INCREF(v); - PyList_SET_ITEM(o, n, v); - Py_DECREF(old); - return 1; - } - } else { - PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; - if (likely(m && m->sq_ass_item)) { - if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { - Py_ssize_t l = m->sq_length(o); - if (likely(l >= 0)) { - i += l; - } else { - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - return -1; - PyErr_Clear(); - } - } - return m->sq_ass_item(o, i, v); - } - } -#else -#if CYTHON_COMPILING_IN_PYPY - if (is_list || (PySequence_Check(o) && !PyDict_Check(o))) -#else - if (is_list || PySequence_Check(o)) -#endif - { - return PySequence_SetItem(o, i, v); - } -#endif - return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); -} - -/* PyCFunctionFastCall */ -#if CYTHON_FAST_PYCCALL -static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { - PyCFunctionObject *func = (PyCFunctionObject*)func_obj; - PyCFunction meth = PyCFunction_GET_FUNCTION(func); - PyObject *self = PyCFunction_GET_SELF(func); - int flags = PyCFunction_GET_FLAGS(func); - assert(PyCFunction_Check(func)); - assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); - assert(nargs >= 0); - assert(nargs == 0 || args != NULL); - /* _PyCFunction_FastCallDict() must not be called with an exception set, - because it may clear it (directly or indirectly) and so the - caller loses its exception */ - assert(!PyErr_Occurred()); - if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { - return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); - } else { - return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); - } -} -#endif - -/* PyFunctionFastCall */ -#if CYTHON_FAST_PYCALL -static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, - PyObject *globals) { - PyFrameObject *f; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject **fastlocals; - Py_ssize_t i; - PyObject *result; - assert(globals != NULL); - /* XXX Perhaps we should create a specialized - PyFrame_New() that doesn't take locals, but does - take builtins without sanity checking them. 
- */ - assert(tstate != NULL); - f = PyFrame_New(tstate, co, globals, NULL); - if (f == NULL) { - return NULL; - } - fastlocals = __Pyx_PyFrame_GetLocalsplus(f); - for (i = 0; i < na; i++) { - Py_INCREF(*args); - fastlocals[i] = *args++; - } - result = PyEval_EvalFrameEx(f,0); - ++tstate->recursion_depth; - Py_DECREF(f); - --tstate->recursion_depth; - return result; -} -#if 1 || PY_VERSION_HEX < 0x030600B1 -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { - PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); - PyObject *globals = PyFunction_GET_GLOBALS(func); - PyObject *argdefs = PyFunction_GET_DEFAULTS(func); - PyObject *closure; -#if PY_MAJOR_VERSION >= 3 - PyObject *kwdefs; -#endif - PyObject *kwtuple, **k; - PyObject **d; - Py_ssize_t nd; - Py_ssize_t nk; - PyObject *result; - assert(kwargs == NULL || PyDict_Check(kwargs)); - nk = kwargs ? PyDict_Size(kwargs) : 0; - if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { - return NULL; - } - if ( -#if PY_MAJOR_VERSION >= 3 - co->co_kwonlyargcount == 0 && -#endif - likely(kwargs == NULL || nk == 0) && - co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { - if (argdefs == NULL && co->co_argcount == nargs) { - result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); - goto done; - } - else if (nargs == 0 && argdefs != NULL - && co->co_argcount == Py_SIZE(argdefs)) { - /* function called with no arguments, but all parameters have - a default value: use default values as arguments .*/ - args = &PyTuple_GET_ITEM(argdefs, 0); - result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); - goto done; - } - } - if (kwargs != NULL) { - Py_ssize_t pos, i; - kwtuple = PyTuple_New(2 * nk); - if (kwtuple == NULL) { - result = NULL; - goto done; - } - k = &PyTuple_GET_ITEM(kwtuple, 0); - pos = i = 0; - while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { - Py_INCREF(k[i]); - Py_INCREF(k[i+1]); - i += 2; - } - nk = i / 2; - } - else { - kwtuple = NULL; - k = NULL; - } - closure = PyFunction_GET_CLOSURE(func); -#if PY_MAJOR_VERSION >= 3 - kwdefs = PyFunction_GET_KW_DEFAULTS(func); -#endif - if (argdefs != NULL) { - d = &PyTuple_GET_ITEM(argdefs, 0); - nd = Py_SIZE(argdefs); - } - else { - d = NULL; - nd = 0; - } -#if PY_MAJOR_VERSION >= 3 - result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, kwdefs, closure); -#else - result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, closure); -#endif - Py_XDECREF(kwtuple); -done: - Py_LeaveRecursiveCall(); - return result; -} -#endif -#endif - -/* PyObjectCall2Args */ -static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { - PyObject *args, *result = NULL; - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(function)) { - PyObject *args[2] = {arg1, arg2}; - return __Pyx_PyFunction_FastCall(function, args, 2); - } - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(function)) { - PyObject *args[2] = {arg1, arg2}; - return __Pyx_PyCFunction_FastCall(function, args, 2); - } - #endif - args = PyTuple_New(2); - if (unlikely(!args)) goto done; - Py_INCREF(arg1); - PyTuple_SET_ITEM(args, 0, arg1); - Py_INCREF(arg2); - PyTuple_SET_ITEM(args, 1, arg2); - Py_INCREF(function); - result = __Pyx_PyObject_Call(function, args, NULL); - Py_DECREF(args); - Py_DECREF(function); -done: - return result; -} - -/* 
PyObjectCallMethO */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { - PyObject *self, *result; - PyCFunction cfunc; - cfunc = PyCFunction_GET_FUNCTION(func); - self = PyCFunction_GET_SELF(func); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = cfunc(self, arg); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyObjectCallOneArg */ -#if CYTHON_COMPILING_IN_CPYTHON -static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *result; - PyObject *args = PyTuple_New(1); - if (unlikely(!args)) return NULL; - Py_INCREF(arg); - PyTuple_SET_ITEM(args, 0, arg); - result = __Pyx_PyObject_Call(func, args, NULL); - Py_DECREF(args); - return result; -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { -#if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCall(func, &arg, 1); - } -#endif - if (likely(PyCFunction_Check(func))) { - if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { - return __Pyx_PyObject_CallMethO(func, arg); -#if CYTHON_FAST_PYCCALL - } else if (__Pyx_PyFastCFunction_Check(func)) { - return __Pyx_PyCFunction_FastCall(func, &arg, 1); -#endif - } - } - return __Pyx__PyObject_CallOneArg(func, arg); -} -#else -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *result; - PyObject *args = PyTuple_Pack(1, arg); - if (unlikely(!args)) return NULL; - result = __Pyx_PyObject_Call(func, args, NULL); - Py_DECREF(args); - return result; -} -#endif - -/* PyObjectCallNoArg */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { -#if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCall(func, NULL, 0); - } -#endif -#ifdef __Pyx_CyFunction_USED - if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) -#else - if (likely(PyCFunction_Check(func))) -#endif - { - if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { - return __Pyx_PyObject_CallMethO(func, NULL); - } - } - return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); -} -#endif - -/* PyIntBinop */ -#if !CYTHON_COMPILING_IN_PYPY -#if PY_MAJOR_VERSION < 3 || CYTHON_USE_PYLONG_INTERNALS -#define __Pyx_PyInt_TrueDivideObjC_ZeroDivisionError(operand)\ - if (unlikely(zerodivision_check && ((operand) == 0))) {\ - PyErr_SetString(PyExc_ZeroDivisionError, "integer division by zero");\ - return NULL;\ - } -#endif -static PyObject* __Pyx_PyInt_TrueDivideObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { - (void)inplace; - (void)zerodivision_check; - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - const long b = intval; - long a = PyInt_AS_LONG(op1); - __Pyx_PyInt_TrueDivideObjC_ZeroDivisionError(b) - if (8 * sizeof(long) <= 53 || likely(labs(a) <= ((PY_LONG_LONG)1 << 53))) { - return PyFloat_FromDouble((double)a / (double)b); - } - return PyInt_Type.tp_as_number->nb_true_divide(op1, op2); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op1))) { - const long b = intval; - long a, x; - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - if (likely(__Pyx_sst_abs(size) <= 1)) { 
- a = likely(size) ? digits[0] : 0; - if (size == -1) a = -a; - } else { - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT && 1 * PyLong_SHIFT < 53) { - a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT && 1 * PyLong_SHIFT < 53) { - a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT && 2 * PyLong_SHIFT < 53) { - a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT && 2 * PyLong_SHIFT < 53) { - a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT && 3 * PyLong_SHIFT < 53) { - a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT && 3 * PyLong_SHIFT < 53) { - a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_true_divide(op1, op2); - } - } - __Pyx_PyInt_TrueDivideObjC_ZeroDivisionError(b) - if ((8 * sizeof(long) <= 53 || likely(labs(a) <= ((PY_LONG_LONG)1 << 53))) - || __Pyx_sst_abs(size) <= 52 / PyLong_SHIFT) { - return PyFloat_FromDouble((double)a / (double)b); - } - return PyLong_Type.tp_as_number->nb_true_divide(op1, op2); - return PyLong_FromLong(x); - - } - #endif - if (PyFloat_CheckExact(op1)) { - const long b = intval; - double a = PyFloat_AS_DOUBLE(op1); - double result; - if (unlikely(zerodivision_check && b == 0)) { - PyErr_SetString(PyExc_ZeroDivisionError, "float division by zero"); - return NULL; - } - PyFPE_START_PROTECT("divide", return NULL) - result = ((double)a) / (double)b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - } - return (inplace ? 
PyNumber_InPlaceTrueDivide : PyNumber_TrueDivide)(op1, op2); -} -#endif - -/* GetItemInt */ -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (!j) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyList_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyTuple_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS - if (is_list || PyList_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); - if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { - PyObject *r = PyList_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } - else if (PyTuple_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); - if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } else { - PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; - if (likely(m && m->sq_item)) { - if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { - Py_ssize_t l = m->sq_length(o); - if (likely(l >= 0)) { - i += l; - } else { - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - return NULL; - PyErr_Clear(); - } - } - return m->sq_item(o, i); - } - } -#else - if (is_list || PySequence_Check(o)) { - return PySequence_GetItem(o, i); - } -#endif - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -/* GetTopmostException */ -#if CYTHON_USE_EXC_INFO_STACK -static _PyErr_StackItem * -__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) -{ - _PyErr_StackItem *exc_info = tstate->exc_info; - while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && - exc_info->previous_item != NULL) - { - exc_info = exc_info->previous_item; - } - return exc_info; -} -#endif - -/* SaveResetException */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); - *type = exc_info->exc_type; - *value = exc_info->exc_value; - *tb = exc_info->exc_traceback; - #else - *type = tstate->exc_type; - *value = tstate->exc_value; - *tb = tstate->exc_traceback; - #endif - Py_XINCREF(*type); - Py_XINCREF(*value); - Py_XINCREF(*tb); -} -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = type; - exc_info->exc_value = value; - exc_info->exc_traceback = tb; - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = type; - tstate->exc_value = value; - tstate->exc_traceback = tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} -#endif - -/* GetException */ -#if CYTHON_FAST_THREAD_STATE -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) -#endif -{ - PyObject *local_type, *local_value, *local_tb; -#if CYTHON_FAST_THREAD_STATE - PyObject *tmp_type, *tmp_value, *tmp_tb; - local_type = tstate->curexc_type; - local_value = tstate->curexc_value; - local_tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -#else - PyErr_Fetch(&local_type, &local_value, &local_tb); -#endif - PyErr_NormalizeException(&local_type, &local_value, &local_tb); -#if CYTHON_FAST_THREAD_STATE - if (unlikely(tstate->curexc_type)) -#else - if (unlikely(PyErr_Occurred())) -#endif - goto bad; - #if PY_MAJOR_VERSION >= 3 - if (local_tb) { - if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) - goto bad; - } - #endif - Py_XINCREF(local_tb); - Py_XINCREF(local_type); - Py_XINCREF(local_value); - *type = local_type; - *value = local_value; - *tb = local_tb; -#if CYTHON_FAST_THREAD_STATE - #if CYTHON_USE_EXC_INFO_STACK - { - 
_PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = local_type; - exc_info->exc_value = local_value; - exc_info->exc_traceback = local_tb; - } - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = local_type; - tstate->exc_value = local_value; - tstate->exc_traceback = local_tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -#else - PyErr_SetExcInfo(local_type, local_value, local_tb); -#endif - return 0; -bad: - *type = 0; - *value = 0; - *tb = 0; - Py_XDECREF(local_type); - Py_XDECREF(local_value); - Py_XDECREF(local_tb); - return -1; -} - -/* PyErrFetchRestore */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} -#endif - -/* None */ -static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { - PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); -} - -/* MemviewSliceInit */ -static int -__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, - int ndim, - __Pyx_memviewslice *memviewslice, - int memview_is_new_reference) -{ - __Pyx_RefNannyDeclarations - int i, retval=-1; - Py_buffer *buf = &memview->view; - __Pyx_RefNannySetupContext("init_memviewslice", 0); - if (unlikely(memviewslice->memview || memviewslice->data)) { - PyErr_SetString(PyExc_ValueError, - "memviewslice is already initialized!"); - goto fail; - } - if (buf->strides) { - for (i = 0; i < ndim; i++) { - memviewslice->strides[i] = buf->strides[i]; - } - } else { - Py_ssize_t stride = buf->itemsize; - for (i = ndim - 1; i >= 0; i--) { - memviewslice->strides[i] = stride; - stride *= buf->shape[i]; - } - } - for (i = 0; i < ndim; i++) { - memviewslice->shape[i] = buf->shape[i]; - if (buf->suboffsets) { - memviewslice->suboffsets[i] = buf->suboffsets[i]; - } else { - memviewslice->suboffsets[i] = -1; - } - } - memviewslice->memview = memview; - memviewslice->data = (char *)buf->buf; - if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { - Py_INCREF(memview); - } - retval = 0; - goto no_fail; -fail: - memviewslice->memview = 0; - memviewslice->data = 0; - retval = -1; -no_fail: - __Pyx_RefNannyFinishContext(); - return retval; -} -#ifndef Py_NO_RETURN -#define Py_NO_RETURN -#endif -static void __pyx_fatalerror(const char *fmt, ...) 
Py_NO_RETURN { - va_list vargs; - char msg[200]; -#ifdef HAVE_STDARG_PROTOTYPES - va_start(vargs, fmt); -#else - va_start(vargs); -#endif - vsnprintf(msg, 200, fmt, vargs); - va_end(vargs); - Py_FatalError(msg); -} -static CYTHON_INLINE int -__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, - PyThread_type_lock lock) -{ - int result; - PyThread_acquire_lock(lock, 1); - result = (*acquisition_count)++; - PyThread_release_lock(lock); - return result; -} -static CYTHON_INLINE int -__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, - PyThread_type_lock lock) -{ - int result; - PyThread_acquire_lock(lock, 1); - result = (*acquisition_count)--; - PyThread_release_lock(lock); - return result; -} -static CYTHON_INLINE void -__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) -{ - int first_time; - struct __pyx_memoryview_obj *memview = memslice->memview; - if (unlikely(!memview || (PyObject *) memview == Py_None)) - return; - if (unlikely(__pyx_get_slice_count(memview) < 0)) - __pyx_fatalerror("Acquisition count is %d (line %d)", - __pyx_get_slice_count(memview), lineno); - first_time = __pyx_add_acquisition_count(memview) == 0; - if (unlikely(first_time)) { - if (have_gil) { - Py_INCREF((PyObject *) memview); - } else { - PyGILState_STATE _gilstate = PyGILState_Ensure(); - Py_INCREF((PyObject *) memview); - PyGILState_Release(_gilstate); - } - } -} -static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, - int have_gil, int lineno) { - int last_time; - struct __pyx_memoryview_obj *memview = memslice->memview; - if (unlikely(!memview || (PyObject *) memview == Py_None)) { - memslice->memview = NULL; - return; - } - if (unlikely(__pyx_get_slice_count(memview) <= 0)) - __pyx_fatalerror("Acquisition count is %d (line %d)", - __pyx_get_slice_count(memview), lineno); - last_time = __pyx_sub_acquisition_count(memview) == 1; - memslice->data = NULL; - if (unlikely(last_time)) { - if (have_gil) { - Py_CLEAR(memslice->memview); - } else { - PyGILState_STATE _gilstate = PyGILState_Ensure(); - Py_CLEAR(memslice->memview); - PyGILState_Release(_gilstate); - } - } else { - memslice->memview = NULL; - } -} - -/* PyErrExceptionMatches */ -#if CYTHON_FAST_THREAD_STATE -static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; i<n; i++) { - if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; - } -#endif - for (i=0; i<n; i++) { - if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; - } - return 0; -} -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { - PyObject *exc_type = tstate->curexc_type; - if (exc_type == err) return 1; - if (unlikely(!exc_type)) return 0; - if (unlikely(PyTuple_Check(err))) - return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); - return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); -} -#endif - -/* RaiseException */ -#if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, - CYTHON_UNUSED PyObject *cause) { - __Pyx_PyThreadState_declare - Py_XINCREF(type); - if (!value || value == Py_None) - value = NULL; - else - Py_INCREF(value); - if (!tb || tb == Py_None) - tb = NULL; - else { - Py_INCREF(tb); - if (!PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; - } - } - if (PyType_Check(type)) { 
-#if CYTHON_COMPILING_IN_PYPY - if (!value) { - Py_INCREF(Py_None); - value = Py_None; - } -#endif - PyErr_NormalizeException(&type, &value, &tb); - } else { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - value = type; - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - } - __Pyx_PyThreadState_assign - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} -#else -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - PyObject* owned_instance = NULL; - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if (PyExceptionClass_Check(type)) { - PyObject *instance_class = NULL; - if (value && PyExceptionInstance_Check(value)) { - instance_class = (PyObject*) Py_TYPE(value); - if (instance_class != type) { - int is_subclass = PyObject_IsSubclass(instance_class, type); - if (!is_subclass) { - instance_class = NULL; - } else if (unlikely(is_subclass == -1)) { - goto bad; - } else { - type = instance_class; - } - } - } - if (!instance_class) { - PyObject *args; - if (!value) - args = PyTuple_New(0); - else if (PyTuple_Check(value)) { - Py_INCREF(value); - args = value; - } else - args = PyTuple_Pack(1, value); - if (!args) - goto bad; - owned_instance = PyObject_Call(type, args, NULL); - Py_DECREF(args); - if (!owned_instance) - goto bad; - value = owned_instance; - if (!PyExceptionInstance_Check(value)) { - PyErr_Format(PyExc_TypeError, - "calling %R should have returned an instance of " - "BaseException, not %R", - type, Py_TYPE(value)); - goto bad; - } - } - } else { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - if (cause) { - PyObject *fixed_cause; - if (cause == Py_None) { - fixed_cause = NULL; - } else if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - "BaseException"); - goto bad; - } - PyException_SetCause(value, fixed_cause); - } - PyErr_SetObject(type, value); - if (tb) { -#if CYTHON_COMPILING_IN_PYPY - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); - Py_INCREF(tb); - PyErr_Restore(tmp_type, tmp_value, tb); - Py_XDECREF(tmp_tb); -#else - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } -#endif - } -bad: - Py_XDECREF(owned_instance); - return; -} -#endif - -/* ArgTypeTest */ -static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) -{ - if (unlikely(!type)) { - 
PyErr_SetString(PyExc_SystemError, "Missing type object"); - return 0; - } - else if (exact) { - #if PY_MAJOR_VERSION == 2 - if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; - #endif - } - else { - if (likely(__Pyx_TypeCheck(obj, type))) return 1; - } - PyErr_Format(PyExc_TypeError, - "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", - name, type->tp_name, Py_TYPE(obj)->tp_name); - return 0; -} - -/* BytesEquals */ -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY - return PyObject_RichCompareBool(s1, s2, equals); -#else - if (s1 == s2) { - return (equals == Py_EQ); - } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { - const char *ps1, *ps2; - Py_ssize_t length = PyBytes_GET_SIZE(s1); - if (length != PyBytes_GET_SIZE(s2)) - return (equals == Py_NE); - ps1 = PyBytes_AS_STRING(s1); - ps2 = PyBytes_AS_STRING(s2); - if (ps1[0] != ps2[0]) { - return (equals == Py_NE); - } else if (length == 1) { - return (equals == Py_EQ); - } else { - int result; -#if CYTHON_USE_UNICODE_INTERNALS - Py_hash_t hash1, hash2; - hash1 = ((PyBytesObject*)s1)->ob_shash; - hash2 = ((PyBytesObject*)s2)->ob_shash; - if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { - return (equals == Py_NE); - } -#endif - result = memcmp(ps1, ps2, (size_t)length); - return (equals == Py_EQ) ? (result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { - return (equals == Py_NE); - } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { - return (equals == Py_NE); - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -#endif -} - -/* UnicodeEquals */ -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY - return PyObject_RichCompareBool(s1, s2, equals); -#else -#if PY_MAJOR_VERSION < 3 - PyObject* owned_ref = NULL; -#endif - int s1_is_unicode, s2_is_unicode; - if (s1 == s2) { - goto return_eq; - } - s1_is_unicode = PyUnicode_CheckExact(s1); - s2_is_unicode = PyUnicode_CheckExact(s2); -#if PY_MAJOR_VERSION < 3 - if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { - owned_ref = PyUnicode_FromObject(s2); - if (unlikely(!owned_ref)) - return -1; - s2 = owned_ref; - s2_is_unicode = 1; - } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { - owned_ref = PyUnicode_FromObject(s1); - if (unlikely(!owned_ref)) - return -1; - s1 = owned_ref; - s1_is_unicode = 1; - } else if (((!s2_is_unicode) & (!s1_is_unicode))) { - return __Pyx_PyBytes_Equals(s1, s2, equals); - } -#endif - if (s1_is_unicode & s2_is_unicode) { - Py_ssize_t length; - int kind; - void *data1, *data2; - if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) - return -1; - length = __Pyx_PyUnicode_GET_LENGTH(s1); - if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { - goto return_ne; - } -#if CYTHON_USE_UNICODE_INTERNALS - { - Py_hash_t hash1, hash2; - #if CYTHON_PEP393_ENABLED - hash1 = ((PyASCIIObject*)s1)->hash; - hash2 = ((PyASCIIObject*)s2)->hash; - #else - hash1 = ((PyUnicodeObject*)s1)->hash; - hash2 = ((PyUnicodeObject*)s2)->hash; - #endif - if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { - goto return_ne; - } - } -#endif - kind = __Pyx_PyUnicode_KIND(s1); - if (kind != __Pyx_PyUnicode_KIND(s2)) { - goto return_ne; 
- } - data1 = __Pyx_PyUnicode_DATA(s1); - data2 = __Pyx_PyUnicode_DATA(s2); - if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { - goto return_ne; - } else if (length == 1) { - goto return_eq; - } else { - int result = memcmp(data1, data2, (size_t)(length * kind)); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_EQ) ? (result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & s2_is_unicode) { - goto return_ne; - } else if ((s2 == Py_None) & s1_is_unicode) { - goto return_ne; - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -return_eq: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_EQ); -return_ne: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_NE); -#endif -} - -/* None */ -static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { - Py_ssize_t q = a / b; - Py_ssize_t r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - return q; -} - -/* GetAttr */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { -#if CYTHON_USE_TYPE_SLOTS -#if PY_MAJOR_VERSION >= 3 - if (likely(PyUnicode_Check(n))) -#else - if (likely(PyString_Check(n))) -#endif - return __Pyx_PyObject_GetAttrStr(o, n); -#endif - return PyObject_GetAttr(o, n); -} - -/* ObjectGetItem */ -#if CYTHON_USE_TYPE_SLOTS -static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { - PyObject *runerr; - Py_ssize_t key_value; - PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; - if (unlikely(!(m && m->sq_item))) { - PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); - return NULL; - } - key_value = __Pyx_PyIndex_AsSsize_t(index); - if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { - return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); - } - if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { - PyErr_Clear(); - PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); - } - return NULL; -} -static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { - PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; - if (likely(m && m->mp_subscript)) { - return m->mp_subscript(obj, key); - } - return __Pyx_PyObject_GetIndex(obj, key); -} -#endif - -/* decode_c_string */ -static CYTHON_INLINE PyObject* __Pyx_decode_c_string( - const char* cstring, Py_ssize_t start, Py_ssize_t stop, - const char* encoding, const char* errors, - PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { - Py_ssize_t length; - if (unlikely((start < 0) | (stop < 0))) { - size_t slen = strlen(cstring); - if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { - PyErr_SetString(PyExc_OverflowError, - "c-string too long to convert to Python"); - return NULL; - } - length = (Py_ssize_t) slen; - if (start < 0) { - start += length; - if (start < 0) - start = 0; - } - if (stop < 0) - stop += length; - } - if (unlikely(stop <= start)) - return __Pyx_NewRef(__pyx_empty_unicode); - length = stop - start; - cstring += start; - if (decode_func) { - return decode_func(cstring, length, errors); - } else { - return PyUnicode_Decode(cstring, length, encoding, errors); - } -} - -/* GetAttr3 */ -static PyObject 
*__Pyx_GetAttr3Default(PyObject *d) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) - return NULL; - __Pyx_PyErr_Clear(); - Py_INCREF(d); - return d; -} -static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { - PyObject *r = __Pyx_GetAttr(o, n); - return (likely(r)) ? r : __Pyx_GetAttr3Default(d); -} - -/* RaiseTooManyValuesToUnpack */ -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); -} - -/* RaiseNeedMoreValuesToUnpack */ -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", - index, (index == 1) ? "" : "s"); -} - -/* RaiseNoneIterError */ -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); -} - -/* ExtTypeTest */ -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { - if (unlikely(!type)) { - PyErr_SetString(PyExc_SystemError, "Missing type object"); - return 0; - } - if (likely(__Pyx_TypeCheck(obj, type))) - return 1; - PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", - Py_TYPE(obj)->tp_name, type->tp_name); - return 0; -} - -/* SwapException */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = *type; - exc_info->exc_value = *value; - exc_info->exc_traceback = *tb; - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = *type; - tstate->exc_value = *value; - tstate->exc_traceback = *tb; - #endif - *type = tmp_type; - *value = tmp_value; - *tb = tmp_tb; -} -#else -static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); - PyErr_SetExcInfo(*type, *value, *tb); - *type = tmp_type; - *value = tmp_value; - *tb = tmp_tb; -} -#endif - -/* FastTypeChecks */ -#if CYTHON_COMPILING_IN_CPYTHON -static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { - while (a) { - a = a->tp_base; - if (a == b) - return 1; - } - return b == &PyBaseObject_Type; -} -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { - PyObject *mro; - if (a == b) return 1; - mro = a->tp_mro; - if (likely(mro)) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(mro); - for (i = 0; i < n; i++) { - if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) - return 1; - } - return 0; - } - return __Pyx_InBases(a, b); -} -#if PY_MAJOR_VERSION == 2 -static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { - PyObject *exception, *value, *tb; - int res; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ErrFetch(&exception, &value, &tb); - res = exc_type1 ? 
PyObject_IsSubclass(err, exc_type1) : 0; - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - if (!res) { - res = PyObject_IsSubclass(err, exc_type2); - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - } - __Pyx_ErrRestore(exception, value, tb); - return res; -} -#else -static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { - int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; - if (!res) { - res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); - } - return res; -} -#endif -static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - assert(PyExceptionClass_Check(exc_type)); - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; i<n; i++) { - if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; - } -#endif - for (i=0; i<n; i++) { - PyObject *t = PyTuple_GET_ITEM(tuple, i); - #if PY_MAJOR_VERSION < 3 - if (likely(exc_type == t)) return 1; - #endif - if (likely(PyExceptionClass_Check(t))) { - if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; - } else { - } - } - return 0; -} -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { - if (likely(err == exc_type)) return 1; - if (likely(PyExceptionClass_Check(err))) { - if (likely(PyExceptionClass_Check(exc_type))) { - return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); - } else if (likely(PyTuple_Check(exc_type))) { - return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); - } else { - } - } - return PyErr_GivenExceptionMatches(err, exc_type); -} -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { - assert(PyExceptionClass_Check(exc_type1)); - assert(PyExceptionClass_Check(exc_type2)); - if (likely(err == exc_type1 || err == exc_type2)) return 1; - if (likely(PyExceptionClass_Check(err))) { - return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); - } - return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); -} -#endif - -/* PyIntBinop */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { - (void)inplace; - (void)zerodivision_check; - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - const long b = intval; - long x; - long a = PyInt_AS_LONG(op1); - x = (long)((unsigned long)a + b); - if (likely((x^a) >= 0 || (x^b) >= 0)) - return PyInt_FromLong(x); - return PyLong_Type.tp_as_number->nb_add(op1, op2); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op1))) { - const long b = intval; - long a, x; -#ifdef HAVE_LONG_LONG - const PY_LONG_LONG llb = intval; - PY_LONG_LONG lla, llx; -#endif - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - if (likely(__Pyx_sst_abs(size) <= 1)) { - a = likely(size) ? 
digits[0] : 0; - if (size == -1) a = -a; - } else { - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_add(op1, op2); - } - } - x = a + b; - return PyLong_FromLong(x); -#ifdef HAVE_LONG_LONG - long_long: - llx = lla + llb; - return PyLong_FromLongLong(llx); -#endif - - - } - #endif - if (PyFloat_CheckExact(op1)) { - const long b = intval; - double a = PyFloat_AS_DOUBLE(op1); - double result; - PyFPE_START_PROTECT("add", return NULL) - 
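- /* The digit-unpacking switch above only inlines PyLong operands of at most four digits; wider values fall back to PyLong_Type's nb_add slot. This branch adds a float left operand to the integer constant in plain C double arithmetic. */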
result = ((double)a) + (double)b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - } - return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); -} -#endif - -/* None */ -static CYTHON_INLINE long __Pyx_div_long(long a, long b) { - long q = a / b; - long r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - return q; -} - -/* HasAttr */ -static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { - PyObject *r; - if (unlikely(!__Pyx_PyBaseString_Check(n))) { - PyErr_SetString(PyExc_TypeError, - "hasattr(): attribute name must be string"); - return -1; - } - r = __Pyx_GetAttr(o, n); - if (unlikely(!r)) { - PyErr_Clear(); - return 0; - } else { - Py_DECREF(r); - return 1; - } -} - -/* PyObject_GenericGetAttrNoDict */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { - PyErr_Format(PyExc_AttributeError, -#if PY_MAJOR_VERSION >= 3 - "'%.50s' object has no attribute '%U'", - tp->tp_name, attr_name); -#else - "'%.50s' object has no attribute '%.400s'", - tp->tp_name, PyString_AS_STRING(attr_name)); -#endif - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { - PyObject *descr; - PyTypeObject *tp = Py_TYPE(obj); - if (unlikely(!PyString_Check(attr_name))) { - return PyObject_GenericGetAttr(obj, attr_name); - } - assert(!tp->tp_dictoffset); - descr = _PyType_Lookup(tp, attr_name); - if (unlikely(!descr)) { - return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); - } - Py_INCREF(descr); - #if PY_MAJOR_VERSION < 3 - if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) - #endif - { - descrgetfunc f = Py_TYPE(descr)->tp_descr_get; - if (unlikely(f)) { - PyObject *res = f(descr, obj, (PyObject *)tp); - Py_DECREF(descr); - return res; - } - } - return descr; -} -#endif - -/* PyObject_GenericGetAttr */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { - if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { - return PyObject_GenericGetAttr(obj, attr_name); - } - return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); -} -#endif - -/* SetVTable */ -static int __Pyx_SetVtable(PyObject *dict, void *vtable) { -#if PY_VERSION_HEX >= 0x02070000 - PyObject *ob = PyCapsule_New(vtable, 0, 0); -#else - PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); -#endif - if (!ob) - goto bad; - if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) - goto bad; - Py_DECREF(ob); - return 0; -bad: - Py_XDECREF(ob); - return -1; -} - -/* PyObjectGetAttrStrNoError */ -static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) - __Pyx_PyErr_Clear(); -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { - PyObject *result; -#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { - return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); - } -#endif - result = __Pyx_PyObject_GetAttrStr(obj, attr_name); - if (unlikely(!result)) { - __Pyx_PyObject_GetAttrStr_ClearAttributeError(); - } - return result; -} - -/* SetupReduce */ -static int 
__Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { - int ret; - PyObject *name_attr; - name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); - if (likely(name_attr)) { - ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); - } else { - ret = -1; - } - if (unlikely(ret < 0)) { - PyErr_Clear(); - ret = 0; - } - Py_XDECREF(name_attr); - return ret; -} -static int __Pyx_setup_reduce(PyObject* type_obj) { - int ret = 0; - PyObject *object_reduce = NULL; - PyObject *object_reduce_ex = NULL; - PyObject *reduce = NULL; - PyObject *reduce_ex = NULL; - PyObject *reduce_cython = NULL; - PyObject *setstate = NULL; - PyObject *setstate_cython = NULL; -#if CYTHON_USE_PYTYPE_LOOKUP - if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; -#else - if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; -#endif -#if CYTHON_USE_PYTYPE_LOOKUP - object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; -#else - object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; -#endif - reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; - if (reduce_ex == object_reduce_ex) { -#if CYTHON_USE_PYTYPE_LOOKUP - object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; -#else - object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; -#endif - reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; - if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { - reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); - if (likely(reduce_cython)) { - ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - } else if (reduce == object_reduce || PyErr_Occurred()) { - goto __PYX_BAD; - } - setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); - if (!setstate) PyErr_Clear(); - if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { - setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); - if (likely(setstate_cython)) { - ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - } else if (!setstate || PyErr_Occurred()) { - goto __PYX_BAD; - } - } - PyType_Modified((PyTypeObject*)type_obj); - } - } - goto __PYX_GOOD; -__PYX_BAD: - if (!PyErr_Occurred()) - PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); - ret = -1; -__PYX_GOOD: -#if !CYTHON_USE_PYTYPE_LOOKUP - Py_XDECREF(object_reduce); - Py_XDECREF(object_reduce_ex); -#endif - Py_XDECREF(reduce); - Py_XDECREF(reduce_ex); - Py_XDECREF(reduce_cython); - Py_XDECREF(setstate); - Py_XDECREF(setstate_cython); - return ret; -} - -/* TypeImport */ -#ifndef __PYX_HAVE_RT_ImportType -#define __PYX_HAVE_RT_ImportType -static PyTypeObject *__Pyx_ImportType(PyObject *module, const 
char *module_name, const char *class_name, - size_t size, enum __Pyx_ImportType_CheckSize check_size) -{ - PyObject *result = 0; - char warning[200]; - Py_ssize_t basicsize; -#ifdef Py_LIMITED_API - PyObject *py_basicsize; -#endif - result = PyObject_GetAttrString(module, class_name); - if (!result) - goto bad; - if (!PyType_Check(result)) { - PyErr_Format(PyExc_TypeError, - "%.200s.%.200s is not a type object", - module_name, class_name); - goto bad; - } -#ifndef Py_LIMITED_API - basicsize = ((PyTypeObject *)result)->tp_basicsize; -#else - py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); - if (!py_basicsize) - goto bad; - basicsize = PyLong_AsSsize_t(py_basicsize); - Py_DECREF(py_basicsize); - py_basicsize = 0; - if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) - goto bad; -#endif - if ((size_t)basicsize < size) { - PyErr_Format(PyExc_ValueError, - "%.200s.%.200s size changed, may indicate binary incompatibility. " - "Expected %zd from C header, got %zd from PyObject", - module_name, class_name, size, basicsize); - goto bad; - } - if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { - PyErr_Format(PyExc_ValueError, - "%.200s.%.200s size changed, may indicate binary incompatibility. " - "Expected %zd from C header, got %zd from PyObject", - module_name, class_name, size, basicsize); - goto bad; - } - else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { - PyOS_snprintf(warning, sizeof(warning), - "%s.%s size changed, may indicate binary incompatibility. " - "Expected %zd from C header, got %zd from PyObject", - module_name, class_name, size, basicsize); - if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; - } - return (PyTypeObject *)result; -bad: - Py_XDECREF(result); - return NULL; -} -#endif - -/* CLineInTraceback */ -#ifndef CYTHON_CLINE_IN_TRACEBACK -static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { - PyObject *use_cline; - PyObject *ptype, *pvalue, *ptraceback; -#if CYTHON_COMPILING_IN_CPYTHON - PyObject **cython_runtime_dict; -#endif - if (unlikely(!__pyx_cython_runtime)) { - return c_line; - } - __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); -#if CYTHON_COMPILING_IN_CPYTHON - cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); - if (likely(cython_runtime_dict)) { - __PYX_PY_DICT_LOOKUP_IF_MODIFIED( - use_cline, *cython_runtime_dict, - __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) - } else -#endif - { - PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); - if (use_cline_obj) { - use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; - Py_DECREF(use_cline_obj); - } else { - PyErr_Clear(); - use_cline = NULL; - } - } - if (!use_cline) { - c_line = 0; - PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); - } - else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { - c_line = 0; - } - __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); - return c_line; -} -#endif - -/* CodeObjectCache */ -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { - int start = 0, mid = 0, end = count - 1; - if (end >= 0 && code_line > entries[end].code_line) { - return count; - } - while (start < end) { - mid = start + (end - start) / 2; - if (code_line < entries[mid].code_line) { - end = mid; - } else if (code_line > entries[mid].code_line) { - start = mid + 1; - } else { - return mid; - } - } - if (code_line <= entries[mid].code_line) { - return mid; - } else { - return mid + 1; - } -} -static PyCodeObject *__pyx_find_code_object(int code_line) { - PyCodeObject* code_object; - int pos; - if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { - return NULL; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { - return NULL; - } - code_object = __pyx_code_cache.entries[pos].code_object; - Py_INCREF(code_object); - return code_object; -} -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { - int pos, i; - __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; - if (unlikely(!code_line)) { - return; - } - if (unlikely(!entries)) { - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); - if (likely(entries)) { - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = 64; - __pyx_code_cache.count = 1; - entries[0].code_line = code_line; - entries[0].code_object = code_object; - Py_INCREF(code_object); - } - return; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { - PyCodeObject* tmp = entries[pos].code_object; - entries[pos].code_object = code_object; - Py_DECREF(tmp); - return; - } - if (__pyx_code_cache.count == __pyx_code_cache.max_count) { - int new_max = __pyx_code_cache.max_count + 64; - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( - __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); - if (unlikely(!entries)) { - return; - } - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = new_max; - } - for (i=__pyx_code_cache.count; i>pos; i--) { - entries[i] = entries[i-1]; - } - entries[pos].code_line = code_line; - entries[pos].code_object = code_object; - __pyx_code_cache.count++; - Py_INCREF(code_object); -} - -/* AddTraceback */ -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" -static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( - const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(filename); - #else - py_srcfile = PyUnicode_FromString(filename); - #endif - if (!py_srcfile) goto bad; - if (c_line) { - #if PY_MAJOR_VERSION < 3 - py_funcname = 
PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_code = __Pyx_PyCode_New( - 0, - 0, - 0, - 0, - 0, - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - py_line, - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - Py_DECREF(py_srcfile); - Py_DECREF(py_funcname); - return py_code; -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - return NULL; -} -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - if (c_line) { - c_line = __Pyx_CLineForTraceback(tstate, c_line); - } - py_code = __pyx_find_code_object(c_line ? -c_line : py_line); - if (!py_code) { - py_code = __Pyx_CreateCodeObjectForTraceback( - funcname, c_line, py_line, filename); - if (!py_code) goto bad; - __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); - } - py_frame = PyFrame_New( - tstate, /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - __pyx_d, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - __Pyx_PyFrame_SetLineNumber(py_frame, py_line); - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { - if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); - if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); - if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); - PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); - return -1; -} -static void __Pyx_ReleaseBuffer(Py_buffer *view) { - PyObject *obj = view->obj; - if (!obj) return; - if (PyObject_CheckBuffer(obj)) { - PyBuffer_Release(view); - return; - } - if ((0)) {} - view->obj = NULL; - Py_DECREF(obj); -} -#endif - - -/* MemviewSliceIsContig */ -static int -__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) -{ - int i, index, step, start; - Py_ssize_t itemsize = mvs.memview->view.itemsize; - if (order == 'F') { - step = 1; - start = 0; - } else { - step = -1; - start = ndim - 1; - } - for (i = 0; i < ndim; i++) { - index = start + step * i; - if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) - return 0; - itemsize *= mvs.shape[index]; - } - return 1; -} - -/* OverlappingSlices */ -static void -__pyx_get_array_memory_extents(__Pyx_memviewslice *slice, - void **out_start, void **out_end, - int ndim, size_t itemsize) -{ - char *start, *end; - int i; - start = end = slice->data; - for (i = 0; i < ndim; i++) { - Py_ssize_t stride = slice->strides[i]; - Py_ssize_t extent = slice->shape[i]; - if (extent == 0) { - *out_start = *out_end = start; - return; - } else { - if (stride > 0) - end += stride * (extent - 1); - else - start += stride * 
(extent - 1); - } - } - *out_start = start; - *out_end = end + itemsize; -} -static int -__pyx_slices_overlap(__Pyx_memviewslice *slice1, - __Pyx_memviewslice *slice2, - int ndim, size_t itemsize) -{ - void *start1, *end1, *start2, *end2; - __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); - __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); - return (start1 < end2) && (start2 < end1); -} - -/* Capsule */ -static CYTHON_INLINE PyObject * -__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) -{ - PyObject *cobj; -#if PY_VERSION_HEX >= 0x02070000 - cobj = PyCapsule_New(p, sig, NULL); -#else - cobj = PyCObject_FromVoidPtr(p, NULL); -#endif - return cobj; -} - -/* IsLittleEndian */ -static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) -{ - union { - uint32_t u32; - uint8_t u8[4]; - } S; - S.u32 = 0x01020304; - return S.u8[0] == 4; -} - -/* BufferFormatCheck */ -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type) { - stack[0].field = &ctx->root; - stack[0].parent_offset = 0; - ctx->root.type = type; - ctx->root.name = "buffer dtype"; - ctx->root.offset = 0; - ctx->head = stack; - ctx->head->field = &ctx->root; - ctx->fmt_offset = 0; - ctx->head->parent_offset = 0; - ctx->new_packmode = '@'; - ctx->enc_packmode = '@'; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->is_complex = 0; - ctx->is_valid_array = 0; - ctx->struct_alignment = 0; - while (type->typegroup == 'S') { - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = 0; - type = type->fields->type; - } -} -static int __Pyx_BufFmt_ParseNumber(const char** ts) { - int count; - const char* t = *ts; - if (*t < '0' || *t > '9') { - return -1; - } else { - count = *t++ - '0'; - while (*t >= '0' && *t <= '9') { - count *= 10; - count += *t++ - '0'; - } - } - *ts = t; - return count; -} -static int __Pyx_BufFmt_ExpectNumber(const char **ts) { - int number = __Pyx_BufFmt_ParseNumber(ts); - if (number == -1) - PyErr_Format(PyExc_ValueError,\ - "Does not understand character buffer dtype format string ('%c')", **ts); - return number; -} -static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { - PyErr_Format(PyExc_ValueError, - "Unexpected format string character: '%c'", ch); -} -static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { - switch (ch) { - case '?': return "'bool'"; - case 'c': return "'char'"; - case 'b': return "'signed char'"; - case 'B': return "'unsigned char'"; - case 'h': return "'short'"; - case 'H': return "'unsigned short'"; - case 'i': return "'int'"; - case 'I': return "'unsigned int'"; - case 'l': return "'long'"; - case 'L': return "'unsigned long'"; - case 'q': return "'long long'"; - case 'Q': return "'unsigned long long'"; - case 'f': return (is_complex ? "'complex float'" : "'float'"); - case 'd': return (is_complex ? "'complex double'" : "'double'"); - case 'g': return (is_complex ? 
"'complex long double'" : "'long double'"); - case 'T': return "a struct"; - case 'O': return "Python object"; - case 'P': return "a pointer"; - case 's': case 'p': return "a string"; - case 0: return "end"; - default: return "unparseable format string"; - } -} -static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return 2; - case 'i': case 'I': case 'l': case 'L': return 4; - case 'q': case 'Q': return 8; - case 'f': return (is_complex ? 8 : 4); - case 'd': return (is_complex ? 16 : 8); - case 'g': { - PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); - return 0; - } - case 'O': case 'P': return sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(short); - case 'i': case 'I': return sizeof(int); - case 'l': case 'L': return sizeof(long); - #ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(PY_LONG_LONG); - #endif - case 'f': return sizeof(float) * (is_complex ? 2 : 1); - case 'd': return sizeof(double) * (is_complex ? 2 : 1); - case 'g': return sizeof(long double) * (is_complex ? 2 : 1); - case 'O': case 'P': return sizeof(void*); - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} -typedef struct { char c; short x; } __Pyx_st_short; -typedef struct { char c; int x; } __Pyx_st_int; -typedef struct { char c; long x; } __Pyx_st_long; -typedef struct { char c; float x; } __Pyx_st_float; -typedef struct { char c; double x; } __Pyx_st_double; -typedef struct { char c; long double x; } __Pyx_st_longdouble; -typedef struct { char c; void *x; } __Pyx_st_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; -#endif -static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_st_float) - sizeof(float); - case 'd': return sizeof(__Pyx_st_double) - sizeof(double); - case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -/* These are for computing the padding at the end of the struct to align - on the first member of the struct. This will probably the same as above, - but we don't have any guarantees. 
- */ -typedef struct { short x; char c; } __Pyx_pad_short; -typedef struct { int x; char c; } __Pyx_pad_int; -typedef struct { long x; char c; } __Pyx_pad_long; -typedef struct { float x; char c; } __Pyx_pad_float; -typedef struct { double x; char c; } __Pyx_pad_double; -typedef struct { long double x; char c; } __Pyx_pad_longdouble; -typedef struct { void *x; char c; } __Pyx_pad_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; -#endif -static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); - case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); - case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { - switch (ch) { - case 'c': - return 'H'; - case 'b': case 'h': case 'i': - case 'l': case 'q': case 's': case 'p': - return 'I'; - case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': - return 'U'; - case 'f': case 'd': case 'g': - return (is_complex ? 'C' : 'R'); - case 'O': - return 'O'; - case 'P': - return 'P'; - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} -static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { - if (ctx->head == NULL || ctx->head->field == &ctx->root) { - const char* expected; - const char* quote; - if (ctx->head == NULL) { - expected = "end"; - quote = ""; - } else { - expected = ctx->head->field->type->name; - quote = "'"; - } - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected %s%s%s but got %s", - quote, expected, quote, - __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); - } else { - __Pyx_StructField* field = ctx->head->field; - __Pyx_StructField* parent = (ctx->head - 1)->field; - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", - field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), - parent->type->name, field->name); - } -} -static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { - char group; - size_t size, offset, arraysize = 1; - if (ctx->enc_type == 0) return 0; - if (ctx->head->field->type->arraysize[0]) { - int i, ndim = 0; - if (ctx->enc_type == 's' || ctx->enc_type == 'p') { - ctx->is_valid_array = ctx->head->field->type->ndim == 1; - ndim = 1; - if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { - PyErr_Format(PyExc_ValueError, - "Expected a dimension of size %zu, got %zu", - ctx->head->field->type->arraysize[0], ctx->enc_count); - return -1; - } - } - if (!ctx->is_valid_array) { - PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", - ctx->head->field->type->ndim, ndim); - return -1; - } - for (i = 0; i < ctx->head->field->type->ndim; i++) { - arraysize *= ctx->head->field->type->arraysize[i]; - } - ctx->is_valid_array = 0; - ctx->enc_count = 1; - } - group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, 
ctx->is_complex); - do { - __Pyx_StructField* field = ctx->head->field; - __Pyx_TypeInfo* type = field->type; - if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { - size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); - } else { - size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); - } - if (ctx->enc_packmode == '@') { - size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); - size_t align_mod_offset; - if (align_at == 0) return -1; - align_mod_offset = ctx->fmt_offset % align_at; - if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; - if (ctx->struct_alignment == 0) - ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, - ctx->is_complex); - } - if (type->size != size || type->typegroup != group) { - if (type->typegroup == 'C' && type->fields != NULL) { - size_t parent_offset = ctx->head->parent_offset + field->offset; - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = parent_offset; - continue; - } - if ((type->typegroup == 'H' || group == 'H') && type->size == size) { - } else { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - } - offset = ctx->head->parent_offset + field->offset; - if (ctx->fmt_offset != offset) { - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", - (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); - return -1; - } - ctx->fmt_offset += size; - if (arraysize) - ctx->fmt_offset += (arraysize - 1) * size; - --ctx->enc_count; - while (1) { - if (field == &ctx->root) { - ctx->head = NULL; - if (ctx->enc_count != 0) { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - break; - } - ctx->head->field = ++field; - if (field->type == NULL) { - --ctx->head; - field = ctx->head->field; - continue; - } else if (field->type->typegroup == 'S') { - size_t parent_offset = ctx->head->parent_offset + field->offset; - if (field->type->fields->type == NULL) continue; - field = field->type->fields; - ++ctx->head; - ctx->head->field = field; - ctx->head->parent_offset = parent_offset; - break; - } else { - break; - } - } - } while (ctx->enc_count); - ctx->enc_type = 0; - ctx->is_complex = 0; - return 0; -} -static PyObject * -__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) -{ - const char *ts = *tsp; - int i = 0, number, ndim; - ++ts; - if (ctx->new_count != 1) { - PyErr_SetString(PyExc_ValueError, - "Cannot handle repeated arrays in format string"); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ndim = ctx->head->field->type->ndim; - while (*ts && *ts != ')') { - switch (*ts) { - case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; - default: break; - } - number = __Pyx_BufFmt_ExpectNumber(&ts); - if (number == -1) return NULL; - if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) - return PyErr_Format(PyExc_ValueError, - "Expected a dimension of size %zu, got %d", - ctx->head->field->type->arraysize[i], number); - if (*ts != ',' && *ts != ')') - return PyErr_Format(PyExc_ValueError, - "Expected a comma in format string, got '%c'", *ts); - if (*ts == ',') ts++; - i++; - } - if (i != ndim) - return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", - ctx->head->field->type->ndim, i); - if (!*ts) { - PyErr_SetString(PyExc_ValueError, - "Unexpected end of format string, expected ')'"); - return NULL; - } - 
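- /* Every declared dimension matched the format string, so the array spec is marked valid and parsing resumes just past the closing ')'. */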
ctx->is_valid_array = 1; - ctx->new_count = 1; - *tsp = ++ts; - return Py_None; -} -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { - int got_Z = 0; - while (1) { - switch(*ts) { - case 0: - if (ctx->enc_type != 0 && ctx->head == NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - if (ctx->head != NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - return ts; - case ' ': - case '\r': - case '\n': - ++ts; - break; - case '<': - if (!__Pyx_Is_Little_Endian()) { - PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '>': - case '!': - if (__Pyx_Is_Little_Endian()) { - PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '=': - case '@': - case '^': - ctx->new_packmode = *ts++; - break; - case 'T': - { - const char* ts_after_sub; - size_t i, struct_count = ctx->new_count; - size_t struct_alignment = ctx->struct_alignment; - ctx->new_count = 1; - ++ts; - if (*ts != '{') { - PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_type = 0; - ctx->enc_count = 0; - ctx->struct_alignment = 0; - ++ts; - ts_after_sub = ts; - for (i = 0; i != struct_count; ++i) { - ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); - if (!ts_after_sub) return NULL; - } - ts = ts_after_sub; - if (struct_alignment) ctx->struct_alignment = struct_alignment; - } - break; - case '}': - { - size_t alignment = ctx->struct_alignment; - ++ts; - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_type = 0; - if (alignment && ctx->fmt_offset % alignment) { - ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); - } - } - return ts; - case 'x': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->fmt_offset += ctx->new_count; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->enc_packmode = ctx->new_packmode; - ++ts; - break; - case 'Z': - got_Z = 1; - ++ts; - if (*ts != 'f' && *ts != 'd' && *ts != 'g') { - __Pyx_BufFmt_RaiseUnexpectedChar('Z'); - return NULL; - } - CYTHON_FALLTHROUGH; - case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': - case 'l': case 'L': case 'q': case 'Q': - case 'f': case 'd': case 'g': - case 'O': case 'p': - if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && - (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { - ctx->enc_count += ctx->new_count; - ctx->new_count = 1; - got_Z = 0; - ++ts; - break; - } - CYTHON_FALLTHROUGH; - case 's': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_count = ctx->new_count; - ctx->enc_packmode = ctx->new_packmode; - ctx->enc_type = *ts; - ctx->is_complex = got_Z; - ++ts; - ctx->new_count = 1; - got_Z = 0; - break; - case ':': - ++ts; - while(*ts != ':') ++ts; - ++ts; - break; - case '(': - if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; - break; - default: - { - int number = __Pyx_BufFmt_ExpectNumber(&ts); - if (number == -1) return NULL; - ctx->new_count = (size_t)number; - } - } - } -} - -/* TypeInfoCompare */ - static int -__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) -{ - int i; - if (!a || !b) - return 0; - if (a == b) - return 1; - if (a->size != 
b->size || a->typegroup != b->typegroup || - a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { - if (a->typegroup == 'H' || b->typegroup == 'H') { - return a->size == b->size; - } else { - return 0; - } - } - if (a->ndim) { - for (i = 0; i < a->ndim; i++) - if (a->arraysize[i] != b->arraysize[i]) - return 0; - } - if (a->typegroup == 'S') { - if (a->flags != b->flags) - return 0; - if (a->fields || b->fields) { - if (!(a->fields && b->fields)) - return 0; - for (i = 0; a->fields[i].type && b->fields[i].type; i++) { - __Pyx_StructField *field_a = a->fields + i; - __Pyx_StructField *field_b = b->fields + i; - if (field_a->offset != field_b->offset || - !__pyx_typeinfo_cmp(field_a->type, field_b->type)) - return 0; - } - return !a->fields[i].type && !b->fields[i].type; - } - } - return 1; -} - -/* MemviewSliceValidateAndInit */ - static int -__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) -{ - if (buf->shape[dim] <= 1) - return 1; - if (buf->strides) { - if (spec & __Pyx_MEMVIEW_CONTIG) { - if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { - if (unlikely(buf->strides[dim] != sizeof(void *))) { - PyErr_Format(PyExc_ValueError, - "Buffer is not indirectly contiguous " - "in dimension %d.", dim); - goto fail; - } - } else if (unlikely(buf->strides[dim] != buf->itemsize)) { - PyErr_SetString(PyExc_ValueError, - "Buffer and memoryview are not contiguous " - "in the same dimension."); - goto fail; - } - } - if (spec & __Pyx_MEMVIEW_FOLLOW) { - Py_ssize_t stride = buf->strides[dim]; - if (stride < 0) - stride = -stride; - if (unlikely(stride < buf->itemsize)) { - PyErr_SetString(PyExc_ValueError, - "Buffer and memoryview are not contiguous " - "in the same dimension."); - goto fail; - } - } - } else { - if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { - PyErr_Format(PyExc_ValueError, - "C-contiguous buffer is not contiguous in " - "dimension %d", dim); - goto fail; - } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { - PyErr_Format(PyExc_ValueError, - "C-contiguous buffer is not indirect in " - "dimension %d", dim); - goto fail; - } else if (unlikely(buf->suboffsets)) { - PyErr_SetString(PyExc_ValueError, - "Buffer exposes suboffsets but no strides"); - goto fail; - } - } - return 1; -fail: - return 0; -} -static int -__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) -{ - if (spec & __Pyx_MEMVIEW_DIRECT) { - if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) { - PyErr_Format(PyExc_ValueError, - "Buffer not compatible with direct access " - "in dimension %d.", dim); - goto fail; - } - } - if (spec & __Pyx_MEMVIEW_PTR) { - if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { - PyErr_Format(PyExc_ValueError, - "Buffer is not indirectly accessible " - "in dimension %d.", dim); - goto fail; - } - } - return 1; -fail: - return 0; -} -static int -__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) -{ - int i; - if (c_or_f_flag & __Pyx_IS_F_CONTIG) { - Py_ssize_t stride = 1; - for (i = 0; i < ndim; i++) { - if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { - PyErr_SetString(PyExc_ValueError, - "Buffer not fortran contiguous."); - goto fail; - } - stride = stride * buf->shape[i]; - } - } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { - Py_ssize_t stride = 1; - for (i = ndim - 1; i >- 1; i--) { - if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { - PyErr_SetString(PyExc_ValueError, - "Buffer not C contiguous."); - goto fail; - } - stride = stride * 
buf->shape[i]; - } - } - return 1; -fail: - return 0; -} -static int __Pyx_ValidateAndInit_memviewslice( - int *axes_specs, - int c_or_f_flag, - int buf_flags, - int ndim, - __Pyx_TypeInfo *dtype, - __Pyx_BufFmt_StackElem stack[], - __Pyx_memviewslice *memviewslice, - PyObject *original_obj) -{ - struct __pyx_memoryview_obj *memview, *new_memview; - __Pyx_RefNannyDeclarations - Py_buffer *buf; - int i, spec = 0, retval = -1; - __Pyx_BufFmt_Context ctx; - int from_memoryview = __pyx_memoryview_check(original_obj); - __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); - if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) - original_obj)->typeinfo)) { - memview = (struct __pyx_memoryview_obj *) original_obj; - new_memview = NULL; - } else { - memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( - original_obj, buf_flags, 0, dtype); - new_memview = memview; - if (unlikely(!memview)) - goto fail; - } - buf = &memview->view; - if (unlikely(buf->ndim != ndim)) { - PyErr_Format(PyExc_ValueError, - "Buffer has wrong number of dimensions (expected %d, got %d)", - ndim, buf->ndim); - goto fail; - } - if (new_memview) { - __Pyx_BufFmt_Init(&ctx, stack, dtype); - if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; - } - if (unlikely((unsigned) buf->itemsize != dtype->size)) { - PyErr_Format(PyExc_ValueError, - "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " - "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", - buf->itemsize, - (buf->itemsize > 1) ? "s" : "", - dtype->name, - dtype->size, - (dtype->size > 1) ? "s" : ""); - goto fail; - } - if (buf->len > 0) { - for (i = 0; i < ndim; i++) { - spec = axes_specs[i]; - if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) - goto fail; - if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) - goto fail; - } - if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) - goto fail; - } - if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, - new_memview != NULL) == -1)) { - goto fail; - } - retval = 0; - goto no_fail; -fail: - Py_XDECREF(new_memview); - retval = -1; -no_fail: - __Pyx_RefNannyFinishContext(); - return retval; -} - -/* ObjectToMemviewSlice */ - static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_float(PyObject *obj, int writable_flag) { - __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_BufFmt_StackElem stack[1]; - int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; - int retcode; - if (obj == Py_None) { - result.memview = (struct __pyx_memoryview_obj *) Py_None; - return result; - } - retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, - (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, - &__Pyx_TypeInfo_float, stack, - &result, obj); - if (unlikely(retcode == -1)) - goto __pyx_fail; - return result; -__pyx_fail: - result.memview = NULL; - result.data = NULL; - return result; -} - -/* CIntFromPyVerify */ - #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) -#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) -#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ - {\ - func_type value = func_value;\ - if (sizeof(target_type) < sizeof(func_type)) {\ - if (unlikely(value != (func_type) (target_type) value)) {\ - func_type zero = 0;\ 
- if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ - return (target_type) -1;\ - if (is_unsigned && unlikely(value < zero))\ - goto raise_neg_overflow;\ - else\ - goto raise_overflow;\ - }\ - }\ - return (target_type) value;\ - } - -/* MemviewDtypeToObject */ - static CYTHON_INLINE PyObject *__pyx_memview_get_long(const char *itemp) { - return (PyObject *) __Pyx_PyInt_From_long(*(long *) itemp); -} -static CYTHON_INLINE int __pyx_memview_set_long(const char *itemp, PyObject *obj) { - long value = __Pyx_PyInt_As_long(obj); - if ((value == (long)-1) && PyErr_Occurred()) - return 0; - *(long *) itemp = value; - return 1; -} - -/* Declarations */ - #if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return ::std::complex< float >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return x + y*(__pyx_t_float_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - __pyx_t_float_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -/* Arithmetic */ - #if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - #if 1 - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - if (b.imag == 0) { - return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); - } else if (fabsf(b.real) >= fabsf(b.imag)) { - if (b.real == 0 && b.imag == 0) { - return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); - } else { - float r = b.imag / b.real; - float s = (float)(1.0) / (b.real + b.imag * r); - return __pyx_t_float_complex_from_parts( - (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); - } - } else { - float r = b.real / b.imag; - float s = (float)(1.0) / (b.imag + b.real * r); - return __pyx_t_float_complex_from_parts( - (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); - } - } - #else - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - if (b.imag == 0) { - return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); - } else { - float denom = b.real * b.real + b.imag * b.imag; - return __pyx_t_float_complex_from_parts( - (a.real * b.real + a.imag * b.imag) / denom, - (a.imag * b.real - a.real * b.imag) / denom); - } - } - #endif - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { - 
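- /* A complex float is zero exactly when both its real and imaginary parts are zero. */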
return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrtf(z.real*z.real + z.imag*z.imag); - #else - return hypotf(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - float denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - return __Pyx_c_prod_float(a, a); - case 3: - z = __Pyx_c_prod_float(a, a); - return __Pyx_c_prod_float(z, a); - case 4: - z = __Pyx_c_prod_float(a, a); - return __Pyx_c_prod_float(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } else if (b.imag == 0) { - z.real = powf(a.real, b.real); - z.imag = 0; - return z; - } else if (a.real > 0) { - r = a.real; - theta = 0; - } else { - r = -a.real; - theta = atan2f(0.0, -1.0); - } - } else { - r = __Pyx_c_abs_float(a); - theta = atan2f(a.imag, a.real); - } - lnr = logf(r); - z_r = expf(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cosf(z_theta); - z.imag = z_r * sinf(z_theta); - return z; - } - #endif -#endif - -/* Declarations */ - #if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return ::std::complex< double >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return x + y*(__pyx_t_double_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - __pyx_t_double_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -/* Arithmetic */ - #if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - #if 1 - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - if (b.imag == 0) { - return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); - } else if (fabs(b.real) >= fabs(b.imag)) { - if (b.real == 0 && b.imag == 0) { - return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); - } else { - double r = b.imag / 
b.real; - double s = (double)(1.0) / (b.real + b.imag * r); - return __pyx_t_double_complex_from_parts( - (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); - } - } else { - double r = b.real / b.imag; - double s = (double)(1.0) / (b.imag + b.real * r); - return __pyx_t_double_complex_from_parts( - (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); - } - } - #else - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - if (b.imag == 0) { - return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); - } else { - double denom = b.real * b.real + b.imag * b.imag; - return __pyx_t_double_complex_from_parts( - (a.real * b.real + a.imag * b.imag) / denom, - (a.imag * b.real - a.real * b.imag) / denom); - } - } - #endif - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrt(z.real*z.real + z.imag*z.imag); - #else - return hypot(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - double denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - return __Pyx_c_prod_double(a, a); - case 3: - z = __Pyx_c_prod_double(a, a); - return __Pyx_c_prod_double(z, a); - case 4: - z = __Pyx_c_prod_double(a, a); - return __Pyx_c_prod_double(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } else if (b.imag == 0) { - z.real = pow(a.real, b.real); - z.imag = 0; - return z; - } else if (a.real > 0) { - r = a.real; - theta = 0; - } else { - r = -a.real; - theta = atan2(0.0, -1.0); - } - } else { - r = __Pyx_c_abs_double(a); - theta = atan2(a.imag, a.real); - } - lnr = log(r); - z_r = exp(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cos(z_theta); - z.imag = z_r * sin(z_theta); - return z; - } - #endif -#endif - -/* MemviewSliceCopyTemplate */ - static __Pyx_memviewslice -__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, - const char *mode, int ndim, - size_t sizeof_dtype, int contig_flag, - int dtype_is_object) -{ - __Pyx_RefNannyDeclarations - int i; - __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; - struct __pyx_memoryview_obj *from_memview = from_mvs->memview; - Py_buffer *buf = &from_memview->view; - PyObject *shape_tuple = NULL; - PyObject *temp_int = NULL; - struct __pyx_array_obj *array_obj = NULL; - struct __pyx_memoryview_obj *memview_obj = NULL; - __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); - for (i = 0; i < ndim; i++) { - if (unlikely(from_mvs->suboffsets[i] >= 0)) { - PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " - 
"indirect dimensions (axis %d)", i); - goto fail; - } - } - shape_tuple = PyTuple_New(ndim); - if (unlikely(!shape_tuple)) { - goto fail; - } - __Pyx_GOTREF(shape_tuple); - for(i = 0; i < ndim; i++) { - temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); - if(unlikely(!temp_int)) { - goto fail; - } else { - PyTuple_SET_ITEM(shape_tuple, i, temp_int); - temp_int = NULL; - } - } - array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); - if (unlikely(!array_obj)) { - goto fail; - } - __Pyx_GOTREF(array_obj); - memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( - (PyObject *) array_obj, contig_flag, - dtype_is_object, - from_mvs->memview->typeinfo); - if (unlikely(!memview_obj)) - goto fail; - if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) - goto fail; - if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, - dtype_is_object) < 0)) - goto fail; - goto no_fail; -fail: - __Pyx_XDECREF(new_mvs.memview); - new_mvs.memview = NULL; - new_mvs.data = NULL; -no_fail: - __Pyx_XDECREF(shape_tuple); - __Pyx_XDECREF(temp_int); - __Pyx_XDECREF(array_obj); - __Pyx_RefNannyFinishContext(); - return new_mvs; -} - -/* CIntFromPy */ - static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const int neg_one = (int) -1, const_zero = (int) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(int) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (int) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) - case 2: - if (8 * sizeof(int) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { - return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(int) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { - return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(int) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { - return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if 
(unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (int) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(int) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) - case -2: - if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(int) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(int) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(int) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { - return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | 
(int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - } -#endif - if (sizeof(int) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - int val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (int) -1; - } - } else { - int val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (int) -1; - val = __Pyx_PyInt_As_int(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to int"); - return (int) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to int"); - return (int) -1; -} - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const long neg_one = (long) -1, const_zero = (long) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(long) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(long) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(long) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(long), - little, !is_unsigned); - } -} - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const int neg_one = (int) -1, const_zero = (int) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(int) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(int) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(int) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= 
sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(int), - little, !is_unsigned); - } -} - -/* CIntFromPy */ - static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const long neg_one = (long) -1, const_zero = (long) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(long) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (long) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) - case 2: - if (8 * sizeof(long) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { - return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(long) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { - return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(long) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { - return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (long) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(long) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) - case -2: - if (8 * sizeof(long) - 1 > 1 * 
PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(long) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(long) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(long) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - } -#endif - if (sizeof(long) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - long val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; 
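- /* endianness probe: the lowest-addressed byte of 'one' is 1 only on little-endian hosts, so is_little tells _PyLong_AsByteArray which byte order to write into the overlaid value */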
- unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (long) -1; - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (long) -1; - val = __Pyx_PyInt_As_long(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to long"); - return (long) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long) -1; -} - -/* CIntFromPy */ - static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const char neg_one = (char) -1, const_zero = (char) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(char) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (char) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (char) 0; - case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) - case 2: - if (8 * sizeof(char) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { - return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(char) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { - return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(char) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { - return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (char) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(char) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, 
PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (char) 0; - case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) - case -2: - if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { - return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(char) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { - return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { - return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(char) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { - return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { - return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(char) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { - return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - } -#endif - if (sizeof(char) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - 
PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - char val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (char) -1; - } - } else { - char val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (char) -1; - val = __Pyx_PyInt_As_char(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to char"); - return (char) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to char"); - return (char) -1; -} - -/* ObjectToMemviewSlice */ - static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_long(PyObject *obj, int writable_flag) { - __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_BufFmt_StackElem stack[1]; - int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; - int retcode; - if (obj == Py_None) { - result.memview = (struct __pyx_memoryview_obj *) Py_None; - return result; - } - retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, - (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, - &__Pyx_TypeInfo_long, stack, - &result, obj); - if (unlikely(retcode == -1)) - goto __pyx_fail; - return result; -__pyx_fail: - result.memview = NULL; - result.data = NULL; - return result; -} - -/* ObjectToMemviewSlice */ - static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_long(PyObject *obj, int writable_flag) { - __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_BufFmt_StackElem stack[1]; - int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; - int retcode; - if (obj == Py_None) { - result.memview = (struct __pyx_memoryview_obj *) Py_None; - return result; - } - retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, - (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, - &__Pyx_TypeInfo_long, stack, - &result, obj); - if (unlikely(retcode == -1)) - goto __pyx_fail; - return result; -__pyx_fail: - result.memview = NULL; - result.data = NULL; - return result; -} - -/* CheckBinaryVersion */ - static int __Pyx_check_binary_version(void) { - char ctversion[4], rtversion[4]; - PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); - if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { - char message[200]; - PyOS_snprintf(message, sizeof(message), - "compiletime version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - return PyErr_WarnEx(NULL, message, 1); - } - return 0; -} - -/* InitStrings */ - static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else - if 
(t->is_unicode | t->is_str) { - if (t->intern) { - *t->p = PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - if (PyObject_Hash(*t->p) == -1) - return -1; - ++t; - } - return 0; -} - -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { - return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); -} -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { - Py_ssize_t ignore; - return __Pyx_PyObject_AsStringAndSize(o, &ignore); -} -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -#if !CYTHON_PEP393_ENABLED -static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - char* defenc_c; - PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); - if (!defenc) return NULL; - defenc_c = PyBytes_AS_STRING(defenc); -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - { - char* end = defenc_c + PyBytes_GET_SIZE(defenc); - char* c; - for (c = defenc_c; c < end; c++) { - if ((unsigned char) (*c) >= 128) { - PyUnicode_AsASCIIString(o); - return NULL; - } - } - } -#endif - *length = PyBytes_GET_SIZE(defenc); - return defenc_c; -} -#else -static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - if (likely(PyUnicode_IS_ASCII(o))) { - *length = PyUnicode_GET_LENGTH(o); - return PyUnicode_AsUTF8(o); - } else { - PyUnicode_AsASCIIString(o); - return NULL; - } -#else - return PyUnicode_AsUTF8AndSize(o, length); -#endif -} -#endif -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT - if ( -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - __Pyx_sys_getdefaultencoding_not_ascii && -#endif - PyUnicode_Check(o)) { - return __Pyx_PyUnicode_AsStringAndSize(o, length); - } else -#endif -#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) - if (PyByteArray_Check(o)) { - *length = PyByteArray_GET_SIZE(o); - return PyByteArray_AS_STRING(o); - } else -#endif - { - char* result; - int r = PyBytes_AsStringAndSize(o, &result, length); - if (unlikely(r < 0)) { - return NULL; - } else { - return result; - } - } -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { - int retval; - if (unlikely(!x)) return -1; - retval = __Pyx_PyObject_IsTrue(x); - Py_DECREF(x); - return retval; -} -static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { -#if PY_MAJOR_VERSION >= 3 - if (PyLong_Check(result)) { - if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, - "__int__ returned non-int (type %.200s). 
" - "The ability to return an instance of a strict subclass of int " - "is deprecated, and may be removed in a future version of Python.", - Py_TYPE(result)->tp_name)) { - Py_DECREF(result); - return NULL; - } - return result; - } -#endif - PyErr_Format(PyExc_TypeError, - "__%.4s__ returned non-%.4s (type %.200s)", - type_name, type_name, Py_TYPE(result)->tp_name); - Py_DECREF(result); - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { -#if CYTHON_USE_TYPE_SLOTS - PyNumberMethods *m; -#endif - const char *name = NULL; - PyObject *res = NULL; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x) || PyLong_Check(x))) -#else - if (likely(PyLong_Check(x))) -#endif - return __Pyx_NewRef(x); -#if CYTHON_USE_TYPE_SLOTS - m = Py_TYPE(x)->tp_as_number; - #if PY_MAJOR_VERSION < 3 - if (m && m->nb_int) { - name = "int"; - res = m->nb_int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = m->nb_long(x); - } - #else - if (likely(m && m->nb_int)) { - name = "int"; - res = m->nb_int(x); - } - #endif -#else - if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { - res = PyNumber_Int(x); - } -#endif - if (likely(res)) { -#if PY_MAJOR_VERSION < 3 - if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { -#else - if (unlikely(!PyLong_CheckExact(res))) { -#endif - return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject *x; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(b))) { - if (sizeof(Py_ssize_t) >= sizeof(long)) - return PyInt_AS_LONG(b); - else - return PyInt_AsSsize_t(b); - } -#endif - if (likely(PyLong_CheckExact(b))) { - #if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)b)->ob_digit; - const Py_ssize_t size = Py_SIZE(b); - if (likely(__Pyx_sst_abs(size) <= 1)) { - ival = likely(size) ? digits[0] : 0; - if (size == -1) ival = -ival; - return ival; - } else { - switch (size) { - case 2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - } - } - #endif - return PyLong_AsSsize_t(b); - } - x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { - return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); -} -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { - return PyInt_FromSize_t(ival); -} - - -#endif /* Py_PYTHON_H */ diff --git a/spaces/Marshalls/testmtd/feature_extraction/process_motions.py b/spaces/Marshalls/testmtd/feature_extraction/process_motions.py deleted file mode 100644 index 92f75de9a45bb66d91e3499b76ddd789649577b1..0000000000000000000000000000000000000000 --- a/spaces/Marshalls/testmtd/feature_extraction/process_motions.py +++ /dev/null @@ -1,108 +0,0 @@ -import numpy as np -# import librosa -from pathlib import Path -import json -import os.path -import sys -import argparse -import pickle -import torch - -THIS_DIR = os.path.dirname(os.path.abspath(__file__)) -ROOT_DIR = os.path.abspath(os.path.join(THIS_DIR, os.pardir)) -DATA_DIR = os.path.join(ROOT_DIR, 'data') -sys.path.append(ROOT_DIR) -from utils import distribute_tasks - -from analysis.pymo.parsers import BVHParser -from analysis.pymo.data import Joint, MocapData -from analysis.pymo.preprocessing import * -from sklearn.pipeline import Pipeline -import joblib as jl - -parser = argparse.ArgumentParser(description="Preprocess motion data") - -parser.add_argument("data_path", type=str, help="Directory containing motion data (BVH files)") -parser.add_argument("--param", type=str, default="expmap", help="expmap, position") -parser.add_argument("--replace_existing", action="store_true") -parser.add_argument("--do_mirror", action="store_true", help="whether to augment the data with mirrored motion") -parser.add_argument("--fps", type=int, default=60) - -args = parser.parse_args() - -# makes arguments into global variables of the same name, used later in the code -globals().update(vars(args)) -data_path = Path(data_path) - -## distributing tasks across nodes ## -from mpi4py import MPI -comm = MPI.COMM_WORLD -rank = comm.Get_rank() -size = comm.Get_size() -print(rank) - -p = BVHParser() -if do_mirror: - data_pipe = Pipeline([ - ('dwnsampl', DownSampler(tgt_fps=fps, keep_all=False)), - ('mir', Mirror(axis='X', append=True)), - ('root', RootTransformer('pos_rot_deltas')), - # ('jtsel', JointSelector(['Spine', 'Spine1', 'Spine2', 'Neck', 'Head', 'RightShoulder', 'RightArm', 'RightForeArm', 'RightHand', 'LeftShoulder', 'LeftArm', 'LeftForeArm', 'LeftHand', 'RightUpLeg', 'RightLeg', 'RightFoot', 'RightToeBase', 'LeftUpLeg', 'LeftLeg', 'LeftFoot', 'LeftToeBase'], include_root=True)), - ('jtsel', JointSelector(['Spine', 'Spine1', 'Neck', 'Head', 'RightShoulder', 'RightArm', 'RightForeArm', 'RightHand', 'LeftShoulder', 'LeftArm', 'LeftForeArm', 'LeftHand', 'RightUpLeg', 'RightLeg', 'RightFoot', 'RightToeBase', 'LeftUpLeg', 'LeftLeg', 'LeftFoot', 'LeftToeBase'], include_root=True)), - (param, MocapParameterizer(param)), - ('cnst', ConstantsRemover(only_cols=["Hips_Xposition", "Hips_Zposition"])), - ('np', Numpyfier()) - ]) -else: - data_pipe = Pipeline([ - ('dwnsampl', DownSampler(tgt_fps=fps, keep_all=False)), - ('root', RootTransformer('pos_rot_deltas')), - # ('mir', Mirror(axis='X', append=True)), - # ('jtsel', JointSelector(['Spine', 'Spine1', 'Spine2', 'Neck', 'Head', 'RightShoulder', 'RightArm', 'RightForeArm', 'RightHand', 'LeftShoulder', 'LeftArm', 'LeftForeArm', 'LeftHand', 'RightUpLeg', 'RightLeg', 'RightFoot', 'RightToeBase', 'LeftUpLeg', 'LeftLeg', 'LeftFoot', 'LeftToeBase'], include_root=True)), - ('jtsel', JointSelector(['Spine', 'Spine1', 'Neck', 'Head', 'RightShoulder', 'RightArm', 'RightForeArm', 'RightHand', 'LeftShoulder', 'LeftArm', 'LeftForeArm', 
'LeftHand', 'RightUpLeg', 'RightLeg', 'RightFoot', 'RightToeBase', 'LeftUpLeg', 'LeftLeg', 'LeftFoot', 'LeftToeBase'], include_root=True)), - (param, MocapParameterizer(param)), - ('cnst', ConstantsRemover(only_cols=["Hips_Xposition", "Hips_Zposition"])), - ('np', Numpyfier()) - ]) - -def extract_joint_angles(files): - if len(files)>0: - data_all = list() - for f in files: - data_all.append(p.parse(f)) - - out_data = data_pipe.fit_transform(data_all) - - if do_mirror: - # NOTE: the datapipe will append the mirrored files to the end - assert len(out_data) == 2*len(files) - else: - assert len(out_data) == len(files) - - if rank == 0: - jl.dump(data_pipe, os.path.join(data_path, 'motion_'+param+'_data_pipe.sav')) - - fi=0 - if do_mirror: - for f in files: - features_file = f + "_"+param+".npy" - if replace_existing or not os.path.isfile(features_file): - np.save(features_file, out_data[fi]) - features_file_mirror = f[:-4]+"_mirrored" + ".bvh_"+param+".npy" - if replace_existing or not os.path.isfile(features_file_mirror): - np.save(features_file_mirror, out_data[len(files)+fi]) - fi=fi+1 - else: - for f in files: - features_file = f + "_"+param+".npy" - if replace_existing or not os.path.isfile(features_file): - np.save(features_file, out_data[fi]) - fi=fi+1 - -candidate_motion_files = sorted(data_path.glob('**/*.bvh'), key=lambda path: path.parent.__str__()) -#candidate_motion_files = candidate_motion_files[:32] -tasks = distribute_tasks(candidate_motion_files,rank,size) - -files = [path.__str__() for i, path in enumerate(candidate_motion_files) if i in tasks] - -extract_joint_angles(files) diff --git a/spaces/MercurialAi/OncoMedleyMini/OncoMedley/src/load_corpus_dir.py b/spaces/MercurialAi/OncoMedleyMini/OncoMedley/src/load_corpus_dir.py deleted file mode 100644 index 9ce76f2ed8919ab73978da526f65184d0c14b77c..0000000000000000000000000000000000000000 --- a/spaces/MercurialAi/OncoMedleyMini/OncoMedley/src/load_corpus_dir.py +++ /dev/null @@ -1,16 +0,0 @@ - -def load_corpus_dir(dir): - text_files = ['Clinical Practice Guidelines/jco.22.01063.rtf', 'Clinical Practice Guidelines/jco.22.01533.rtf', 'Clinical Practice Guidelines/jco.22.02807.rtf', 'Clinical Practice Guidelines/jco.23.00638.rtf', 'Clinical Practice Guidelines/medscape_guidelines.rtf', 'Clinical Practice Guidelines/acs_guidelines.rtf', 'Clinical Practice Guidelines/nhsuk_guidelines.rtf', 'Clinical Practice Guidelines/American_College_of_Sports_Medicine_Roundtable_on_Exercise_Guidelines_for_Cancer_Survivors.rtf', 'Clinical Practice Guidelines/Clinical_Practice_Guidelines_on_the_Use_of_Integrative_Therapies_as_Supportive_Care_in_Patients_Treated_for_Breast_Cancer.rtf', 'Clinical Practice Guidelines/acs_guidelines_pain_management.rtf'] - - texts = [] - for f in text_files: - with open(f, 'r', encoding="utf-8") as file: - text = file.read() - texts.append(text) - - text = '' - for t in texts: - text = text + t - - return text - diff --git a/spaces/MercurialAi/OncologyGPT_Probabilities/app.py b/spaces/MercurialAi/OncologyGPT_Probabilities/app.py deleted file mode 100644 index d66009ee5db4a46e57cfe8c98a7f3f9fd6c5e489..0000000000000000000000000000000000000000 --- a/spaces/MercurialAi/OncologyGPT_Probabilities/app.py +++ /dev/null @@ -1,121 +0,0 @@ -import gradio as gr -import os -os.system("pip -qq install openai") -import openai -import matplotlib.pyplot as plt -os.system("pip -qq install nltk") -from nltk.corpus import stopwords -import nltk -os.system('pip -qq install huggingface_hub["cli"]') - -nltk.download('stopwords') -stop_words = 
set(stopwords.words('english')) - -EX_Q1 = "How does screening help to identify a family history that may be associated with an increased risk for mutations in the breast cancer susceptibility genes BRCA1 or BRCA2?" -EX_Q2 = "What are the therapeutic options for patients with prior CDK4/6 inhibitor treatment and ESR1 wild-type tumors?" -EX_Q3 = "What criteria are used to determine a patient's eligibility for treatment with PARP inhibitors like olaparib and talazoparib for metastatic HER2-negative breast cancer? " -EX_Q4 = "What testing must a patient candidate for poly ADP-ribose polymerase (PARP) inhibitor therapy undergo to determine their eligibility? " -EX_Q5 = 'What is an atom? ' - -def make_prob_avg_plot(x): - fig = plt.figure() - if x < -3: - plt.bar("Selection Probability Average", x, color="maroon", width=0.7) - elif x < -2: - plt.bar("Selection Probability Average", x, color="red", width=0.7) - elif x < -1: - plt.bar("Selection Probability Average", x, color="yellow", width=0.7) - elif x < 0: - plt.bar("Selection Probability Average", x, color="green", width=0.7) - plt.ylim(bottom=-4) - return fig - -def make_logprob_plot(words: list, x: list): - fig = plt.figure() - plt.bar(words, x) - plt.gcf().subplots_adjust(bottom=0.2) - plt.xticks(rotation=60) - return fig - -def get_response(Q): - # clear cache before generating new response - os.system('huggingface-cli delete-cache') - - inference = openai.Completion.create( - model="davinci:ft-personal-2023-06-09-03-59-18", - prompt=Q+"->", - max_tokens=64, - temperature=0.25, - logprobs=2, - stop=[" END"] - ) - - response = str(inference.choices[0].text) - - # cut response off after end is signified - end_index = len(response) - end_markers = ["END", " ", "\n"] - for mark in end_markers: - if mark in response: - end_index = response.index(mark) - break - - response = response[:end_index] - - logprobs = inference.choices[0].logprobs.top_logprobs - - max_probs = [] - for logprob in logprobs: - probs = list(logprob.values()) - max_prob = max(probs) - max_probs.append(max_prob) - - return response, max_probs - -def bot(Q, history): - history = history or [] - c_history = list(sum(history, ())) - c_history.append(Q) - c_input = ' '.join(c_history) - output, max_probs = get_response(c_input) - words = output.split() - max_probs = max_probs[:len(words)] - - # drop English stopwords while keeping each word aligned with its probability - kept = [(w, p) for w, p in zip(words, max_probs) if w.lower().replace(' ', '') not in stop_words] - words = [w for w, _ in kept] - max_probs = [p for _, p in kept] - - probs_plot = make_logprob_plot(words, max_probs) - - # an empty list (everything filtered out) falls through to the refusal below - avg_max_probs = sum(max_probs)/len(max_probs) if max_probs else float('-inf') - - if avg_max_probs < -0.131841: - output = "Sorry, I do not feel confident enough to answer that question right now." 
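- # NOTE (assumption): the -0.131841 cutoff above is an empirically chosen mean-logprob confidence threshold; responses averaging below it are replaced with the refusal message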
- -    history.append((Q, output)) - -    return history, history, probs_plot - -def get_question_example(qe): - return qe - -with gr.Blocks() as iFace: - - chatbot = gr.Chatbot(show_label=False) - state = gr.State() - plot_description = gr.Textbox("Represents the model's probability for selecting each word - higher (less negative) values reflect more confidence", interactive=False, show_label=False) - plot = gr.Plot(show_label=False) - - Q = gr.Textbox(show_label=False, placeholder="I'm here to help.").style(container=False) - - question_example = gr.Radio(label="Inquiry Examples", choices=[EX_Q1, EX_Q2, EX_Q3, EX_Q4, EX_Q5]) - - Q.submit(bot, inputs=[Q, state], outputs=[chatbot, state, plot]) - question_example.change(get_question_example, inputs=[question_example], outputs=Q) - -iFace.launch() diff --git a/spaces/Mileena/PIFu-Clothed-Human-Digitization/PIFu/lib/ext_transform.py b/spaces/Mileena/PIFu-Clothed-Human-Digitization/PIFu/lib/ext_transform.py deleted file mode 100644 index 7e1104bd7b1a24303370c066d1487f83a9bfece0..0000000000000000000000000000000000000000 --- a/spaces/Mileena/PIFu-Clothed-Human-Digitization/PIFu/lib/ext_transform.py +++ /dev/null @@ -1,78 +0,0 @@ -import random - -import numpy as np -from skimage.filters import gaussian -import torch -from PIL import Image, ImageFilter - - -class RandomVerticalFlip(object): - def __call__(self, img): - if random.random() < 0.5: - return img.transpose(Image.FLIP_TOP_BOTTOM) - return img - - -class DeNormalize(object): - def __init__(self, mean, std): - self.mean = mean - self.std = std - - def __call__(self, tensor): - for t, m, s in zip(tensor, self.mean, self.std): - t.mul_(s).add_(m) - return tensor - - -class MaskToTensor(object): - def __call__(self, img): - return torch.from_numpy(np.array(img, dtype=np.int32)).long() - - -class FreeScale(object): - def __init__(self, size, interpolation=Image.BILINEAR): - self.size = tuple(reversed(size)) # size: (h, w) - self.interpolation = interpolation - - def __call__(self, img): - return img.resize(self.size, self.interpolation) - - -class FlipChannels(object): - def __call__(self, img): - img = np.array(img)[:, :, ::-1] - return Image.fromarray(img.astype(np.uint8)) - - -class RandomGaussianBlur(object): - def __call__(self, img): - sigma = 0.15 + random.random() * 1.15 - blurred_img = gaussian(np.array(img), sigma=sigma, multichannel=True) - blurred_img *= 255 - return Image.fromarray(blurred_img.astype(np.uint8)) - -# Lighting data augmentation taken from here - https://github.com/eladhoffer/convNet.pytorch/blob/master/preprocess.py - - -class Lighting(object): - """Lighting noise (AlexNet-style PCA-based noise)""" - - def __init__(self, alphastd, - eigval=(0.2175, 0.0188, 0.0045), - eigvec=((-0.5675, 0.7192, 0.4009), - (-0.5808, -0.0045, -0.8140), - (-0.5836, -0.6948, 0.4203))): - self.alphastd = alphastd - self.eigval = torch.Tensor(eigval) - self.eigvec = torch.Tensor(eigvec) - - def __call__(self, img): - if self.alphastd == 0: - return img - - alpha = img.new().resize_(3).normal_(0, self.alphastd) - rgb = self.eigvec.type_as(img).clone()\ - .mul(alpha.view(1, 3).expand(3, 3))\ - .mul(self.eigval.view(1, 3).expand(3, 3))\ - .sum(1).squeeze() - return img.add(rgb.view(3, 1, 1).expand_as(img)) diff --git a/spaces/Miuzarte/SUI-svc-3.0/README.md b/spaces/Miuzarte/SUI-svc-3.0/README.md deleted file mode 100644 index 5934920f1b4382116a782e137a00e760fd56c898..0000000000000000000000000000000000000000 --- a/spaces/Miuzarte/SUI-svc-3.0/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---
-title: AI岁己 (Singing Voice Converter) -emoji: 🕊 -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/MoonQiu/LongerCrafter/lvdm/models/ddpm3d.py b/spaces/MoonQiu/LongerCrafter/lvdm/models/ddpm3d.py deleted file mode 100644 index 73a2647d1fd63e6101bd333267e98e9d2a8fa481..0000000000000000000000000000000000000000 --- a/spaces/MoonQiu/LongerCrafter/lvdm/models/ddpm3d.py +++ /dev/null @@ -1,763 +0,0 @@ -""" -wild mixture of -https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py -https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -https://github.com/CompVis/taming-transformers --- merci -""" - -from functools import partial -from contextlib import contextmanager -import numpy as np -from tqdm import tqdm -from einops import rearrange, repeat -import logging -mainlogger = logging.getLogger('mainlogger') -import torch -import torch.nn as nn -from torchvision.utils import make_grid -import pytorch_lightning as pl -from utils.utils import instantiate_from_config -from lvdm.ema import LitEma -from lvdm.distributions import DiagonalGaussianDistribution -from lvdm.models.utils_diffusion import make_beta_schedule -from lvdm.modules.encoders.ip_resampler import ImageProjModel, Resampler -from lvdm.basics import disabled_train -from lvdm.common import ( - extract_into_tensor, - noise_like, - exists, - default -) - - -__conditioning_keys__ = {'concat': 'c_concat', - 'crossattn': 'c_crossattn', - 'adm': 'y'} - -class DDPM(pl.LightningModule): - # classic DDPM with Gaussian diffusion, in image space - def __init__(self, - unet_config, - timesteps=1000, - beta_schedule="linear", - loss_type="l2", - ckpt_path=None, - ignore_keys=[], - load_only_unet=False, - monitor=None, - use_ema=True, - first_stage_key="image", - image_size=256, - channels=3, - log_every_t=100, - clip_denoised=True, - linear_start=1e-4, - linear_end=2e-2, - cosine_s=8e-3, - given_betas=None, - original_elbo_weight=0., - v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta - l_simple_weight=1., - conditioning_key=None, - parameterization="eps", # all assuming fixed variance schedules - scheduler_config=None, - use_positional_encodings=False, - learn_logvar=False, - logvar_init=0.
- ): - super().__init__() - assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' - self.parameterization = parameterization - mainlogger.info(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") - self.cond_stage_model = None - self.clip_denoised = clip_denoised - self.log_every_t = log_every_t - self.first_stage_key = first_stage_key - self.channels = channels - self.temporal_length = unet_config.params.temporal_length - self.image_size = image_size - if isinstance(self.image_size, int): - self.image_size = [self.image_size, self.image_size] - self.use_positional_encodings = use_positional_encodings - self.model = DiffusionWrapper(unet_config, conditioning_key) - self.use_ema = use_ema - if self.use_ema: - self.model_ema = LitEma(self.model) - mainlogger.info(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - self.use_scheduler = scheduler_config is not None - if self.use_scheduler: - self.scheduler_config = scheduler_config - - self.v_posterior = v_posterior - self.original_elbo_weight = original_elbo_weight - self.l_simple_weight = l_simple_weight - - if monitor is not None: - self.monitor = monitor - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) - - self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, - linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) - - self.loss_type = loss_type - - self.learn_logvar = learn_logvar - self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) - if self.learn_logvar: - self.logvar = nn.Parameter(self.logvar, requires_grad=True) - - - def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if exists(given_betas): - betas = given_betas - else: - betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, - cosine_s=cosine_s) - alphas = 1. - betas - alphas_cumprod = np.cumprod(alphas, axis=0) - alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) - - timesteps, = betas.shape - self.num_timesteps = int(timesteps) - self.linear_start = linear_start - self.linear_end = linear_end - assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' - - to_torch = partial(torch.tensor, dtype=torch.float32) - - self.register_buffer('betas', to_torch(betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) - - # calculations for posterior q(x_{t-1} | x_t, x_0) - posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( - 1. - alphas_cumprod) + self.v_posterior * betas - # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) - self.register_buffer('posterior_variance', to_torch(posterior_variance)) - # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain - self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) - self.register_buffer('posterior_mean_coef1', to_torch( - betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) - self.register_buffer('posterior_mean_coef2', to_torch( - (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) - - if self.parameterization == "eps": - lvlb_weights = self.betas ** 2 / ( - 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) - elif self.parameterization == "x0": - lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) - else: - raise NotImplementedError("mu not supported") - # TODO how to choose this term - lvlb_weights[0] = lvlb_weights[1] - self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) - assert not torch.isnan(self.lvlb_weights).all() - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.model.parameters()) - self.model_ema.copy_to(self.model) - if context is not None: - mainlogger.info(f"{context}: Switched to EMA weights") - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.model.parameters()) - if context is not None: - mainlogger.info(f"{context}: Restored training weights") - - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location="cpu") - if "state_dict" in list(sd.keys()): - sd = sd["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - mainlogger.info("Deleting key {} from state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( - sd, strict=False) - mainlogger.info(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - mainlogger.info(f"Missing Keys: {missing}") - if len(unexpected) > 0: - mainlogger.info(f"Unexpected Keys: {unexpected}") - - def q_mean_variance(self, x_start, t): - """ - Get the distribution q(x_t | x_0). - :param x_start: the [N x C x ...] tensor of noiseless inputs. - :param t: the number of diffusion steps (minus 1). Here, 0 means one step. - :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
- """ - mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) - variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) - log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) - return mean, variance, log_variance - - def predict_start_from_noise(self, x_t, t, noise): - return ( - extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise - ) - - def q_posterior(self, x_start, x_t, t): - posterior_mean = ( - extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + - extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t - ) - posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) - posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) - return posterior_mean, posterior_variance, posterior_log_variance_clipped - - def p_mean_variance(self, x, t, clip_denoised: bool): - model_out = self.model(x, t) - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - if clip_denoised: - x_recon.clamp_(-1., 1.) - - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): - b, *_, device = *x.shape, x.device - model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) - noise = noise_like(x.shape, device, repeat_noise) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def p_sample_loop(self, shape, return_intermediates=False): - device = self.betas.device - b = shape[0] - img = torch.randn(shape, device=device) - intermediates = [img] - for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): - img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), - clip_denoised=self.clip_denoised) - if i % self.log_every_t == 0 or i == self.num_timesteps - 1: - intermediates.append(img) - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample(self, batch_size=16, return_intermediates=False): - image_size = self.image_size - channels = self.channels - return self.p_sample_loop((batch_size, channels, image_size, image_size), - return_intermediates=return_intermediates) - - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start * - extract_into_tensor(self.scale_arr, t, x_start.shape) + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) - - def get_input(self, batch, k): - x = batch[k] - x = x.to(memory_format=torch.contiguous_format).float() - return x - - def _get_rows_from_list(self, samples): - n_imgs_per_row = len(samples) - denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - @torch.no_grad() - def log_images(self, 
batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): - log = dict() - x = self.get_input(batch, self.first_stage_key) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - x = x.to(self.device)[:N] - log["inputs"] = x - - # get diffusion row - diffusion_row = list() - x_start = x[:n_row] - - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(x_start) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - diffusion_row.append(x_noisy) - - log["diffusion_row"] = self._get_rows_from_list(diffusion_row) - - if sample: - # get denoise row - with self.ema_scope("Plotting"): - samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) - - log["samples"] = samples - log["denoise_row"] = self._get_rows_from_list(denoise_row) - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - -class LatentDiffusion(DDPM): - """main class""" - def __init__(self, - first_stage_config, - cond_stage_config, - num_timesteps_cond=None, - cond_stage_key="caption", - cond_stage_trainable=False, - cond_stage_forward=None, - conditioning_key=None, - uncond_prob=0.2, - uncond_type="empty_seq", - scale_factor=1.0, - scale_by_std=False, - encoder_type="2d", - only_model=False, - use_scale=False, - scale_a=1, - scale_b=0.3, - mid_step=400, - fix_scale_bug=False, - *args, **kwargs): - self.num_timesteps_cond = default(num_timesteps_cond, 1) - self.scale_by_std = scale_by_std - assert self.num_timesteps_cond <= kwargs['timesteps'] - # for backwards compatibility after implementation of DiffusionWrapper - ckpt_path = kwargs.pop("ckpt_path", None) - ignore_keys = kwargs.pop("ignore_keys", []) - conditioning_key = default(conditioning_key, 'crossattn') - super().__init__(conditioning_key=conditioning_key, *args, **kwargs) - - self.cond_stage_trainable = cond_stage_trainable - self.cond_stage_key = cond_stage_key - - # scale factor - self.use_scale=use_scale - if self.use_scale: - self.scale_a=scale_a - self.scale_b=scale_b - if fix_scale_bug: - scale_step=self.num_timesteps-mid_step - else: #bug - scale_step = self.num_timesteps - - scale_arr1 = np.linspace(scale_a, scale_b, mid_step) - scale_arr2 = np.full(scale_step, scale_b) - scale_arr = np.concatenate((scale_arr1, scale_arr2)) - scale_arr_prev = np.append(scale_a, scale_arr[:-1]) - to_torch = partial(torch.tensor, dtype=torch.float32) - self.register_buffer('scale_arr', to_torch(scale_arr)) - - try: - self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 - except: - self.num_downs = 0 - if not scale_by_std: - self.scale_factor = scale_factor - else: - self.register_buffer('scale_factor', torch.tensor(scale_factor)) - self.instantiate_first_stage(first_stage_config) - self.instantiate_cond_stage(cond_stage_config) - self.first_stage_config = first_stage_config - self.cond_stage_config = cond_stage_config - self.clip_denoised = False - - self.cond_stage_forward = cond_stage_forward - self.encoder_type = encoder_type - assert(encoder_type in ["2d", "3d"]) - self.uncond_prob = uncond_prob - self.classifier_free_guidance = True if uncond_prob > 0 else False - assert(uncond_type in ["zero_embed", "empty_seq"]) - self.uncond_type = uncond_type - - - self.restarted_from_ckpt = False - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys, 
only_model=only_model) - self.restarted_from_ckpt = True - - - def make_cond_schedule(self, ): - self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) - ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() - self.cond_ids[:self.num_timesteps_cond] = ids - - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - if self.use_scale: - return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start * - extract_into_tensor(self.scale_arr, t, x_start.shape) + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) - else: - return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) - - - def _freeze_model(self): - for name, para in self.model.diffusion_model.named_parameters(): - para.requires_grad = False - - def instantiate_first_stage(self, config): - model = instantiate_from_config(config) - self.first_stage_model = model.eval() - self.first_stage_model.train = disabled_train - for param in self.first_stage_model.parameters(): - param.requires_grad = False - - def instantiate_cond_stage(self, config): - if not self.cond_stage_trainable: - model = instantiate_from_config(config) - self.cond_stage_model = model.eval() - self.cond_stage_model.train = disabled_train - for param in self.cond_stage_model.parameters(): - param.requires_grad = False - else: - model = instantiate_from_config(config) - self.cond_stage_model = model - - def get_learned_conditioning(self, c): - if self.cond_stage_forward is None: - if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): - c = self.cond_stage_model.encode(c) - if isinstance(c, DiagonalGaussianDistribution): - c = c.mode() - else: - c = self.cond_stage_model(c) - else: - assert hasattr(self.cond_stage_model, self.cond_stage_forward) - c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) - return c - - def get_first_stage_encoding(self, encoder_posterior, noise=None): - if isinstance(encoder_posterior, DiagonalGaussianDistribution): - z = encoder_posterior.sample(noise=noise) - elif isinstance(encoder_posterior, torch.Tensor): - z = encoder_posterior - else: - raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") - return self.scale_factor * z - - @torch.no_grad() - def encode_first_stage(self, x): - if self.encoder_type == "2d" and x.dim() == 5: - b, _, t, _, _ = x.shape - x = rearrange(x, 'b c t h w -> (b t) c h w') - reshape_back = True - else: - reshape_back = False - - encoder_posterior = self.first_stage_model.encode(x) - results = self.get_first_stage_encoding(encoder_posterior).detach() - - if reshape_back: - results = rearrange(results, '(b t) c h w -> b c t h w', b=b,t=t) - - return results - - @torch.no_grad() - def encode_first_stage_2DAE(self, x): - - b, _, t, _, _ = x.shape - results = torch.cat([self.get_first_stage_encoding(self.first_stage_model.encode(x[:,:,i])).detach().unsqueeze(2) for i in range(t)], dim=2) - - return results - - def decode_core(self, z, **kwargs): - if self.encoder_type == "2d" and z.dim() == 5: - b, _, t, _, _ = z.shape - z = rearrange(z, 'b c t h w -> (b t) c h w') - reshape_back = True - else: - reshape_back = False - - z = 1. 
/ self.scale_factor * z
-
-        results = self.first_stage_model.decode(z, **kwargs)
-
-        if reshape_back:
-            results = rearrange(results, '(b t) c h w -> b c t h w', b=b,t=t)
-        return results
-
-    @torch.no_grad()
-    def decode_first_stage(self, z, **kwargs):
-        return self.decode_core(z, **kwargs)
-
-    def apply_model(self, x_noisy, t, cond, **kwargs):
-        if isinstance(cond, dict):
-            # hybrid case, cond is expected to be a dict
-            pass
-        else:
-            if not isinstance(cond, list):
-                cond = [cond]
-            key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
-            cond = {key: cond}
-
-        x_recon = self.model(x_noisy, t, **cond, **kwargs)
-
-        if isinstance(x_recon, tuple):
-            return x_recon[0]
-        else:
-            return x_recon
-
-    def _get_denoise_row_from_list(self, samples, desc=''):
-        denoise_row = []
-        for zd in tqdm(samples, desc=desc):
-            denoise_row.append(self.decode_first_stage(zd.to(self.device)))
-        n_log_timesteps = len(denoise_row)
-
-        denoise_row = torch.stack(denoise_row)  # n_log_timesteps, b, C, H, W
-
-        if denoise_row.dim() == 5:
-            # img, num_imgs= n_log_timesteps * bs, grid_size=[bs,n_log_timesteps]
-            denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
-            denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
-            denoise_grid = make_grid(denoise_grid, nrow=n_log_timesteps)
-        elif denoise_row.dim() == 6:
-            # video, grid_size=[n_log_timesteps*bs, t]
-            video_length = denoise_row.shape[3]
-            denoise_grid = rearrange(denoise_row, 'n b c t h w -> b n c t h w')
-            denoise_grid = rearrange(denoise_grid, 'b n c t h w -> (b n) c t h w')
-            denoise_grid = rearrange(denoise_grid, 'n c t h w -> (n t) c h w')
-            denoise_grid = make_grid(denoise_grid, nrow=video_length)
-        else:
-            raise ValueError
-
-        return denoise_grid
-
-
-    @torch.no_grad()
-    def decode_first_stage_2DAE(self, z, **kwargs):
-
-        b, _, t, _, _ = z.shape
-        z = 1. / self.scale_factor * z
-        results = torch.cat([self.first_stage_model.decode(z[:,:,i], **kwargs).unsqueeze(2) for i in range(t)], dim=2)
-
-        return results
-
-
-    def p_mean_variance(self, x, c, t, clip_denoised: bool, return_x0=False, score_corrector=None, corrector_kwargs=None, **kwargs):
-        t_in = t
-        model_out = self.apply_model(x, t_in, c, **kwargs)
-
-        if score_corrector is not None:
-            assert self.parameterization == "eps"
-            model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
-
-        if self.parameterization == "eps":
-            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
-        elif self.parameterization == "x0":
-            x_recon = model_out
-        else:
-            raise NotImplementedError()
-
-        if clip_denoised:
-            x_recon.clamp_(-1., 1.)
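-        # With the "eps" parameterization, predict_start_from_noise above
-        # recovers x0 = (x_t - sqrt(1 - alpha_bar_t) * eps) / sqrt(alpha_bar_t)
-        # via the sqrt_recip_alphas_cumprod / sqrt_recipm1_alphas_cumprod
-        # buffers registered in register_schedule; clamping keeps the estimate
-        # in the data range [-1, 1] before q_posterior consumes it below.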
- - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - - if return_x0: - return model_mean, posterior_variance, posterior_log_variance, x_recon - else: - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_x0=False, \ - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, **kwargs): - b, *_, device = *x.shape, x.device - outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_x0=return_x0, \ - score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, **kwargs) - if return_x0: - model_mean, _, model_log_variance, x0 = outputs - else: - model_mean, _, model_log_variance = outputs - - noise = noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - - if return_x0: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 - else: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, \ - timesteps=None, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None, **kwargs): - - if not log_every_t: - log_every_t = self.log_every_t - device = self.betas.device - b = shape[0] - # sample an initial noise - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - intermediates = [img] - if timesteps is None: - timesteps = self.num_timesteps - if start_T is not None: - timesteps = min(timesteps, start_T) - - iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(range(0, timesteps)) - - if mask is not None: - assert x0 is not None - assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match - - for i in iterator: - ts = torch.full((b,), i, device=device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, **kwargs) - if mask is not None: - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. 
- mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(img) - if callback: callback(i) - if img_callback: img_callback(img, i) - - if return_intermediates: - return img, intermediates - return img - - -class LatentVisualDiffusion(LatentDiffusion): - def __init__(self, cond_img_config, finegrained=False, random_cond=False, *args, **kwargs): - super().__init__(*args, **kwargs) - self.random_cond = random_cond - self.instantiate_img_embedder(cond_img_config, freeze=True) - num_tokens = 16 if finegrained else 4 - self.image_proj_model = self.init_projector(use_finegrained=finegrained, num_tokens=num_tokens, input_dim=1024,\ - cross_attention_dim=1024, dim=1280) - - def instantiate_img_embedder(self, config, freeze=True): - embedder = instantiate_from_config(config) - if freeze: - self.embedder = embedder.eval() - self.embedder.train = disabled_train - for param in self.embedder.parameters(): - param.requires_grad = False - - def init_projector(self, use_finegrained, num_tokens, input_dim, cross_attention_dim, dim): - if not use_finegrained: - image_proj_model = ImageProjModel(clip_extra_context_tokens=num_tokens, cross_attention_dim=cross_attention_dim, - clip_embeddings_dim=input_dim - ) - else: - image_proj_model = Resampler(dim=input_dim, depth=4, dim_head=64, heads=12, num_queries=num_tokens, - embedding_dim=dim, output_dim=cross_attention_dim, ff_mult=4 - ) - return image_proj_model - - ## Never delete this func: it is used in log_images() and inference stage - def get_image_embeds(self, batch_imgs): - ## img: b c h w - img_token = self.embedder(batch_imgs) - img_emb = self.image_proj_model(img_token) - return img_emb - - -class DiffusionWrapper(pl.LightningModule): - def __init__(self, diff_model_config, conditioning_key): - super().__init__() - self.diffusion_model = instantiate_from_config(diff_model_config) - self.conditioning_key = conditioning_key - - def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, - c_adm=None, s=None, mask=None, **kwargs): - # temporal_context = fps is foNone - if self.conditioning_key is None: - out = self.diffusion_model(x, t) - elif self.conditioning_key == 'concat': - xc = torch.cat([x] + c_concat, dim=1) - out = self.diffusion_model(xc, t, **kwargs) - elif self.conditioning_key == 'crossattn': - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(x, t, context=cc, **kwargs) - elif self.conditioning_key == 'hybrid': - ## it is just right [b,c,t,h,w]: concatenate in channel dim - xc = torch.cat([x] + c_concat, dim=1) - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(xc, t, context=cc) - elif self.conditioning_key == 'resblockcond': - cc = c_crossattn[0] - out = self.diffusion_model(x, t, context=cc) - elif self.conditioning_key == 'adm': - cc = c_crossattn[0] - out = self.diffusion_model(x, t, y=cc) - elif self.conditioning_key == 'hybrid-adm': - assert c_adm is not None - xc = torch.cat([x] + c_concat, dim=1) - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(xc, t, context=cc, y=c_adm) - elif self.conditioning_key == 'hybrid-time': - assert s is not None - xc = torch.cat([x] + c_concat, dim=1) - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(xc, t, context=cc, s=s) - elif self.conditioning_key == 'concat-time-mask': - # assert s is not None - # mainlogger.info('x & mask:',x.shape,c_concat[0].shape) - xc = torch.cat([x] + c_concat, dim=1) - out = self.diffusion_model(xc, t, context=None, s=s, mask=mask) - elif self.conditioning_key == 'concat-adm-mask': - # 
assert s is not None - # mainlogger.info('x & mask:',x.shape,c_concat[0].shape) - if c_concat is not None: - xc = torch.cat([x] + c_concat, dim=1) - else: - xc = x - out = self.diffusion_model(xc, t, context=None, y=s, mask=mask) - elif self.conditioning_key == 'hybrid-adm-mask': - cc = torch.cat(c_crossattn, 1) - if c_concat is not None: - xc = torch.cat([x] + c_concat, dim=1) - else: - xc = x - out = self.diffusion_model(xc, t, context=cc, y=s, mask=mask) - elif self.conditioning_key == 'hybrid-time-adm': # adm means y, e.g., class index - # assert s is not None - assert c_adm is not None - xc = torch.cat([x] + c_concat, dim=1) - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(xc, t, context=cc, s=s, y=c_adm) - else: - raise NotImplementedError() - - return out \ No newline at end of file diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/_base_/schedules/schedule_adamw_cos_10e.py b/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/_base_/schedules/schedule_adamw_cos_10e.py deleted file mode 100644 index 4f5c32a3236e5d5000a020c2460991986d61e261..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/_base_/schedules/schedule_adamw_cos_10e.py +++ /dev/null @@ -1,21 +0,0 @@ -# optimizer -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict( - type='AdamW', - lr=4e-4, - betas=(0.9, 0.999), - eps=1e-08, - weight_decay=0.01)) -train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=10, val_interval=1) -val_cfg = dict(type='ValLoop') -test_cfg = dict(type='TestLoop') - -# learning policy -param_scheduler = [ - dict( - type='CosineAnnealingLR', - T_max=10, - eta_min=4e-6, - convert_to_iter_based=True) -] diff --git a/spaces/Mountchicken/MAERec-Gradio/tools/dataset_converters/textdet/hiertext_converter.py b/spaces/Mountchicken/MAERec-Gradio/tools/dataset_converters/textdet/hiertext_converter.py deleted file mode 100644 index 9ca0163099c815382fe3362da1b0525d109bc23f..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/tools/dataset_converters/textdet/hiertext_converter.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import json -import os.path as osp - -import numpy as np -from shapely.geometry import Polygon - -from mmocr.utils import dump_ocr_data - - -def collect_level_info(annotation): - """Collect information from any level in HierText. - - Args: - annotation (dict): dict at each level - - Return: - anno (dict): dict containing annotations - """ - iscrowd = 0 if annotation['legible'] else 1 - vertices = np.array(annotation['vertices']) - polygon = Polygon(vertices) - area = polygon.area - min_x, min_y, max_x, max_y = polygon.bounds - bbox = [min_x, min_y, max_x - min_x, max_y - min_y] - segmentation = [i for j in vertices for i in j] - anno = dict( - iscrowd=iscrowd, - category_id=1, - bbox=bbox, - area=area, - segmentation=[segmentation]) - return anno - - -def collect_hiertext_info(root_path, level, split, print_every=1000): - """Collect the annotation information. - - The annotation format is as the following: - { - "info": { - "date": "release date", - "version": "current version" - }, - "annotations": [ // List of dictionaries, one for each image. - { - "image_id": "the filename of corresponding image.", - "image_width": image_width, // (int) The image width. - "image_height": image_height, // (int) The image height. - "paragraphs": [ // List of paragraphs. 
-                {
-                    "vertices": [[x1, y1], [x2, y2],...,[xn, yn]]
-                    "legible": true
-                    "lines": [
-                        {
-                            "vertices": [[x1, y1], [x2, y2],...,[x4, y4]]
-                            "text": "the text content of this line",
-                            "legible": true,
-                            "handwritten": false
-                            "vertical": false,
-                            "words": [
-                                {
-                                    "vertices": [[x1, y1], [x2, y2],...,[xm, ym]]
-                                    "text": "the text content of this word",
-                                    "legible": true
-                                    "handwritten": false,
-                                    "vertical": false,
-                                }, ...
-                            ]
-                        }, ...
-                    ]
-                }, ...
-            ]
-        }, ...
-    ]
-    }
-
-    Args:
-        root_path (str): Root path to the dataset
-        level (str): Level of annotations, which should be 'word', 'line',
-            or 'paragraph'
-        split (str): Dataset split, which should be 'train' or 'validation'
-        print_every (int): Print log information per iter
-
-    Returns:
-        img_info (dict): The dict of the img and annotation information
-    """
-
-    annotation_path = osp.join(root_path, 'annotations/' + split + '.jsonl')
-    if not osp.exists(annotation_path):
-        raise Exception(
-            f'{annotation_path} does not exist, please check and try again.')
-
-    annotation = json.load(open(annotation_path))['annotations']
-    img_infos = []
-    for i, img_annos in enumerate(annotation):
-        if i > 0 and i % print_every == 0:
-            print(f'{i}/{len(annotation)}')
-        img_info = {}
-        img_info['file_name'] = img_annos['image_id'] + '.jpg'
-        img_info['height'] = img_annos['image_height']
-        img_info['width'] = img_annos['image_width']
-        img_info['segm_file'] = annotation_path
-        anno_info = []
-        for paragraph in img_annos['paragraphs']:
-            if level == 'paragraph':
-                anno = collect_level_info(paragraph)
-                anno_info.append(anno)
-            elif level == 'line':
-                for line in paragraph['lines']:
-                    anno = collect_level_info(line)
-                    anno_info.append(anno)
-            elif level == 'word':
-                for line in paragraph['lines']:
-                    for word in line['words']:
-                        anno = collect_level_info(word)
-                        anno_info.append(anno)
-        img_info.update(anno_info=anno_info)
-        img_infos.append(img_info)
-    return img_infos
-
-
-def parse_args():
-    parser = argparse.ArgumentParser(
-        description='Generate training and validation set of HierText ')
-    parser.add_argument('root_path', help='Root dir path of HierText')
-    parser.add_argument(
-        '--level',
-        default='word',
-        help='HierText provides three levels of annotation',
-        choices=['word', 'line', 'paragraph'])
-    args = parser.parse_args()
-    return args
-
-
-def main():
-    args = parse_args()
-    root_path = args.root_path
-    print('Processing training set...')
-    training_infos = collect_hiertext_info(root_path, args.level, 'train')
-    dump_ocr_data(training_infos,
-                  osp.join(root_path, 'instances_training.json'), 'textdet')
-    print('Processing validation set...')
-    val_infos = collect_hiertext_info(root_path, args.level, 'val')
-    dump_ocr_data(val_infos, osp.join(root_path, 'instances_val.json'),
-                  'textdet')
-    print('Finish')
-
-
-if __name__ == '__main__':
-    main()
diff --git a/spaces/NATSpeech/DiffSpeech/utils/commons/dataset_utils.py b/spaces/NATSpeech/DiffSpeech/utils/commons/dataset_utils.py
deleted file mode 100644
index 44c2ca0ce3226fa21bf9d7c7fa889b23ef9b0fa9..0000000000000000000000000000000000000000
--- a/spaces/NATSpeech/DiffSpeech/utils/commons/dataset_utils.py
+++ /dev/null
@@ -1,247 +0,0 @@
-import os
-import sys
-import traceback
-import types
-from functools import wraps
-from itertools import chain
-import numpy as np
-import torch.utils.data
-from torch.utils.data import ConcatDataset
-from utils.commons.hparams import hparams
-
-
-def collate_1d_or_2d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None, shift_id=1):
-    if len(values[0].shape) == 1:
-        return
collate_1d(values, pad_idx, left_pad, shift_right, max_len, shift_id) - else: - return collate_2d(values, pad_idx, left_pad, shift_right, max_len) - - -def collate_1d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None, shift_id=1): - """Convert a list of 1d tensors into a padded 2d tensor.""" - size = max(v.size(0) for v in values) if max_len is None else max_len - res = values[0].new(len(values), size).fill_(pad_idx) - - def copy_tensor(src, dst): - assert dst.numel() == src.numel() - if shift_right: - dst[1:] = src[:-1] - dst[0] = shift_id - else: - dst.copy_(src) - - for i, v in enumerate(values): - copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)]) - return res - - -def collate_2d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None): - """Convert a list of 2d tensors into a padded 3d tensor.""" - size = max(v.size(0) for v in values) if max_len is None else max_len - res = values[0].new(len(values), size, values[0].shape[1]).fill_(pad_idx) - - def copy_tensor(src, dst): - assert dst.numel() == src.numel() - if shift_right: - dst[1:] = src[:-1] - else: - dst.copy_(src) - - for i, v in enumerate(values): - copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)]) - return res - - -def _is_batch_full(batch, num_tokens, max_tokens, max_sentences): - if len(batch) == 0: - return 0 - if len(batch) == max_sentences: - return 1 - if num_tokens > max_tokens: - return 1 - return 0 - - -def batch_by_size( - indices, num_tokens_fn, max_tokens=None, max_sentences=None, - required_batch_size_multiple=1, distributed=False -): - """ - Yield mini-batches of indices bucketed by size. Batches may contain - sequences of different lengths. - - Args: - indices (List[int]): ordered list of dataset indices - num_tokens_fn (callable): function that returns the number of tokens at - a given index - max_tokens (int, optional): max number of tokens in each batch - (default: None). - max_sentences (int, optional): max number of sentences in each - batch (default: None). - required_batch_size_multiple (int, optional): require batch size to - be a multiple of N (default: 1). 
- """ - max_tokens = max_tokens if max_tokens is not None else sys.maxsize - max_sentences = max_sentences if max_sentences is not None else sys.maxsize - bsz_mult = required_batch_size_multiple - - if isinstance(indices, types.GeneratorType): - indices = np.fromiter(indices, dtype=np.int64, count=-1) - - sample_len = 0 - sample_lens = [] - batch = [] - batches = [] - for i in range(len(indices)): - idx = indices[i] - num_tokens = num_tokens_fn(idx) - sample_lens.append(num_tokens) - sample_len = max(sample_len, num_tokens) - - assert sample_len <= max_tokens, ( - "sentence at index {} of size {} exceeds max_tokens " - "limit of {}!".format(idx, sample_len, max_tokens) - ) - num_tokens = (len(batch) + 1) * sample_len - - if _is_batch_full(batch, num_tokens, max_tokens, max_sentences): - mod_len = max( - bsz_mult * (len(batch) // bsz_mult), - len(batch) % bsz_mult, - ) - batches.append(batch[:mod_len]) - batch = batch[mod_len:] - sample_lens = sample_lens[mod_len:] - sample_len = max(sample_lens) if len(sample_lens) > 0 else 0 - batch.append(idx) - if len(batch) > 0: - batches.append(batch) - return batches - - -def unpack_dict_to_list(samples): - samples_ = [] - bsz = samples.get('outputs').size(0) - for i in range(bsz): - res = {} - for k, v in samples.items(): - try: - res[k] = v[i] - except: - pass - samples_.append(res) - return samples_ - - -def remove_padding(x, padding_idx=0): - if x is None: - return None - assert len(x.shape) in [1, 2] - if len(x.shape) == 2: # [T, H] - return x[np.abs(x).sum(-1) != padding_idx] - elif len(x.shape) == 1: # [T] - return x[x != padding_idx] - - -def data_loader(fn): - """ - Decorator to make any fx with this use the lazy property - :param fn: - :return: - """ - - wraps(fn) - attr_name = '_lazy_' + fn.__name__ - - def _get_data_loader(self): - try: - value = getattr(self, attr_name) - except AttributeError: - try: - value = fn(self) # Lazy evaluation, done only once. - except AttributeError as e: - # Guard against AttributeError suppression. (Issue #142) - traceback.print_exc() - error = f'{fn.__name__}: An AttributeError was encountered: ' + str(e) - raise RuntimeError(error) from e - setattr(self, attr_name, value) # Memoize evaluation. - return value - - return _get_data_loader - - -class BaseDataset(torch.utils.data.Dataset): - def __init__(self, shuffle): - super().__init__() - self.hparams = hparams - self.shuffle = shuffle - self.sort_by_len = hparams['sort_by_len'] - self.sizes = None - - @property - def _sizes(self): - return self.sizes - - def __getitem__(self, index): - raise NotImplementedError - - def collater(self, samples): - raise NotImplementedError - - def __len__(self): - return len(self._sizes) - - def num_tokens(self, index): - return self.size(index) - - def size(self, index): - """Return an example's size as a float or tuple. This value is used when - filtering a dataset with ``--max-positions``.""" - return min(self._sizes[index], hparams['max_frames']) - - def ordered_indices(self): - """Return an ordered list of indices. 
Batches will be constructed based - on this order.""" - if self.shuffle: - indices = np.random.permutation(len(self)) - if self.sort_by_len: - indices = indices[np.argsort(np.array(self._sizes)[indices], kind='mergesort')] - else: - indices = np.arange(len(self)) - return indices - - @property - def num_workers(self): - return int(os.getenv('NUM_WORKERS', hparams['ds_workers'])) - - -class BaseConcatDataset(ConcatDataset): - def collater(self, samples): - return self.datasets[0].collater(samples) - - @property - def _sizes(self): - if not hasattr(self, 'sizes'): - self.sizes = list(chain.from_iterable([d._sizes for d in self.datasets])) - return self.sizes - - def size(self, index): - return min(self._sizes[index], hparams['max_frames']) - - def num_tokens(self, index): - return self.size(index) - - def ordered_indices(self): - """Return an ordered list of indices. Batches will be constructed based - on this order.""" - if self.datasets[0].shuffle: - indices = np.random.permutation(len(self)) - if self.datasets[0].sort_by_len: - indices = indices[np.argsort(np.array(self._sizes)[indices], kind='mergesort')] - else: - indices = np.arange(len(self)) - return indices - - @property - def num_workers(self): - return self.datasets[0].num_workers diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/gated_feedforward.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/gated_feedforward.py deleted file mode 100644 index 11c912885a7b8eb68e6d764653275fb2b5d2de92..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/gated_feedforward.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright 2020 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Keras-based gated feedforward layer.""" -# pylint: disable=g-classes-have-attributes -from __future__ import absolute_import -from __future__ import division -# from __future__ import google_type_annotations -from __future__ import print_function - -import gin -import tensorflow as tf - - -@tf.keras.utils.register_keras_serializable(package="Text") -@gin.configurable -class GatedFeedforward(tf.keras.layers.Layer): - """Gated linear feedforward layer. - - This layer follows the paper "GLU Variants Improve Transformer" - (https://arxiv.org/abs/2002.05202). In additional, it allows to stack - multiple feedforward blocks and specify the position of dropout layer. - - Arguments: - intermediate_size: Size of the intermediate layer. - intermediate_activation: Activation for the intermediate layer. - dropout: Dropout probability for the output dropout. - use_gate: Whether to use gated linear units. 
If True, assuming `GELU` as - the activation and omitting bias, will apply - `GEGLU(x, W, V, W_2) = (GEGLU(xW) * xV)W2`; if False, will follow - "Attention Is All You Need" (https://arxiv.org/abs/1706.03762) paper - and apply `FFN(x, W, W_2) = GELU(xW_1)W_2.` - num_blocks: The number of feedforward blocks to stack. Each block contains - a (gated) linear layer and a fully connected layer followed by dropout, - layer norm and residual. - dropout_position: Where to apply the dropout, the value can be either - `before_residual` or `after_residual`. If `before_residual`, will apply - `layer_output = layer_norm(dropout(layer_output) + layer_input)`; - if `after residual`, will apply - `layer_output = dropout(layer_norm(layer_output + layer_input))`. - kernel_initializer: Initializer for dense layer kernels. - bias_initializer: Initializer for dense layer biases. - kernel_regularizer: Regularizer for dense layer kernels. - bias_regularizer: Regularizer for dense layer biases. - activity_regularizer: Regularizer for dense layer activity. - kernel_constraint: Constraint for dense layer kernels. - bias_constraint: Constraint for dense layer kernels. - """ - - def __init__(self, - intermediate_size, - intermediate_activation, - dropout, - use_gate=True, - num_blocks=1, - dropout_position="before_residual", - kernel_initializer="glorot_uniform", - bias_initializer="zeros", - kernel_regularizer=None, - bias_regularizer=None, - activity_regularizer=None, - kernel_constraint=None, - bias_constraint=None, - **kwargs): - super(GatedFeedforward, self).__init__(**kwargs) - self._intermediate_size = intermediate_size - self._intermediate_activation = intermediate_activation - self._dropout = dropout - self._use_gate = use_gate - self._num_blocks = num_blocks - self._dropout_position = dropout_position - if self._dropout_position not in ("before_residual", "after_residual"): - raise ValueError( - "The dropout_position should be either `before_residual` or" - "`after_residual`, got: %s" % self._dropout_position) - - self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) - self._bias_initializer = tf.keras.initializers.get(bias_initializer) - self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) - self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) - self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer) - self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) - self._bias_constraint = tf.keras.constraints.get(bias_constraint) - - def build(self, input_shape): - hidden_size = input_shape.as_list()[-1] - - common_kwargs = dict( - kernel_initializer=self._kernel_initializer, - bias_initializer=self._bias_initializer, - kernel_regularizer=self._kernel_regularizer, - bias_regularizer=self._bias_regularizer, - activity_regularizer=self._activity_regularizer, - kernel_constraint=self._kernel_constraint, - bias_constraint=self._bias_constraint) - self._intermediate_dense = [] - self._intermediate_activation_layers = [] - self._gate_dense = [] - self._output_dense = [] - self._output_dropout = [] - self._output_layer_norm = [] - activation_policy = tf.keras.mixed_precision.experimental.global_policy() - if activation_policy.name == "mixed_bfloat16": - # bfloat16 causes BERT with the LAMB optimizer to not converge - # as well, so we use float32. - # TODO(b/154538392): Investigate this. 
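-    # The loop below builds one feedforward block per `num_blocks`. With
-    # use_gate=True, each block pairs the intermediate projection with a gate
-    # projection, and call() computes activation(x @ W) * (x @ V) before the
-    # output projection, i.e. the GEGLU variant from arXiv:2002.05202 that
-    # the class docstring describes.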
- activation_policy = tf.float32 - for i in range(self._num_blocks): - self._intermediate_dense.append( - tf.keras.layers.experimental.EinsumDense( - "abc,cd->abd", - output_shape=(None, self._intermediate_size), - bias_axes="d", - name="intermediate_%d" % i, - **common_kwargs)) - self._intermediate_activation_layers.append(tf.keras.layers.Activation( - self._intermediate_activation, dtype=activation_policy)) - if self._use_gate: - self._gate_dense.append( - tf.keras.layers.experimental.EinsumDense( - "abc,cd->abd", - output_shape=(None, self._intermediate_size), - bias_axes="d", - name="gate_%d" % i, - **common_kwargs)) - self._output_dense.append( - tf.keras.layers.experimental.EinsumDense( - "abc,cd->abd", - output_shape=(None, hidden_size), - bias_axes="d", - name="output_%d" % i, - **common_kwargs)) - self._output_dropout.append( - tf.keras.layers.Dropout(rate=self._dropout)) - # Use float32 in layernorm for numeric stability. - self._output_layer_norm.append( - tf.keras.layers.LayerNormalization( - name="output_layer_norm_%d" % i, - axis=-1, - epsilon=1e-12, - dtype=tf.float32)) - - def get_config(self): - config = { - "intermediate_size": - self._intermediate_size, - "intermediate_activation": - self._intermediate_activation, - "dropout": - self._dropout, - "use_gate": - self._use_gate, - "num_blocks": - self._num_blocks, - "dropout_position": - self._dropout_position, - "kernel_initializer": - tf.keras.initializers.serialize(self._kernel_initializer), - "bias_initializer": - tf.keras.initializers.serialize(self._bias_initializer), - "kernel_regularizer": - tf.keras.regularizers.serialize(self._kernel_regularizer), - "bias_regularizer": - tf.keras.regularizers.serialize(self._bias_regularizer), - "activity_regularizer": - tf.keras.regularizers.serialize(self._activity_regularizer), - "kernel_constraint": - tf.keras.constraints.serialize(self._kernel_constraint), - "bias_constraint": - tf.keras.constraints.serialize(self._bias_constraint) - } - base_config = super(GatedFeedforward, self).get_config() - return dict(list(base_config.items()) + list(config.items())) - - def call(self, inputs): - layer_output = inputs - for i in range(self._num_blocks): - layer_input = layer_output - intermediate_output = self._intermediate_dense[i](layer_input) - intermediate_output = self._intermediate_activation_layers[i]( - intermediate_output) - if self._use_gate: - gated_linear = self._gate_dense[i](layer_input) - intermediate_output = intermediate_output * gated_linear - - layer_output = self._output_dense[i](intermediate_output) - if self._dropout_position == "before_residual": - layer_output = self._output_dropout[i](layer_output) - - # During mixed precision training, `layer_input` may be from layer norm. - # If so, it is always fp32. Cast layer_output to fp32 for the subsequent - # add. 
- if layer_input.dtype == tf.float32: - layer_output = tf.cast(layer_output, tf.float32) - layer_output = self._output_layer_norm[i](layer_output + layer_input) - if self._dropout_position == "after_residual": - layer_output = self._output_dropout[i](layer_output) - - return layer_output diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/nhnet/README.md b/spaces/NCTCMumbai/NCTC/models/official/nlp/nhnet/README.md deleted file mode 100644 index 14c55636ab52b4582cb6b12e88a282c7adbb059e..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/nlp/nhnet/README.md +++ /dev/null @@ -1,168 +0,0 @@ -# Multi-doc News Headline Generation Model: NHNet - -This repository contains TensorFlow 2.x implementation for NHNet [[1]](#1) as -well as instructions for producing the data we described in the paper. - -## Introduction - -NHNet is a multi-doc news headline generation model. It extends a standard -Transformer-based encoder-decoder model to multi-doc setting and relies on an -article-level attention layer to capture information common to most (if not all) -input news articles in a news cluster or story, and provide robustness against -potential outliers in the input due to clustering quality. - -Our academic paper [[1]](#1) which describes NHNet in detail can be found here: -https://arxiv.org/abs/2001.09386. - -## Dataset - -**Raw Data:** One can [download](https://github.com/google-research-datasets/NewSHead) -our multi-doc headline dataset which -contains 369,940 news stories and 932,571 unique URLs. We split these stories -into train (359,940 stories), validation (5,000 stories) and test set (5,000 -stories) by timestamp. - -More information, please checkout: -https://github.com/google-research-datasets/NewSHead - -### Crawling - -Unfortunately, we will not be able to release the pre-processed dataset that is -exactly used in the paper. Users need to crawl the URLs and the recommended -pre-processing is using an open-sourced library to download and parse the news -content including title and leading paragraphs. For ease of this process, we -provide a config of [news-please](https://github.com/fhamborg/news-please) that -will crawl and extract news articles on a local machine. - -First, install the `news-please` CLI (requires python 3.x) -```shell -$ pip3 install news-please -``` - -Next, run the crawler with our provided [config and URL list](https://github.com/google-research-datasets/NewSHead/releases) - -```shell -# Sets to path of the downloaded data folder. -$ DATA_FOLDER=/path/to/downloaded_dataset - -# Uses CLI interface to crawl. We assume news_please subfolder contains the -# decompressed config.cfg and sitelist.hjson. -$ news-please -c $DATA_FOLDER/news_please -``` -By default, it will store crawled -articles under `/tmp/nhnet/`. To terminate the process press `CTRL+C`. - -The crawling may take some days (48 hours in our test) and it depends on the -network environment and #threads set in the config. As the crawling tool won't -stop automatically, it is not straightforward to check the progress. We suggest -to terminate the job if there are no new articles crawled in a short time period -(e.g., 10 minutes) by running -```shell -$ find /tmp/nhnet -type f | wc -l -``` -Please note that it is expected that some URLs are no longer available on the -web as time goes by. 
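-
-Since the crawler itself reports no progress, the stopping rule above can be
-automated with a small watcher script. This is only a sketch: the `/tmp/nhnet`
-output directory and the 10-minute quiet window come from the text above, while
-the one-minute polling interval is an arbitrary choice.
-
-```python
-import os
-import time
-
-OUT_DIR = "/tmp/nhnet"   # default news-please output directory (see above)
-QUIET_PERIOD = 10 * 60   # stop once no new files appear for 10 minutes
-
-
-def count_files(root):
-    return sum(len(files) for _, _, files in os.walk(root))
-
-
-last_count, last_change = count_files(OUT_DIR), time.time()
-while time.time() - last_change < QUIET_PERIOD:
-    time.sleep(60)
-    current = count_files(OUT_DIR)
-    if current != last_count:
-        last_count, last_change = current, time.time()
-    print(f"{current} articles crawled so far")
-print("No new articles for 10 minutes; it should be safe to stop the crawler.")
-```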
- -### Data Processing - -Given the crawled articles under `/tmp/nhnet/`, we would like to transform these -textual articles into a set of `TFRecord` files containing serialized -tensorflow.Example protocol buffers, with feature keys following the BERT -[[2]](#2) tradition but is extended for multiple text segments. We will later -use these processed TFRecords for training and evaluation. - -To do this, please first download a [BERT pretrained checkpoint](https://github.com/tensorflow/models/tree/master/official/nlp/bert#access-to-pretrained-checkpoints) -(`BERT-Base,Uncased` preferred for efficiency) and decompress the `tar.gz` file. -We need the vocabulary file and later use the checkpoint for NHNet -initialization. - -Next, we can run the following data preprocess script which may take a few hours - to read files and tokenize article content. - - -```shell -# Recall that we use DATA_FOLDER=/path/to/downloaded_dataset. -$ python3 raw_data_preprocess.py \ - -crawled_articles=/tmp/nhnet \ - -vocab=/path/to/bert_checkpoint/vocab.txt \ - -do_lower_case=True \ - -len_title=15 \ - -len_passage=200 \ - -max_num_articles=5 \ - -data_folder=$DATA_FOLDER -``` - -This python script will export processed train/valid/eval files under -`$DATA_FOLDER/processed/`. - -## Training - -Please first install TensorFlow 2 and Tensorflow Model Garden following the -[requirments section](https://github.com/tensorflow/models/tree/master/official#requirements). - -### CPU/GPU -```shell -$ python3 trainer.py \ - --mode=train_and_eval \ - --vocab=/path/to/bert_checkpoint/vocab.txt \ - --init_checkpoint=/path/to/bert_checkpoint/bert_model.ckpt \ - --params_override='init_from_bert2bert=false' \ - --train_file_pattern=$DATA_FOLDER/processed/train.tfrecord* \ - --model_dir=/path/to/output/model \ - --len_title=15 \ - --len_passage=200 \ - --max_num_articles=5 \ - --model_type=nhnet \ - --train_batch_size=16 \ - --train_steps=10000 \ - --steps_per_loop=1 \ - --checkpoint_interval=100 -``` - -### TPU -```shell -$ python3 trainer.py \ - --mode=train_and_eval \ - --vocab=/path/to/bert_checkpoint/vocab.txt \ - --init_checkpoint=/path/to/bert_checkpoint/bert_model.ckpt \ - --params_override='init_from_bert2bert=false' \ - --train_file_pattern=$DATA_FOLDER/processed/train.tfrecord* \ - --model_dir=/path/to/output/model \ - --len_title=15 \ - --len_passage=200 \ - --max_num_articles=5 \ - --model_type=nhnet \ - --train_batch_size=1024 \ - --train_steps=10000 \ - --steps_per_loop=1000 \ - --checkpoint_interval=1000 \ - --distribution_strategy=tpu \ - --tpu=grpc://${TPU_IP_ADDRESS}:8470 -``` -In the paper, we train more than 10k steps with batch size set as 1024 with -TPU-v3-64. - -Note that, `trainer.py` also supports `train` mode and continuous `eval` mode. -For large scale TPU training, we recommend the have a process running the -`train` mode and another process running the continuous `eval` mode which can -runs on GPUs. -This is the setting we commonly used for large-scale experiments, because `eval` -will be non-blocking to the expensive training load. - -### Metrics -**Note: the metrics reported by `evaluation.py` are approximated on -word-piece level rather than the real string tokens. Some metrics like BLEU -scores can be off.** - -We will release a colab to evaluate results on string-level soon. 
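-
-For a rough string-level check in the meantime, the word pieces can be joined
-back into plain text before scoring. A minimal sketch, assuming BERT's
-WordPiece convention in which a leading `##` marks a continuation piece (the
-function name is illustrative, not part of `evaluation.py`):
-
-```python
-def detokenize_wordpieces(pieces):
-    """Join BERT word pieces back into a plain string."""
-    words = []
-    for piece in pieces:
-        if piece.startswith("##") and words:
-            words[-1] += piece[2:]  # glue the continuation onto the last word
-        else:
-            words.append(piece)
-    return " ".join(words)
-
-
-print(detokenize_wordpieces(["head", "##line", "generation"]))
-# -> "headline generation"
-```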
- -## References - -<a id="1">[1]</a> Xiaotao Gu, Yuning Mao, Jiawei Han, Jialu Liu, You Wu, Cong -Yu, Daniel Finnie, Hongkun Yu, Jiaqi Zhai and Nicholas Zukoski "Generating -Representative Headlines for News Stories": https://arxiv.org/abs/2001.09386. -World Wide Web Conf. (WWW’2020). - -<a id="2">[2]</a> Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina -Toutanova "BERT: Pre-training of Deep Bidirectional Transformers for Language -Understanding": https://arxiv.org/abs/1810.04805. diff --git a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/dataloader_utils.py b/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/dataloader_utils.py deleted file mode 100644 index da82203511da50393a352bf75ee56f25c6626c05..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/dataloader_utils.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Utility functions for dataloader.""" - -import tensorflow as tf - -from official.vision.detection.utils import input_utils - - -def process_source_id(source_id): - """Processes source_id to the right format.""" - if source_id.dtype == tf.string: - source_id = tf.cast(tf.strings.to_number(source_id), tf.int64) - with tf.control_dependencies([source_id]): - source_id = tf.cond( - pred=tf.equal(tf.size(input=source_id), 0), - true_fn=lambda: tf.cast(tf.constant(-1), tf.int64), - false_fn=lambda: tf.identity(source_id)) - return source_id - - -def pad_groundtruths_to_fixed_size(gt, n): - """Pads the first dimension of groundtruths labels to the fixed size.""" - gt['boxes'] = input_utils.pad_to_fixed_size(gt['boxes'], n, -1) - gt['is_crowds'] = input_utils.pad_to_fixed_size(gt['is_crowds'], n, 0) - gt['areas'] = input_utils.pad_to_fixed_size(gt['areas'], n, -1) - gt['classes'] = input_utils.pad_to_fixed_size(gt['classes'], n, -1) - return gt diff --git a/spaces/Naveejnk/MyGenAIChatBot/README.md b/spaces/Naveejnk/MyGenAIChatBot/README.md deleted file mode 100644 index 978fc88177d4ef1e6fa72a273191c68190578b33..0000000000000000000000000000000000000000 --- a/spaces/Naveejnk/MyGenAIChatBot/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: MyGenAIChatBot -emoji: 😻 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Nephele/bert-vits2-multi-voice/losses.py b/spaces/Nephele/bert-vits2-multi-voice/losses.py deleted file mode 100644 index fb22a0e834dd87edaa37bb8190eee2c3c7abe0d5..0000000000000000000000000000000000000000 --- a/spaces/Nephele/bert-vits2-multi-voice/losses.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch.nn import functional as F - -import commons - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, 
fmap_g):
-        for rl, gl in zip(dr, dg):
-            rl = rl.float().detach()
-            gl = gl.float()
-            loss += torch.mean(torch.abs(rl - gl))
-
-    return loss * 2
-
-
-def discriminator_loss(disc_real_outputs, disc_generated_outputs):
-    loss = 0
-    r_losses = []
-    g_losses = []
-    for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
-        dr = dr.float()
-        dg = dg.float()
-        r_loss = torch.mean((1-dr)**2)
-        g_loss = torch.mean(dg**2)
-        loss += (r_loss + g_loss)
-        r_losses.append(r_loss.item())
-        g_losses.append(g_loss.item())
-
-    return loss, r_losses, g_losses
-
-
-def generator_loss(disc_outputs):
-    loss = 0
-    gen_losses = []
-    for dg in disc_outputs:
-        dg = dg.float()
-        l = torch.mean((1-dg)**2)
-        gen_losses.append(l)
-        loss += l
-
-    return loss, gen_losses
-
-
-def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
-    """
-    z_p, logs_q: [b, h, t_t]
-    m_p, logs_p: [b, h, t_t]
-    """
-    z_p = z_p.float()
-    logs_q = logs_q.float()
-    m_p = m_p.float()
-    logs_p = logs_p.float()
-    z_mask = z_mask.float()
-
-    kl = logs_p - logs_q - 0.5
-    kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)
-    kl = torch.sum(kl * z_mask)
-    l = kl / torch.sum(z_mask)
-    return l
diff --git a/spaces/NoCrypt/mikuTTS/vc_infer_pipeline.py b/spaces/NoCrypt/mikuTTS/vc_infer_pipeline.py
deleted file mode 100644
index ed2aacd1866379563006e3cf4dd40472f7ab4692..0000000000000000000000000000000000000000
--- a/spaces/NoCrypt/mikuTTS/vc_infer_pipeline.py
+++ /dev/null
@@ -1,451 +0,0 @@
-import os
-import sys
-import traceback
-from functools import lru_cache
-from time import time as ttime
-
-import faiss
-import librosa
-import numpy as np
-import parselmouth
-import pyworld
-import torch
-import torch.nn.functional as F
-import torchcrepe
-from scipy import signal
-
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-
-bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
-
-input_audio_path2wav = {}
-
-
-@lru_cache
-def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
-    audio = input_audio_path2wav[input_audio_path]
-    f0, t = pyworld.harvest(
-        audio,
-        fs=fs,
-        f0_ceil=f0max,
-        f0_floor=f0min,
-        frame_period=frame_period,
-    )
-    f0 = pyworld.stonemask(audio, f0, t, fs)
-    return f0
-
-
-def change_rms(data1, sr1, data2, sr2, rate):  # data1 is the input audio, data2 is the output audio; rate is data2's proportion in the mix
-    # print(data1.max(),data2.max())
-    rms1 = librosa.feature.rms(
-        y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2
-    )  # one RMS point every half second
-    rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
-    rms1 = torch.from_numpy(rms1)
-    rms1 = F.interpolate(
-        rms1.unsqueeze(0), size=data2.shape[0], mode="linear"
-    ).squeeze()
-    rms2 = torch.from_numpy(rms2)
-    rms2 = F.interpolate(
-        rms2.unsqueeze(0), size=data2.shape[0], mode="linear"
-    ).squeeze()
-    rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
-    data2 *= (
-        torch.pow(rms1, torch.tensor(1 - rate))
-        * torch.pow(rms2, torch.tensor(rate - 1))
-    ).numpy()
-    return data2
-
-
-class VC(object):
-    def __init__(self, tgt_sr, config):
-        self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
-            config.x_pad,
-            config.x_query,
-            config.x_center,
-            config.x_max,
-            config.is_half,
-        )
-        self.sr = 16000  # hubert input sampling rate
-        self.window = 160  # samples per frame
-        self.t_pad = self.sr * self.x_pad  # padding time added before and after each segment
-        self.t_pad_tgt = tgt_sr * self.x_pad
-        self.t_pad2 = self.t_pad * 2
-        self.t_query = self.sr * self.x_query  # query window length before and after a candidate cut point
-        self.t_center = self.sr * self.x_center  # position at which to search for a cut point
-        self.t_max = self.sr * self.x_max  # duration threshold below which no cut-point query is needed
-        self.device = config.device
-
-    def get_f0(
-        self,
-        input_audio_path,
-        x,
-        p_len,
-        f0_up_key,
-        f0_method,
-        filter_radius,
-        inp_f0=None,
-    ):
-        global input_audio_path2wav
-        time_step = self.window / self.sr * 1000
-        f0_min = 50
-        f0_max = 1100
-        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
-        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-        if f0_method == "pm":
-            f0 = (
-                parselmouth.Sound(x, self.sr)
-                .to_pitch_ac(
-                    time_step=time_step / 1000,
-                    voicing_threshold=0.6,
-                    pitch_floor=f0_min,
-                    pitch_ceiling=f0_max,
-                )
-                .selected_array["frequency"]
-            )
-            pad_size = (p_len - len(f0) + 1) // 2
-            if pad_size > 0 or p_len - len(f0) - pad_size > 0:
-                f0 = np.pad(
-                    f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
-                )
-        elif f0_method == "harvest":
-            input_audio_path2wav[input_audio_path] = x.astype(np.double)
-            f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)
-            if filter_radius > 2:
-                f0 = signal.medfilt(f0, 3)
-        elif f0_method == "crepe":
-            model = "full"
-            # Pick a batch size that doesn't cause memory errors on your gpu
-            batch_size = 512
-            # Compute pitch using first gpu
-            audio = torch.tensor(np.copy(x))[None].float()
-            f0, pd = torchcrepe.predict(
-                audio,
-                self.sr,
-                self.window,
-                f0_min,
-                f0_max,
-                model,
-                batch_size=batch_size,
-                device=self.device,
-                return_periodicity=True,
-            )
-            pd = torchcrepe.filter.median(pd, 3)
-            f0 = torchcrepe.filter.mean(f0, 3)
-            f0[pd < 0.1] = 0
-            f0 = f0[0].cpu().numpy()
-        elif f0_method == "rmvpe":
-            if hasattr(self, "model_rmvpe") == False:
-                from rmvpe import RMVPE
-
-                print("loading rmvpe model")
-                self.model_rmvpe = RMVPE(
-                    "rmvpe.pt", is_half=self.is_half, device=self.device
-                )
-            f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
-        f0 *= pow(2, f0_up_key / 12)
-        # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
-        tf0 = self.sr // self.window  # f0 points per second
-        if inp_f0 is not None:
-            delta_t = np.round(
-                (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
-            ).astype("int16")
-            replace_f0 = np.interp(
-                list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
-            )
-            shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
-            f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
-                :shape
-            ]
-        # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
-        f0bak = f0.copy()
-        f0_mel = 1127 * np.log(1 + f0 / 700)
-        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
-            f0_mel_max - f0_mel_min
-        ) + 1
-        f0_mel[f0_mel <= 1] = 1
-        f0_mel[f0_mel > 255] = 255
-        f0_coarse = np.rint(f0_mel).astype(np.int64)
-        return f0_coarse, f0bak  # 1-0
-
-    def vc(
-        self,
-        model,
-        net_g,
-        sid,
-        audio0,
-        pitch,
-        pitchf,
-        times,
-        index,
-        big_npy,
-        index_rate,
-        version,
-        protect,
-    ):  # ,file_index,file_big_npy
-        feats = torch.from_numpy(audio0)
-        if self.is_half:
-            feats = feats.half()
-        else:
-            feats = feats.float()
-        if feats.dim() == 2:  # double channels
-            feats = feats.mean(-1)
-        assert feats.dim() == 1, feats.dim()
-        feats = feats.view(1, -1)
-        padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
-
-        inputs = {
-            "source": feats.to(self.device),
-            "padding_mask": padding_mask,
-            "output_layer": 9 if version == "v1" else 12,
-        }
-        t0 = ttime()
-        with torch.no_grad():
-            logits = model.extract_features(**inputs)
-            feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
-        if protect < 0.5 and pitch != None and pitchf != None:
-            feats0 = feats.clone()
-        if (
-            isinstance(index, type(None)) == False
-            and isinstance(big_npy, type(None)) == False
-            and index_rate != 0
-        ):
-            npy =
feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - - # _, I = index.search(npy, 1) - # npy = big_npy[I.squeeze()] - - score, ix = index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute( - 0, 2, 1 - ) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - - if protect < 0.5 and pitch != None and pitchf != None: - pitchff = pitchf.clone() - pitchff[pitchf > 0] = 1 - pitchff[pitchf < 1] = protect - pitchff = pitchff.unsqueeze(-1) - feats = feats * pitchff + feats0 * (1 - pitchff) - feats = feats.to(feats0.dtype) - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]) - .data.cpu() - .float() - .numpy() - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy() - ) - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def pipeline( - self, - model, - net_g, - sid, - audio, - input_audio_path, - times, - f0_up_key, - f0_method, - file_index, - # file_big_npy, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - f0_file=None, - ): - if ( - file_index != "" - # and file_big_npy != "" - # and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - # big_npy = np.load(file_big_npy) - big_npy = index.reconstruct_n(0, index.ntotal) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0( - input_audio_path, - audio_pad, - p_len, - f0_up_key, - 
f0_method, - filter_radius, - inp_f0, - ) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - if self.device == "mps": - pitchf = pitchf.astype(np.float32) - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - for t in opt_ts: - t = t // self.window * self.window - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - pitch[:, s // self.window : (t + self.t_pad2) // self.window], - pitchf[:, s // self.window : (t + self.t_pad2) // self.window], - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - s = t - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - pitch[:, t // self.window :] if t is not None else pitch, - pitchf[:, t // self.window :] if t is not None else pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - audio_opt = np.concatenate(audio_opt) - if rms_mix_rate != 1: - audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate) - if resample_sr >= 16000 and tgt_sr != resample_sr: - audio_opt = librosa.resample( - audio_opt, orig_sr=tgt_sr, target_sr=resample_sr - ) - audio_max = np.abs(audio_opt).max() / 0.99 - max_int16 = 32768 - if audio_max > 1: - max_int16 /= audio_max - audio_opt = (audio_opt * max_int16).astype(np.int16) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/criterions/wav2vec_criterion.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/criterions/wav2vec_criterion.py deleted file mode 100644 index e04786cc3b75517cefd06303f98f8536f9279311..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/criterions/wav2vec_criterion.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math -from dataclasses import dataclass, field -from typing import List, Optional - -import torch -import torch.nn.functional as F -from fairseq import metrics, utils -from fairseq.criterions import FairseqCriterion, register_criterion -from fairseq.dataclass import FairseqDataclass -from fairseq.logging.meters import safe_round -from fairseq.utils import is_xla_tensor - - -@dataclass -class Wav2VecCriterionConfig(FairseqDataclass): - infonce: bool = field( - default=False, - metadata={ - "help": "if set, uses cross entropy instead of binary cross entropy (i.e. 
InfoNCE loss)" - }, - ) - loss_weights: Optional[List[float]] = field( - default=None, - metadata={"help": "weights for additional loss terms (not first one)"}, - ) - log_keys: List[str] = field( - default_factory=lambda: [], - metadata={"help": "output keys to log"}, - ) - -@register_criterion("wav2vec", dataclass=Wav2VecCriterionConfig) -class Wav2vecCriterion(FairseqCriterion): - def __init__(self, task, infonce=False, loss_weights=None, log_keys=None): - super().__init__(task) - self.infonce = infonce - self.loss_weights = loss_weights - self.log_keys = [] if log_keys is None else log_keys - - def forward(self, model, sample, reduce=True): - """Compute the loss for the given sample. - - Returns a tuple with three elements: - 1) the loss - 2) the sample size, which is used as the denominator for the gradient - 3) logging outputs to display while training - """ - net_output = model(**sample["net_input"]) - logits = model.get_logits(net_output).float() - target = model.get_targets(sample, net_output) - self.xla = is_xla_tensor(logits) - - # XXX: handle weights on xla. - weights = None - if hasattr(model, "get_target_weights") and not self.infonce: - weights = model.get_target_weights(target, net_output) - if torch.is_tensor(weights): - weights = weights.float() - - losses = [] - - reduction = "none" if ((not reduce) or self.xla) else "sum" - if self.infonce: - loss = F.cross_entropy(logits, target, reduction=reduction) - else: - loss = F.binary_cross_entropy_with_logits( - logits, target.float(), weights, reduction=reduction - ) - - if self.xla: - # tpu-comment: since dynamic shapes lead to recompilations on xla, - # we don't shrink tensors using mask_indices. - # Instead, we use mask indices to adjust loss. - mi = ( - sample['net_input']['mask_indices'] - .transpose(0, 1) # logits are transposed in `model.get_logits` - .reshape(logits.size(0)) - ) - loss = (loss * mi).sum() if reduce else (loss * mi) - - if 'sample_size' in sample: - sample_size = sample['sample_size'] - elif 'mask_indices' in sample['net_input']: - sample_size = sample['net_input']['mask_indices'].sum() - else: - sample_size = target.numel() if self.infonce else target.long().sum().item() - losses.append(loss.detach().clone()) - - if self.loss_weights is not None: - assert hasattr(model, "get_extra_losses") - extra_losses = model.get_extra_losses(net_output) - if torch.is_tensor(extra_losses): - extra_losses = [extra_losses] - if len(self.loss_weights) == 1 and len(extra_losses) != 1: - self.loss_weights = [self.loss_weights[0]] * len(extra_losses) - assert len(extra_losses) == len( - self.loss_weights - ), f"{len(extra_losses)}, {len(self.loss_weights)}" - for p, coef in zip(extra_losses, self.loss_weights): - if coef != 0 and p is not None: - p = coef * p.float() * sample_size - loss += p - losses.append(p) - - logging_output = { - "loss": loss.item() if (reduce and not self.xla) else loss.detach(), - "ntokens": sample_size, - "nsentences": sample["id"].numel(), - "sample_size": sample_size, - } - - for lk in self.log_keys: - # Only store "logits" and "target" for computing MAP and MAUC - # during validation - if lk == "logits": - if not self.training: - logging_output["logits"] = logits.cpu().numpy() - elif lk == "target": - if not self.training: - # If the targets have been mixed with the predictions of - # teacher models, find the original targets - if hasattr(model, "get_original_targets"): - original_target = model.get_original_targets(sample, net_output) - else: - original_target = target - 
logging_output["target"] = original_target.cpu().numpy()
-            elif lk in net_output:
-                value = net_output[lk]
-                if not is_xla_tensor(value):
-                    value = float(value)
-                logging_output[lk] = value
-
-        if len(losses) > 1:
-            for i, l in enumerate(losses):
-                logging_output[f"loss_{i}"] = l.item() if not self.xla else l.detach()
-
-        if self.infonce:
-            with torch.no_grad():
-                if logits.numel() == 0:
-                    corr = 0
-                    count = 0
-                else:
-                    assert logits.dim() > 1, logits.shape
-                    max = logits.argmax(-1) == 0
-                    min = logits.argmin(-1) == 0
-                    if is_xla_tensor(logits):
-                        max, min = max * mi, min * mi
-                        both = max & min
-                        corr = max.long().sum() - both.long().sum()
-                        count = mi.sum()
-                    else:
-                        both = max & min
-                        corr = max.long().sum().item() - both.long().sum().item()
-                        count = float(max.numel())
-
-            logging_output["correct"] = corr
-            logging_output["count"] = count
-
-        return loss, sample_size, logging_output
-
-    @staticmethod
-    def reduce_metrics(logging_outputs) -> None:
-        """Aggregate logging outputs from data parallel training."""
-        loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
-        ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
-        nsentences = utils.item(
-            sum(log.get("nsentences", 0) for log in logging_outputs)
-        )
-        sample_size = utils.item(
-            sum(log.get("sample_size", 0) for log in logging_outputs)
-        )
-
-        metrics.log_scalar(
-            "loss", loss_sum / (sample_size or 1) / math.log(2), sample_size, round=3
-        )
-        metrics.log_scalar("ntokens", ntokens)
-        metrics.log_scalar("nsentences", nsentences)
-
-        correct = sum(log.get("correct", 0) for log in logging_outputs)
-        metrics.log_scalar("_correct", correct)
-
-        total = sum(log.get("count", 0) for log in logging_outputs)
-        metrics.log_scalar("_total", total)
-
-        if total > 0:
-            metrics.log_derived(
-                "accuracy",
-                lambda meters: safe_round(
-                    meters["_correct"].sum / meters["_total"].sum, 5
-                )
-                if meters["_total"].sum > 0
-                else float("nan"),
-            )
-
-        builtin_keys = {
-            "loss",
-            "ntokens",
-            "nsentences",
-            "sample_size",
-            "correct",
-            "count",
-        }
-
-        for k in logging_outputs[0]:
-            if k not in builtin_keys:
-                val = sum(log.get(k, 0) for log in logging_outputs)
-                if k.startswith("loss"):
-                    metrics.log_scalar(
-                        k, val / (sample_size or 1) / math.log(2), sample_size, round=3
-                    )
-                else:
-                    metrics.log_scalar(k, val / len(logging_outputs), round=3)
-
-    # FIXME: revert when gather based xla reduction is implemented
-    #@staticmethod
-    #def logging_outputs_can_be_summed() -> bool:
-    def logging_outputs_can_be_summed(self) -> bool:
-        """
-        Whether the logging outputs returned by `forward` can be summed
-        across workers prior to calling `reduce_metrics`. Setting this
-        to True will improve distributed training speed.
-        """
-        # XXX: Gather based reduction not implemented for xla yet.
-        # So we fall to sum based reduction for xla.
-        return self.xla
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_synthesis/evaluation/get_eval_manifest.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_synthesis/evaluation/get_eval_manifest.py
deleted file mode 100644
index a28cd607a096844438f6a3ba6b007d94d67d1bc8..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_synthesis/evaluation/get_eval_manifest.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
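The script below assembles a tab-separated evaluation spec (`id`, `syn`, `ref`, `text`, `speaker`) from a waveform-generation directory and an audio manifest. For orientation, a hypothetical invocation could look like the following; the flags mirror the `argparse` definitions at the bottom of the file, and all paths are placeholders:

```shell
$ python3 get_eval_manifest.py \
    --generation-root /path/to/generate_waveform_output \
    --audio-manifest /path/to/audio_manifest.tsv \
    --output-path /path/to/eval_spec.tsv \
    --vocoder griffin_lim \
    --sample-rate 22050 \
    --audio-format wav
```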
- - -import csv -from pathlib import Path - - -def main(args): - """ - `uid syn ref text` - """ - in_root = Path(args.generation_root).resolve() - ext = args.audio_format - with open(args.audio_manifest) as f, open(args.output_path, "w") as f_out: - reader = csv.DictReader( - f, delimiter="\t", quotechar=None, doublequote=False, - lineterminator="\n", quoting=csv.QUOTE_NONE - ) - header = ["id", "syn", "ref", "text", "speaker"] - f_out.write("\t".join(header) + "\n") - for row in reader: - dir_name = f"{ext}_{args.sample_rate}hz_{args.vocoder}" - id_ = row["id"] - syn = (in_root / dir_name / f"{id_}.{ext}").as_posix() - ref = row["audio"] - if args.use_resynthesized_target: - ref = (in_root / f"{dir_name}_tgt" / f"{id_}.{ext}").as_posix() - sample = [id_, syn, ref, row["tgt_text"], row["speaker"]] - f_out.write("\t".join(sample) + "\n") - print(f"wrote evaluation file to {args.output_path}") - - -if __name__ == "__main__": - import argparse - parser = argparse.ArgumentParser() - parser.add_argument( - "--generation-root", help="output directory for generate_waveform.py" - ) - parser.add_argument( - "--audio-manifest", - help="used to determine the original utterance ID and text" - ) - parser.add_argument( - "--output-path", help="path to output evaluation spec file" - ) - parser.add_argument( - "--use-resynthesized-target", action="store_true", - help="use resynthesized reference instead of the original audio" - ) - parser.add_argument("--vocoder", type=str, default="griffin_lim") - parser.add_argument("--sample-rate", type=int, default=22_050) - parser.add_argument("--audio-format", type=str, default="wav") - args = parser.parse_args() - - main(args) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/dynamic_convolution.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/dynamic_convolution.py deleted file mode 100644 index 0121d453b9e026f5128dd41fce691aa1b4486448..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/dynamic_convolution.py +++ /dev/null @@ -1,310 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
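The `DynamicConv1dTBC` module defined below documents its expected tensor layout only in prose, so here is a minimal, self-contained usage sketch. It is an illustration rather than part of the original file: the import path assumes the standard fairseq layout that this file mirrors, and the sizes are arbitrary.

```python
import torch

from fairseq.modules.dynamic_convolution import DynamicConv1dTBC

# DynamicConv1dTBC consumes and produces T x B x C tensors.
conv = DynamicConv1dTBC(input_size=16, kernel_size=3, padding_l=2, num_heads=4)
x = torch.randn(10, 2, 16)  # (timesteps, batch_size, input_size)
y = conv(x)                 # output keeps the same T x B x C shape
assert y.shape == x.shape
```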
- -import torch -import torch.nn as nn -import torch.nn.functional as F -from fairseq import utils -from fairseq.incremental_decoding_utils import with_incremental_state -from fairseq.modules.fairseq_dropout import FairseqDropout - -from .unfold import unfold1d - - -def DynamicConv( - input_size, - kernel_size=1, - padding_l=None, - num_heads=1, - weight_dropout=0.0, - weight_softmax=False, - renorm_padding=False, - bias=False, - conv_bias=False, - query_size=None, - in_proj=False, -): - if torch.cuda.is_available(): - try: - from fairseq.modules.dynamicconv_layer import DynamicconvLayer - - return DynamicconvLayer( - input_size, - kernel_size=kernel_size, - padding_l=padding_l, - num_heads=num_heads, - weight_dropout=weight_dropout, - weight_softmax=weight_softmax, - renorm_padding=renorm_padding, - bias=bias, - conv_bias=conv_bias, - query_size=query_size, - ) - except ImportError as e: - print(e) - return DynamicConv1dTBC( - input_size, - kernel_size=kernel_size, - padding_l=padding_l, - num_heads=num_heads, - weight_dropout=weight_dropout, - weight_softmax=weight_softmax, - renorm_padding=renorm_padding, - bias=bias, - conv_bias=conv_bias, - query_size=query_size, - ) - - -def Linear(in_features, out_features, bias=True): - m = nn.Linear(in_features, out_features, bias) - nn.init.xavier_uniform_(m.weight) - if bias: - nn.init.constant_(m.bias, 0.0) - return m - - -@with_incremental_state -class DynamicConv1dTBC(nn.Module): - """Dynamic lightweight convolution taking T x B x C inputs - Args: - input_size: # of channels of the input - kernel_size: convolution channels - padding_l: padding to the left when using "same" padding - num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size) - weight_dropout: the drop rate of the DropConnect to drop the weight - weight_softmax: normalize the weight with softmax before the convolution - renorm_padding: re-normalize the filters to ignore the padded part (only the non-padding parts sum up to 1) - bias: use bias - conv_bias: bias of the convolution - query_size: specified when feeding a different input as the query - in_proj: project the input and generate the filter together - - Shape: - Input: TxBxC, i.e. (timesteps, batch_size, input_size) - Output: TxBxC, i.e. 
(timesteps, batch_size, input_size) - - Attributes: - weight: the learnable weights of the module of shape - `(num_heads, 1, kernel_size)` - bias: the learnable bias of the module of shape `(input_size)` - """ - - def __init__( - self, - input_size, - kernel_size=1, - padding_l=None, - num_heads=1, - weight_dropout=0.0, - weight_softmax=False, - renorm_padding=False, - bias=False, - conv_bias=False, - query_size=None, - in_proj=False, - ): - super().__init__() - self.input_size = input_size - self.query_size = input_size if query_size is None else query_size - self.kernel_size = kernel_size - self.padding_l = padding_l - self.num_heads = num_heads - self.weight_dropout_module = FairseqDropout( - weight_dropout, module_name=self.__class__.__name__ - ) - self.weight_softmax = weight_softmax - self.renorm_padding = renorm_padding - - if in_proj: - self.weight_linear = Linear( - self.input_size, self.input_size + num_heads * kernel_size * 1 - ) - else: - self.weight_linear = Linear( - self.query_size, num_heads * kernel_size * 1, bias=bias - ) - if conv_bias: - self.conv_bias = nn.Parameter(torch.Tensor(input_size)) - else: - self.conv_bias = None - self.reset_parameters() - - @property - def in_proj(self): - return ( - self.weight_linear.out_features - == self.input_size + self.num_heads * self.kernel_size - ) - - def reset_parameters(self): - self.weight_linear.reset_parameters() - if self.conv_bias is not None: - nn.init.constant_(self.conv_bias, 0.0) - - def forward(self, x, incremental_state=None, query=None, unfold=None): - """Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C - args: - x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size) - incremental_state: A dict to keep the state - unfold: unfold the input or not. If not, we use the matrix trick instead - query: use the specified query to predict the conv filters - """ - unfold = ( - x.size(0) > 512 if unfold is None else unfold - ) # use unfold mode as default for long sequence to save memory - unfold = unfold or (incremental_state is not None) - assert query is None or not self.in_proj - - if query is None: - query = x - if unfold: - output = self._forward_unfolded(x, incremental_state, query) - else: - output = self._forward_expanded(x, incremental_state, query) - - if self.conv_bias is not None: - output = output + self.conv_bias.view(1, 1, -1) - return output - - def _forward_unfolded(self, x, incremental_state, query): - """The conventional implementation of convolutions. 
- Unfolding the input by having a window shifting to the right.""" - T, B, C = x.size() - K, H = self.kernel_size, self.num_heads - R = C // H - assert R * H == C == self.input_size - - if self.in_proj: - proj = self.weight_linear(x) - x = proj.narrow(2, 0, self.input_size).contiguous() - weight = ( - proj.narrow(2, self.input_size, H * K).contiguous().view(T * B * H, -1) - ) - else: - weight = self.weight_linear(query).view(T * B * H, -1) - - # renorm_padding is only implemented in _forward_expanded - assert not self.renorm_padding or incremental_state is not None - - if incremental_state is not None: - input_buffer = self._get_input_buffer(incremental_state) - if input_buffer is None: - input_buffer = x.new() - x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) - if self.kernel_size > 1: - self._set_input_buffer( - incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :] - ) - x_unfold = x_unfold.view(T * B * H, R, -1) - else: - padding_l = self.padding_l - if K > T and padding_l == K - 1: - weight = weight.narrow(1, K - T, T) - K, padding_l = T, T - 1 - # unfold the input: T x B x C --> T' x B x C x K - x_unfold = unfold1d(x, K, padding_l, 0) - x_unfold = x_unfold.view(T * B * H, R, K) - - if self.weight_softmax and not self.renorm_padding: - weight = F.softmax(weight, dim=1) - weight = weight.narrow(1, 0, K) - - if incremental_state is not None: - weight = weight[:, -x_unfold.size(2) :] - K = weight.size(1) - - if self.weight_softmax and self.renorm_padding: - weight = F.softmax(weight, dim=1) - - weight = self.weight_dropout_module(weight, inplace=False) - - output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1 - output = output.view(T, B, C) - return output - - def _forward_expanded(self, x, incremental_stat, query): - """Turn the convolution filters into band matrices and do matrix multiplication. - This is faster when the sequence is short, but less memory efficient. - This is not used in the decoder during inference. 
- """ - T, B, C = x.size() - K, H = self.kernel_size, self.num_heads - R = C // H - assert R * H == C == self.input_size - if self.in_proj: - proj = self.weight_linear(x) - x = proj.narrow(2, 0, self.input_size).contiguous() - weight = ( - proj.narrow(2, self.input_size, H * K).contiguous().view(T * B * H, -1) - ) - else: - weight = self.weight_linear(query).view(T * B * H, -1) - - if not self.renorm_padding: - if self.weight_softmax: - weight = F.softmax(weight, dim=1) - weight = self.weight_dropout_module(weight, inplace=False) - weight = weight.narrow(1, 0, K).contiguous() - weight = weight.view(T, B * H, K).transpose(0, 1) - - x = x.view(T, B * H, R).transpose(0, 1) - if self.weight_softmax and self.renorm_padding: - # turn the convolution filters into band matrices - weight_expanded = weight.new(B * H, T, T + K - 1).fill_(float("-inf")) - weight_expanded.as_strided( - (B * H, T, K), (T * (T + K - 1), T + K, 1) - ).copy_(weight) - weight_expanded = weight_expanded.narrow(2, self.padding_l, T) - # normalize the weight over valid positions like self-attention - weight_expanded = F.softmax(weight_expanded, dim=2) - weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False) - else: - P = self.padding_l - # For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length - if K > T and P == K - 1: - weight = weight.narrow(2, K - T, T) - K, P = T, T - 1 - # turn the convolution filters into band matrices - weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False) - weight_expanded.as_strided( - (B * H, T, K), (T * (T + K - 1), T + K, 1) - ).copy_(weight) - weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T - output = torch.bmm(weight_expanded, x) - output = output.transpose(0, 1).contiguous().view(T, B, C) - return output - - def reorder_incremental_state(self, incremental_state, new_order): - input_buffer = self._get_input_buffer(incremental_state) - if input_buffer is not None: - input_buffer = input_buffer.index_select(1, new_order) - self._set_input_buffer(incremental_state, input_buffer) - - def _get_input_buffer(self, incremental_state): - return utils.get_incremental_state(self, incremental_state, "input_buffer") - - def _set_input_buffer(self, incremental_state, new_buffer): - return utils.set_incremental_state( - self, incremental_state, "input_buffer", new_buffer - ) - - def extra_repr(self): - s = "{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, conv_bias={}, renorm_padding={}, in_proj={}".format( - self.input_size, - self.kernel_size, - self.padding_l, - self.num_heads, - self.weight_softmax, - self.conv_bias is not None, - self.renorm_padding, - self.in_proj, - ) - - if self.query_size != self.input_size: - s += ", query_size={}".format(self.query_size) - if self.weight_dropout_module.p > 0.0: - s += ", weight_dropout={}".format(self.weight_dropout_module.p) - return s diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/quantization_utils.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/quantization_utils.py deleted file mode 100644 index 11fc414c852b199b80a569bf024272535929abcc..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/quantization_utils.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
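Before the module itself, a worked example of the scheduling arithmetic performed by the `Quantizer` class below may help. The numbers are made up for illustration; only the divisibility rule and the schedule formula come from the code.

```python
# Hypothetical iterative-PQ schedule: 3 quantization stages over 30000 updates.
layers_to_quantize = ["stage_a", "stage_b", "stage_c"]  # placeholder stage specs
max_update = 30000

num_iterations = len(layers_to_quantize)
assert max_update % num_iterations == 0  # enforced by Quantizer.__init__
update_schedule = max_update // num_iterations
print(update_schedule)  # 10000

# step() fires once at startup (while quantization_step == 0) and again whenever
# num_updates is a positive multiple of update_schedule, i.e. at updates 10000
# and 20000 (the trigger at 30000 is a no-op once every stage is quantized), so
# each stage trains for 10000 updates.
```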
- -import logging - -from fairseq.modules.quantization import pq, quantization_options, scalar -from omegaconf import DictConfig - - -logger = logging.getLogger(__name__) - - -def quantize_model_scalar(model, model_cfg: DictConfig): - quant_noise_scalar = getattr(model_cfg, "quant_noise_scalar", 0) or 0 - if quant_noise_scalar > 0: - # quantize_model edits the model in place - scalar.quantize_model_(model, p=quant_noise_scalar, bits=8, update_step=1000) - return model - - -class Quantizer(object): - def __init__(self, config_path, max_epoch, max_update): - try: - import yaml - except ImportError: - raise ImportError("Please install yaml with: pip install yaml") - - # parse config - if config_path: - with open(config_path) as config_file: - config = quantization_options.parse_config_yaml( - yaml.safe_load(config_file) - ) - else: - config = quantization_options.parse_config_yaml({}) - - self.n_centroids_config = config["n_centroids"] - self.block_sizes_config = config["block_sizes"] - self.layers_to_quantize = config["layers_to_quantize"] - - # We assume that training will run for a fixed number of epochs - # (or updates) and that we should train for equal durations - # between iterations of PQ. - num_iterations = len(self.layers_to_quantize) - if max_epoch > 0: - assert max_epoch % num_iterations == 0, ( - "for iterative PQ, --max-epoch (={}) must be evenly divisible by " - "len(layers_to_quantize) (={})".format(max_epoch, num_iterations) - ) - self.epoch_schedule = max_epoch // num_iterations - else: - self.epoch_schedule = None - if max_update > 0: - assert max_update % num_iterations == 0, ( - "for iterative PQ, --max-update (={}) must be evenly divisible by " - "len(layers_to_quantize) (={})".format(max_update, num_iterations) - ) - self.update_schedule = max_update // num_iterations - else: - self.update_schedule = None - assert (self.epoch_schedule is not None) ^ ( - self.update_schedule is not None - ), "for iterative PQ, cannot specify both --max-update and --max-epoch" - - # 0 is a special value for quantization step, which will force - # the first call to begin_epoch() to call step() - self.quantization_step = 0 - - def set_trainer(self, trainer): - self.trainer = trainer - self.size_tracker = pq.SizeTracker(self.trainer.get_model()) - - def step(self): - """Move to the next stage of quantization.""" - if self.quantization_step >= len(self.layers_to_quantize): - # Maybe we just finished the last training step or we loaded - # a checkpoint for an iterative PQ model which previously - # finished training. Either way, don't quantize again. 
-            return
-
-        logger.info(
-            "quantizing model (step={}; layers_to_quantize[step]={})".format(
-                self.quantization_step, self.layers_to_quantize[self.quantization_step]
-            )
-        )
-        quantized_layers = pq.quantize_model_(
-            self.trainer.get_model(),
-            self.size_tracker,
-            self.layers_to_quantize,
-            self.block_sizes_config,
-            self.n_centroids_config,
-            step=self.quantization_step,
-        )
-        logger.info("quantized layers: {}".format(quantized_layers))
-        logger.info(self.size_tracker)
-
-        self.quantization_step += 1
-
-        # reinitialize the Trainer since model parameters have changed
-        self.trainer.reinitialize()
-
-    def begin_epoch(self, epoch):
-        """Called at the beginning of each epoch (epochs start at 1)."""
-        if (
-            (
-                self.epoch_schedule is not None
-                and epoch > 0
-                and (epoch - 1) % self.epoch_schedule == 0
-            )
-            # we always step once in the beginning, even if using
-            # update-based quantization
-            or self.quantization_step == 0
-        ):
-            self.step()
-
-    def step_update(self, num_updates):
-        """Called at the end of each step."""
-        if (
-            self.update_schedule is not None
-            and num_updates > 0
-            and num_updates % self.update_schedule == 0
-        ):
-            self.step()
-
-    def state_dict(self):
-        return {
-            "n_centroids_config": self.n_centroids_config,
-            "block_sizes_config": self.block_sizes_config,
-            "layers_to_quantize": self.layers_to_quantize,
-            "epoch_schedule": self.epoch_schedule,
-            "update_schedule": self.update_schedule,
-            "quantization_step": self.quantization_step,
-        }
-
-    def load_state_dict(self, state_dict):
-        self.n_centroids_config = state_dict["n_centroids_config"]
-        self.block_sizes_config = state_dict["block_sizes_config"]
-        self.layers_to_quantize = state_dict["layers_to_quantize"]
-        self.epoch_schedule = state_dict["epoch_schedule"]
-        self.update_schedule = state_dict["update_schedule"]
-        self.quantization_step = state_dict["quantization_step"]
diff --git a/spaces/OlaWod/FreeVC/speaker_encoder/visualizations.py b/spaces/OlaWod/FreeVC/speaker_encoder/visualizations.py
deleted file mode 100644
index ec00fc64d6e9fda2bb8e613531066ac824df1451..0000000000000000000000000000000000000000
--- a/spaces/OlaWod/FreeVC/speaker_encoder/visualizations.py
+++ /dev/null
@@ -1,178 +0,0 @@
-from speaker_encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset
-from datetime import datetime
-from time import perf_counter as timer
-import matplotlib.pyplot as plt
-import numpy as np
-# import webbrowser
-import visdom
-import umap
-
-colormap = np.array([
-    [76, 255, 0],
-    [0, 127, 70],
-    [255, 0, 0],
-    [255, 217, 38],
-    [0, 135, 255],
-    [165, 0, 165],
-    [255, 167, 255],
-    [0, 255, 255],
-    [255, 96, 38],
-    [142, 76, 0],
-    [33, 0, 127],
-    [0, 0, 0],
-    [183, 183, 183],
-], dtype=float) / 255
-
-
-class Visualizations:
-    def __init__(self, env_name=None, update_every=10, server="http://localhost", disabled=False):
-        # Tracking data
-        self.last_update_timestamp = timer()
-        self.update_every = update_every
-        self.step_times = []
-        self.losses = []
-        self.eers = []
-        print("Updating the visualizations every %d steps."
% update_every) - - # If visdom is disabled TODO: use a better paradigm for that - self.disabled = disabled - if self.disabled: - return - - # Set the environment name - now = str(datetime.now().strftime("%d-%m %Hh%M")) - if env_name is None: - self.env_name = now - else: - self.env_name = "%s (%s)" % (env_name, now) - - # Connect to visdom and open the corresponding window in the browser - try: - self.vis = visdom.Visdom(server, env=self.env_name, raise_exceptions=True) - except ConnectionError: - raise Exception("No visdom server detected. Run the command \"visdom\" in your CLI to " - "start it.") - # webbrowser.open("http://localhost:8097/env/" + self.env_name) - - # Create the windows - self.loss_win = None - self.eer_win = None - # self.lr_win = None - self.implementation_win = None - self.projection_win = None - self.implementation_string = "" - - def log_params(self): - if self.disabled: - return - from speaker_encoder import params_data - from speaker_encoder import params_model - param_string = "<b>Model parameters</b>:<br>" - for param_name in (p for p in dir(params_model) if not p.startswith("__")): - value = getattr(params_model, param_name) - param_string += "\t%s: %s<br>" % (param_name, value) - param_string += "<b>Data parameters</b>:<br>" - for param_name in (p for p in dir(params_data) if not p.startswith("__")): - value = getattr(params_data, param_name) - param_string += "\t%s: %s<br>" % (param_name, value) - self.vis.text(param_string, opts={"title": "Parameters"}) - - def log_dataset(self, dataset: SpeakerVerificationDataset): - if self.disabled: - return - dataset_string = "" - dataset_string += "<b>Speakers</b>: %s\n" % len(dataset.speakers) - dataset_string += "\n" + dataset.get_logs() - dataset_string = dataset_string.replace("\n", "<br>") - self.vis.text(dataset_string, opts={"title": "Dataset"}) - - def log_implementation(self, params): - if self.disabled: - return - implementation_string = "" - for param, value in params.items(): - implementation_string += "<b>%s</b>: %s\n" % (param, value) - implementation_string = implementation_string.replace("\n", "<br>") - self.implementation_string = implementation_string - self.implementation_win = self.vis.text( - implementation_string, - opts={"title": "Training implementation"} - ) - - def update(self, loss, eer, step): - # Update the tracking data - now = timer() - self.step_times.append(1000 * (now - self.last_update_timestamp)) - self.last_update_timestamp = now - self.losses.append(loss) - self.eers.append(eer) - print(".", end="") - - # Update the plots every <update_every> steps - if step % self.update_every != 0: - return - time_string = "Step time: mean: %5dms std: %5dms" % \ - (int(np.mean(self.step_times)), int(np.std(self.step_times))) - print("\nStep %6d Loss: %.4f EER: %.4f %s" % - (step, np.mean(self.losses), np.mean(self.eers), time_string)) - if not self.disabled: - self.loss_win = self.vis.line( - [np.mean(self.losses)], - [step], - win=self.loss_win, - update="append" if self.loss_win else None, - opts=dict( - legend=["Avg. loss"], - xlabel="Step", - ylabel="Loss", - title="Loss", - ) - ) - self.eer_win = self.vis.line( - [np.mean(self.eers)], - [step], - win=self.eer_win, - update="append" if self.eer_win else None, - opts=dict( - legend=["Avg. 
EER"], - xlabel="Step", - ylabel="EER", - title="Equal error rate" - ) - ) - if self.implementation_win is not None: - self.vis.text( - self.implementation_string + ("<b>%s</b>" % time_string), - win=self.implementation_win, - opts={"title": "Training implementation"}, - ) - - # Reset the tracking - self.losses.clear() - self.eers.clear() - self.step_times.clear() - - def draw_projections(self, embeds, utterances_per_speaker, step, out_fpath=None, - max_speakers=10): - max_speakers = min(max_speakers, len(colormap)) - embeds = embeds[:max_speakers * utterances_per_speaker] - - n_speakers = len(embeds) // utterances_per_speaker - ground_truth = np.repeat(np.arange(n_speakers), utterances_per_speaker) - colors = [colormap[i] for i in ground_truth] - - reducer = umap.UMAP() - projected = reducer.fit_transform(embeds) - plt.scatter(projected[:, 0], projected[:, 1], c=colors) - plt.gca().set_aspect("equal", "datalim") - plt.title("UMAP projection (step %d)" % step) - if not self.disabled: - self.projection_win = self.vis.matplot(plt, win=self.projection_win) - if out_fpath is not None: - plt.savefig(out_fpath) - plt.clf() - - def save(self): - if not self.disabled: - self.vis.save([self.env_name]) - \ No newline at end of file diff --git a/spaces/OpenGVLab/DragGAN/drag_gan.py b/spaces/OpenGVLab/DragGAN/drag_gan.py deleted file mode 100644 index 33344f25c2d210bf491c692ac2e328e231a32472..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/DragGAN/drag_gan.py +++ /dev/null @@ -1,236 +0,0 @@ -import copy -import os -import random -import urllib.request - -import torch -import torch.nn.functional as FF -import torch.optim -from torchvision import utils -from tqdm import tqdm - -from stylegan2.model import Generator - - -class DownloadProgressBar(tqdm): - def update_to(self, b=1, bsize=1, tsize=None): - if tsize is not None: - self.total = tsize - self.update(b * bsize - self.n) - - -def get_path(base_path): - BASE_DIR = os.path.join('checkpoints') - - save_path = os.path.join(BASE_DIR, base_path) - if not os.path.exists(save_path): - url = f"https://huggingface.co/aaronb/StyleGAN2/resolve/main/{base_path}" - print(f'{base_path} not found') - print('Try to download from huggingface: ', url) - os.makedirs(os.path.dirname(save_path), exist_ok=True) - download_url(url, save_path) - print('Downloaded to ', save_path) - return save_path - - -def download_url(url, output_path): - with DownloadProgressBar(unit='B', unit_scale=True, - miniters=1, desc=url.split('/')[-1]) as t: - urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to) - - -class CustomGenerator(Generator): - def prepare( - self, - styles, - inject_index=None, - truncation=1, - truncation_latent=None, - input_is_latent=False, - noise=None, - randomize_noise=True, - ): - if not input_is_latent: - styles = [self.style(s) for s in styles] - - if noise is None: - if randomize_noise: - noise = [None] * self.num_layers - else: - noise = [ - getattr(self.noises, f"noise_{i}") for i in range(self.num_layers) - ] - - if truncation < 1: - style_t = [] - - for style in styles: - style_t.append( - truncation_latent + truncation * (style - truncation_latent) - ) - - styles = style_t - - if len(styles) < 2: - inject_index = self.n_latent - - if styles[0].ndim < 3: - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - - else: - latent = styles[0] - - else: - if inject_index is None: - inject_index = random.randint(1, self.n_latent - 1) - - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - latent2 = 
styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
-
-            latent = torch.cat([latent, latent2], 1)
-
-        return latent, noise
-
-    def generate(
-        self,
-        latent,
-        noise,
-    ):
-        out = self.input(latent)
-        out = self.conv1(out, latent[:, 0], noise=noise[0])
-
-        skip = self.to_rgb1(out, latent[:, 1])
-        i = 1
-        for conv1, conv2, noise1, noise2, to_rgb in zip(
-            self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
-        ):
-            out = conv1(out, latent[:, i], noise=noise1)
-            out = conv2(out, latent[:, i + 1], noise=noise2)
-            skip = to_rgb(out, latent[:, i + 2], skip)
-            if out.shape[-1] == 256: F = out
-            i += 2
-
-        image = skip
-        F = FF.interpolate(F, image.shape[-2:], mode='bilinear')
-        return image, F
-
-
-def stylegan2(
-    size=1024,
-    channel_multiplier=2,
-    latent=512,
-    n_mlp=8,
-    ckpt='stylegan2-ffhq-config-f.pt'
-):
-    g_ema = CustomGenerator(size, latent, n_mlp, channel_multiplier=channel_multiplier)
-    checkpoint = torch.load(get_path(ckpt))
-    g_ema.load_state_dict(checkpoint["g_ema"], strict=False)
-    g_ema.requires_grad_(False)
-    g_ema.eval()
-    return g_ema
-
-
-def bilinear_interpolate_torch(im, y, x):
-    """
-    im : B,C,H,W
-    y : 1,numPoints -- pixel location y float
-    x : 1,numPoints -- pixel location x float
-    """
-    device = im.device
-
-    x0 = torch.floor(x).long().to(device)
-    x1 = x0 + 1
-
-    y0 = torch.floor(y).long().to(device)
-    y1 = y0 + 1
-
-    wa = ((x1.float() - x) * (y1.float() - y)).to(device)
-    wb = ((x1.float() - x) * (y - y0.float())).to(device)
-    wc = ((x - x0.float()) * (y1.float() - y)).to(device)
-    wd = ((x - x0.float()) * (y - y0.float())).to(device)
-    # Instead of clamp
-    x1 = x1 - torch.floor(x1 / im.shape[3]).int().to(device)
-    y1 = y1 - torch.floor(y1 / im.shape[2]).int().to(device)
-    Ia = im[:, :, y0, x0]
-    Ib = im[:, :, y1, x0]
-    Ic = im[:, :, y0, x1]
-    Id = im[:, :, y1, x1]
-
-    return Ia * wa + Ib * wb + Ic * wc + Id * wd
-
-
-def drag_gan(g_ema, latent: torch.Tensor, noise, F, handle_points, target_points, mask, max_iters=1000):
-    handle_points0 = copy.deepcopy(handle_points)
-    n = len(handle_points)
-    r1, r2, lam, d = 3, 12, 20, 1
-
-    def neighbor(x, y, d):
-        points = []
-        for i in range(x - d, x + d):
-            for j in range(y - d, y + d):
-                points.append(torch.tensor([i, j]).float().to(latent.device))
-        return points
-
-    F0 = F.detach().clone()
-
-    latent_trainable = latent[:, :6, :].detach().clone().requires_grad_(True)
-    latent_untrainable = latent[:, 6:, :].detach().clone().requires_grad_(False)
-    optimizer = torch.optim.Adam([latent_trainable], lr=2e-3)
-    for iter in range(max_iters):
-        for s in range(1):
-            optimizer.zero_grad()
-            latent = torch.cat([latent_trainable, latent_untrainable], dim=1)
-            sample2, F2 = g_ema.generate(latent, noise)
-
-            # motion supervision
-            loss = 0
-            for i in range(n):
-                pi, ti = handle_points[i], target_points[i]
-                di = (ti - pi) / torch.sum((ti - pi)**2)
-
-                for qi in neighbor(int(pi[0]), int(pi[1]), r1):
-                    # f1 = F[..., int(qi[0]), int(qi[1])]
-                    # f2 = F2[..., int(qi[0] + di[0]), int(qi[1] + di[1])]
-                    f1 = bilinear_interpolate_torch(F2, qi[0], qi[1]).detach()
-                    f2 = bilinear_interpolate_torch(F2, qi[0] + di[0], qi[1] + di[1])
-                    loss += FF.l1_loss(f2, f1)
-
-            if mask is not None:
-                loss += ((F2 - F0) * (1 - mask)).abs().mean() * lam
-
-            loss.backward()
-            optimizer.step()
-
-        # point tracking
-        with torch.no_grad():
-            sample2, F2 = g_ema.generate(latent, noise)
-            for i in range(n):
-                pi = handle_points0[i]
-                # f = F0[..., int(pi[0]), int(pi[1])]
-                f0 = bilinear_interpolate_torch(F0, pi[0], pi[1])
-                minv = 1e9
- minx = 1e9 - miny = 1e9 - for qi in neighbor(int(handle_points[i][0]), int(handle_points[i][1]), r2): - # f2 = F2[..., int(qi[0]), int(qi[1])] - f2 = bilinear_interpolate_torch(F2, qi[0], qi[1]) - v = torch.norm(f2 - f0, p=1) - if v < minv: - minv = v - minx = int(qi[0]) - miny = int(qi[1]) - handle_points[i][0] = minx - handle_points[i][1] = miny - - F = F2.detach().clone() - if iter % 1 == 0: - print(iter, loss.item(), handle_points, target_points) - # p = handle_points[0].int() - # sample2[0, :, p[0] - 5:p[0] + 5, p[1] - 5:p[1] + 5] = sample2[0, :, p[0] - 5:p[0] + 5, p[1] - 5:p[1] + 5] * 0 - # t = target_points[0].int() - # sample2[0, :, t[0] - 5:t[0] + 5, t[1] - 5:t[1] + 5] = sample2[0, :, t[0] - 5:t[0] + 5, t[1] - 5:t[1] + 5] * 255 - - # sample2[0, :, 210, 134] = sample2[0, :, 210, 134] * 0 - # utils.save_image(sample2, "test2.png", normalize=True, range=(-1, 1)) - - yield sample2, latent, F2, handle_points diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/evaluation/losses/base_loss.py b/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/evaluation/losses/base_loss.py deleted file mode 100644 index e5cd5fa8d571b2da829b87f0784bd38978158ce7..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/evaluation/losses/base_loss.py +++ /dev/null @@ -1,528 +0,0 @@ -import logging -from abc import abstractmethod, ABC - -import numpy as np -import sklearn -import sklearn.svm -import torch -import torch.nn as nn -import torch.nn.functional as F -from joblib import Parallel, delayed -from scipy import linalg - -from models.ade20k import SegmentationModule, NUM_CLASS, segm_options -from .fid.inception import InceptionV3 -from .lpips import PerceptualLoss -from .ssim import SSIM - -LOGGER = logging.getLogger(__name__) - - -def get_groupings(groups): - """ - :param groups: group numbers for respective elements - :return: dict of kind {group_idx: indices of the corresponding group elements} - """ - label_groups, count_groups = np.unique(groups, return_counts=True) - - indices = np.argsort(groups) - - grouping = dict() - cur_start = 0 - for label, count in zip(label_groups, count_groups): - cur_end = cur_start + count - cur_indices = indices[cur_start:cur_end] - grouping[label] = cur_indices - cur_start = cur_end - return grouping - - -class EvaluatorScore(nn.Module): - @abstractmethod - def forward(self, pred_batch, target_batch, mask): - pass - - @abstractmethod - def get_value(self, groups=None, states=None): - pass - - @abstractmethod - def reset(self): - pass - - -class PairwiseScore(EvaluatorScore, ABC): - def __init__(self): - super().__init__() - self.individual_values = None - - def get_value(self, groups=None, states=None): - """ - :param groups: - :return: - total_results: dict of kind {'mean': score mean, 'std': score std} - group_results: None, if groups is None; - else dict {group_idx: {'mean': score mean among group, 'std': score std among group}} - """ - individual_values = torch.cat(states, dim=-1).reshape(-1).cpu().numpy() if states is not None \ - else self.individual_values - - total_results = { - 'mean': individual_values.mean(), - 'std': individual_values.std() - } - - if groups is None: - return total_results, None - - group_results = dict() - grouping = get_groupings(groups) - for label, index in grouping.items(): - group_scores = individual_values[index] - group_results[label] = { - 'mean': group_scores.mean(), - 'std': group_scores.std() - } - return total_results, group_results - - 
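The grouped aggregation in `get_value` above hinges on the `get_groupings` helper defined near the top of this file; a tiny worked example follows. The import path is an assumption based on this repository's layout.

```python
import numpy as np

from saicinpainting.evaluation.losses.base_loss import get_groupings

groups = np.array([7, 7, 9])  # three samples: two from group 7, one from group 9
grouping = get_groupings(groups)
# np.unique yields labels [7, 9] with counts [2, 1]; argsort(groups) is [0, 1, 2],
# so the result is {7: array([0, 1]), 9: array([2])}.
print(grouping)
```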
def reset(self): - self.individual_values = [] - - -class SSIMScore(PairwiseScore): - def __init__(self, window_size=11): - super().__init__() - self.score = SSIM(window_size=window_size, size_average=False).eval() - self.reset() - - def forward(self, pred_batch, target_batch, mask=None): - batch_values = self.score(pred_batch, target_batch) - self.individual_values = np.hstack([ - self.individual_values, batch_values.detach().cpu().numpy() - ]) - return batch_values - - -class LPIPSScore(PairwiseScore): - def __init__(self, model='net-lin', net='vgg', model_path=None, use_gpu=True): - super().__init__() - self.score = PerceptualLoss(model=model, net=net, model_path=model_path, - use_gpu=use_gpu, spatial=False).eval() - self.reset() - - def forward(self, pred_batch, target_batch, mask=None): - batch_values = self.score(pred_batch, target_batch).flatten() - self.individual_values = np.hstack([ - self.individual_values, batch_values.detach().cpu().numpy() - ]) - return batch_values - - -def fid_calculate_activation_statistics(act): - mu = np.mean(act, axis=0) - sigma = np.cov(act, rowvar=False) - return mu, sigma - - -def calculate_frechet_distance(activations_pred, activations_target, eps=1e-6): - mu1, sigma1 = fid_calculate_activation_statistics(activations_pred) - mu2, sigma2 = fid_calculate_activation_statistics(activations_target) - - diff = mu1 - mu2 - - # Product might be almost singular - covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) - if not np.isfinite(covmean).all(): - msg = ('fid calculation produces singular product; ' - 'adding %s to diagonal of cov estimates') % eps - LOGGER.warning(msg) - offset = np.eye(sigma1.shape[0]) * eps - covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) - - # Numerical error might give slight imaginary component - if np.iscomplexobj(covmean): - # if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): - if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-2): - m = np.max(np.abs(covmean.imag)) - raise ValueError('Imaginary component {}'.format(m)) - covmean = covmean.real - - tr_covmean = np.trace(covmean) - - return (diff.dot(diff) + np.trace(sigma1) + - np.trace(sigma2) - 2 * tr_covmean) - - -class FIDScore(EvaluatorScore): - def __init__(self, dims=2048, eps=1e-6): - LOGGER.info("FIDscore init called") - super().__init__() - if getattr(FIDScore, '_MODEL', None) is None: - block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims] - FIDScore._MODEL = InceptionV3([block_idx]).eval() - self.model = FIDScore._MODEL - self.eps = eps - self.reset() - LOGGER.info("FIDscore init done") - - def forward(self, pred_batch, target_batch, mask=None): - activations_pred = self._get_activations(pred_batch) - activations_target = self._get_activations(target_batch) - - self.activations_pred.append(activations_pred.detach().cpu()) - self.activations_target.append(activations_target.detach().cpu()) - - return activations_pred, activations_target - - def get_value(self, groups=None, states=None): - LOGGER.info("FIDscore get_value called") - activations_pred, activations_target = zip(*states) if states is not None \ - else (self.activations_pred, self.activations_target) - activations_pred = torch.cat(activations_pred).cpu().numpy() - activations_target = torch.cat(activations_target).cpu().numpy() - - total_distance = calculate_frechet_distance(activations_pred, activations_target, eps=self.eps) - total_results = dict(mean=total_distance) - - if groups is None: - group_results = None - else: - group_results = dict() - grouping = 
get_groupings(groups) - for label, index in grouping.items(): - if len(index) > 1: - group_distance = calculate_frechet_distance(activations_pred[index], activations_target[index], - eps=self.eps) - group_results[label] = dict(mean=group_distance) - - else: - group_results[label] = dict(mean=float('nan')) - - self.reset() - - LOGGER.info("FIDscore get_value done") - - return total_results, group_results - - def reset(self): - self.activations_pred = [] - self.activations_target = [] - - def _get_activations(self, batch): - activations = self.model(batch)[0] - if activations.shape[2] != 1 or activations.shape[3] != 1: - assert False, \ - 'We should not have got here, because Inception always scales inputs to 299x299' - # activations = F.adaptive_avg_pool2d(activations, output_size=(1, 1)) - activations = activations.squeeze(-1).squeeze(-1) - return activations - - -class SegmentationAwareScore(EvaluatorScore): - def __init__(self, weights_path): - super().__init__() - self.segm_network = SegmentationModule(weights_path=weights_path, use_default_normalization=True).eval() - self.target_class_freq_by_image_total = [] - self.target_class_freq_by_image_mask = [] - self.pred_class_freq_by_image_mask = [] - - def forward(self, pred_batch, target_batch, mask): - pred_segm_flat = self.segm_network.predict(pred_batch)[0].view(pred_batch.shape[0], -1).long().detach().cpu().numpy() - target_segm_flat = self.segm_network.predict(target_batch)[0].view(pred_batch.shape[0], -1).long().detach().cpu().numpy() - mask_flat = (mask.view(mask.shape[0], -1) > 0.5).detach().cpu().numpy() - - batch_target_class_freq_total = [] - batch_target_class_freq_mask = [] - batch_pred_class_freq_mask = [] - - for cur_pred_segm, cur_target_segm, cur_mask in zip(pred_segm_flat, target_segm_flat, mask_flat): - cur_target_class_freq_total = np.bincount(cur_target_segm, minlength=NUM_CLASS)[None, ...] - cur_target_class_freq_mask = np.bincount(cur_target_segm[cur_mask], minlength=NUM_CLASS)[None, ...] - cur_pred_class_freq_mask = np.bincount(cur_pred_segm[cur_mask], minlength=NUM_CLASS)[None, ...] 
- - self.target_class_freq_by_image_total.append(cur_target_class_freq_total) - self.target_class_freq_by_image_mask.append(cur_target_class_freq_mask) - self.pred_class_freq_by_image_mask.append(cur_pred_class_freq_mask) - - batch_target_class_freq_total.append(cur_target_class_freq_total) - batch_target_class_freq_mask.append(cur_target_class_freq_mask) - batch_pred_class_freq_mask.append(cur_pred_class_freq_mask) - - batch_target_class_freq_total = np.concatenate(batch_target_class_freq_total, axis=0) - batch_target_class_freq_mask = np.concatenate(batch_target_class_freq_mask, axis=0) - batch_pred_class_freq_mask = np.concatenate(batch_pred_class_freq_mask, axis=0) - return batch_target_class_freq_total, batch_target_class_freq_mask, batch_pred_class_freq_mask - - def reset(self): - super().reset() - self.target_class_freq_by_image_total = [] - self.target_class_freq_by_image_mask = [] - self.pred_class_freq_by_image_mask = [] - - -def distribute_values_to_classes(target_class_freq_by_image_mask, values, idx2name): - assert target_class_freq_by_image_mask.ndim == 2 and target_class_freq_by_image_mask.shape[0] == values.shape[0] - total_class_freq = target_class_freq_by_image_mask.sum(0) - distr_values = (target_class_freq_by_image_mask * values[..., None]).sum(0) - result = distr_values / (total_class_freq + 1e-3) - return {idx2name[i]: val for i, val in enumerate(result) if total_class_freq[i] > 0} - - -def get_segmentation_idx2name(): - return {i - 1: name for i, name in segm_options['classes'].set_index('Idx', drop=True)['Name'].to_dict().items()} - - -class SegmentationAwarePairwiseScore(SegmentationAwareScore): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.individual_values = [] - self.segm_idx2name = get_segmentation_idx2name() - - def forward(self, pred_batch, target_batch, mask): - cur_class_stats = super().forward(pred_batch, target_batch, mask) - score_values = self.calc_score(pred_batch, target_batch, mask) - self.individual_values.append(score_values) - return cur_class_stats + (score_values,) - - @abstractmethod - def calc_score(self, pred_batch, target_batch, mask): - raise NotImplementedError() - - def get_value(self, groups=None, states=None): - """ - :param groups: - :return: - total_results: dict of kind {'mean': score mean, 'std': score std} - group_results: None, if groups is None; - else dict {group_idx: {'mean': score mean among group, 'std': score std among group}} - """ - if states is not None: - (target_class_freq_by_image_total, - target_class_freq_by_image_mask, - pred_class_freq_by_image_mask, - individual_values) = states - else: - target_class_freq_by_image_total = self.target_class_freq_by_image_total - target_class_freq_by_image_mask = self.target_class_freq_by_image_mask - pred_class_freq_by_image_mask = self.pred_class_freq_by_image_mask - individual_values = self.individual_values - - target_class_freq_by_image_total = np.concatenate(target_class_freq_by_image_total, axis=0) - target_class_freq_by_image_mask = np.concatenate(target_class_freq_by_image_mask, axis=0) - pred_class_freq_by_image_mask = np.concatenate(pred_class_freq_by_image_mask, axis=0) - individual_values = np.concatenate(individual_values, axis=0) - - total_results = { - 'mean': individual_values.mean(), - 'std': individual_values.std(), - **distribute_values_to_classes(target_class_freq_by_image_mask, individual_values, self.segm_idx2name) - } - - if groups is None: - return total_results, None - - group_results = dict() - grouping = 
get_groupings(groups) - for label, index in grouping.items(): - group_class_freq = target_class_freq_by_image_mask[index] - group_scores = individual_values[index] - group_results[label] = { - 'mean': group_scores.mean(), - 'std': group_scores.std(), - ** distribute_values_to_classes(group_class_freq, group_scores, self.segm_idx2name) - } - return total_results, group_results - - def reset(self): - super().reset() - self.individual_values = [] - - -class SegmentationClassStats(SegmentationAwarePairwiseScore): - def calc_score(self, pred_batch, target_batch, mask): - return 0 - - def get_value(self, groups=None, states=None): - """ - :param groups: - :return: - total_results: dict of kind {'mean': score mean, 'std': score std} - group_results: None, if groups is None; - else dict {group_idx: {'mean': score mean among group, 'std': score std among group}} - """ - if states is not None: - (target_class_freq_by_image_total, - target_class_freq_by_image_mask, - pred_class_freq_by_image_mask, - _) = states - else: - target_class_freq_by_image_total = self.target_class_freq_by_image_total - target_class_freq_by_image_mask = self.target_class_freq_by_image_mask - pred_class_freq_by_image_mask = self.pred_class_freq_by_image_mask - - target_class_freq_by_image_total = np.concatenate(target_class_freq_by_image_total, axis=0) - target_class_freq_by_image_mask = np.concatenate(target_class_freq_by_image_mask, axis=0) - pred_class_freq_by_image_mask = np.concatenate(pred_class_freq_by_image_mask, axis=0) - - target_class_freq_by_image_total_marginal = target_class_freq_by_image_total.sum(0).astype('float32') - target_class_freq_by_image_total_marginal /= target_class_freq_by_image_total_marginal.sum() - - target_class_freq_by_image_mask_marginal = target_class_freq_by_image_mask.sum(0).astype('float32') - target_class_freq_by_image_mask_marginal /= target_class_freq_by_image_mask_marginal.sum() - - pred_class_freq_diff = (pred_class_freq_by_image_mask - target_class_freq_by_image_mask).sum(0) / (target_class_freq_by_image_mask.sum(0) + 1e-3) - - total_results = dict() - total_results.update({f'total_freq/{self.segm_idx2name[i]}': v - for i, v in enumerate(target_class_freq_by_image_total_marginal) - if v > 0}) - total_results.update({f'mask_freq/{self.segm_idx2name[i]}': v - for i, v in enumerate(target_class_freq_by_image_mask_marginal) - if v > 0}) - total_results.update({f'mask_freq_diff/{self.segm_idx2name[i]}': v - for i, v in enumerate(pred_class_freq_diff) - if target_class_freq_by_image_total_marginal[i] > 0}) - - if groups is None: - return total_results, None - - group_results = dict() - grouping = get_groupings(groups) - for label, index in grouping.items(): - group_target_class_freq_by_image_total = target_class_freq_by_image_total[index] - group_target_class_freq_by_image_mask = target_class_freq_by_image_mask[index] - group_pred_class_freq_by_image_mask = pred_class_freq_by_image_mask[index] - - group_target_class_freq_by_image_total_marginal = group_target_class_freq_by_image_total.sum(0).astype('float32') - group_target_class_freq_by_image_total_marginal /= group_target_class_freq_by_image_total_marginal.sum() - - group_target_class_freq_by_image_mask_marginal = group_target_class_freq_by_image_mask.sum(0).astype('float32') - group_target_class_freq_by_image_mask_marginal /= group_target_class_freq_by_image_mask_marginal.sum() - - group_pred_class_freq_diff = (group_pred_class_freq_by_image_mask - group_target_class_freq_by_image_mask).sum(0) / ( - 
group_target_class_freq_by_image_mask.sum(0) + 1e-3) - - cur_group_results = dict() - cur_group_results.update({f'total_freq/{self.segm_idx2name[i]}': v - for i, v in enumerate(group_target_class_freq_by_image_total_marginal) - if v > 0}) - cur_group_results.update({f'mask_freq/{self.segm_idx2name[i]}': v - for i, v in enumerate(group_target_class_freq_by_image_mask_marginal) - if v > 0}) - cur_group_results.update({f'mask_freq_diff/{self.segm_idx2name[i]}': v - for i, v in enumerate(group_pred_class_freq_diff) - if group_target_class_freq_by_image_total_marginal[i] > 0}) - - group_results[label] = cur_group_results - return total_results, group_results - - -class SegmentationAwareSSIM(SegmentationAwarePairwiseScore): - def __init__(self, *args, window_size=11, **kwargs): - super().__init__(*args, **kwargs) - self.score_impl = SSIM(window_size=window_size, size_average=False).eval() - - def calc_score(self, pred_batch, target_batch, mask): - return self.score_impl(pred_batch, target_batch).detach().cpu().numpy() - - -class SegmentationAwareLPIPS(SegmentationAwarePairwiseScore): - def __init__(self, *args, model='net-lin', net='vgg', model_path=None, use_gpu=True, **kwargs): - super().__init__(*args, **kwargs) - self.score_impl = PerceptualLoss(model=model, net=net, model_path=model_path, - use_gpu=use_gpu, spatial=False).eval() - - def calc_score(self, pred_batch, target_batch, mask): - return self.score_impl(pred_batch, target_batch).flatten().detach().cpu().numpy() - - -def calculade_fid_no_img(img_i, activations_pred, activations_target, eps=1e-6): - activations_pred = activations_pred.copy() - activations_pred[img_i] = activations_target[img_i] - return calculate_frechet_distance(activations_pred, activations_target, eps=eps) - - -class SegmentationAwareFID(SegmentationAwarePairwiseScore): - def __init__(self, *args, dims=2048, eps=1e-6, n_jobs=-1, **kwargs): - super().__init__(*args, **kwargs) - if getattr(FIDScore, '_MODEL', None) is None: - block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims] - FIDScore._MODEL = InceptionV3([block_idx]).eval() - self.model = FIDScore._MODEL - self.eps = eps - self.n_jobs = n_jobs - - def calc_score(self, pred_batch, target_batch, mask): - activations_pred = self._get_activations(pred_batch) - activations_target = self._get_activations(target_batch) - return activations_pred, activations_target - - def get_value(self, groups=None, states=None): - """ - :param groups: - :return: - total_results: dict of kind {'mean': score mean, 'std': score std} - group_results: None, if groups is None; - else dict {group_idx: {'mean': score mean among group, 'std': score std among group}} - """ - if states is not None: - (target_class_freq_by_image_total, - target_class_freq_by_image_mask, - pred_class_freq_by_image_mask, - activation_pairs) = states - else: - target_class_freq_by_image_total = self.target_class_freq_by_image_total - target_class_freq_by_image_mask = self.target_class_freq_by_image_mask - pred_class_freq_by_image_mask = self.pred_class_freq_by_image_mask - activation_pairs = self.individual_values - - target_class_freq_by_image_total = np.concatenate(target_class_freq_by_image_total, axis=0) - target_class_freq_by_image_mask = np.concatenate(target_class_freq_by_image_mask, axis=0) - pred_class_freq_by_image_mask = np.concatenate(pred_class_freq_by_image_mask, axis=0) - activations_pred, activations_target = zip(*activation_pairs) - activations_pred = np.concatenate(activations_pred, axis=0) - activations_target = np.concatenate(activations_target, 
axis=0) - - total_results = { - 'mean': calculate_frechet_distance(activations_pred, activations_target, eps=self.eps), - 'std': 0, - **self.distribute_fid_to_classes(target_class_freq_by_image_mask, activations_pred, activations_target) - } - - if groups is None: - return total_results, None - - group_results = dict() - grouping = get_groupings(groups) - for label, index in grouping.items(): - if len(index) > 1: - group_activations_pred = activations_pred[index] - group_activations_target = activations_target[index] - group_class_freq = target_class_freq_by_image_mask[index] - group_results[label] = { - 'mean': calculate_frechet_distance(group_activations_pred, group_activations_target, eps=self.eps), - 'std': 0, - **self.distribute_fid_to_classes(group_class_freq, - group_activations_pred, - group_activations_target) - } - else: - group_results[label] = dict(mean=float('nan'), std=0) - return total_results, group_results - - def distribute_fid_to_classes(self, class_freq, activations_pred, activations_target): - real_fid = calculate_frechet_distance(activations_pred, activations_target, eps=self.eps) - - fid_no_images = Parallel(n_jobs=self.n_jobs)( - delayed(calculade_fid_no_img)(img_i, activations_pred, activations_target, eps=self.eps) - for img_i in range(activations_pred.shape[0]) - ) - errors = real_fid - fid_no_images - return distribute_values_to_classes(class_freq, errors, self.segm_idx2name) - - def _get_activations(self, batch): - activations = self.model(batch)[0] - if activations.shape[2] != 1 or activations.shape[3] != 1: - activations = F.adaptive_avg_pool2d(activations, output_size=(1, 1)) - activations = activations.squeeze(-1).squeeze(-1).detach().cpu().numpy() - return activations diff --git a/spaces/OpenMotionLab/MotionGPT/mGPT/render/blender/meshes.py b/spaces/OpenMotionLab/MotionGPT/mGPT/render/blender/meshes.py deleted file mode 100644 index 284de6c5bef4c17078b316fa2f4501b33dcb2444..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/mGPT/render/blender/meshes.py +++ /dev/null @@ -1,93 +0,0 @@ -import numpy as np - -from .materials import body_material - -# green -# GT_SMPL = body_material(0.009, 0.214, 0.029) -GT_SMPL = body_material(0.035, 0.415, 0.122) - -# blue -# GEN_SMPL = body_material(0.022, 0.129, 0.439) -# Blues => cmap(0.87) -# GEN_SMPL = body_material(0.035, 0.322, 0.615) -# Oranges => cmap(0.87) -GEN_SMPL = body_material(0.658, 0.214, 0.0114) - - -class Meshes: - def __init__(self, data, *, gt, mode, faces_path, canonicalize, always_on_floor, oldrender=True, is_smplx=False, **kwargs): - data = prepare_meshes(data, canonicalize=canonicalize, - always_on_floor=always_on_floor, - is_smplx=is_smplx) - - if isinstance(faces_path, str): - self.faces = np.load(faces_path) - else: - self.faces = faces_path - - self.data = data - self.mode = mode - self.oldrender = oldrender - - self.N = len(data) - self.trajectory = data[:, :, [0, 1]].mean(1) - - if gt: - self.mat = GT_SMPL - else: - self.mat = GEN_SMPL - - def get_sequence_mat(self, frac): - import matplotlib - # cmap = matplotlib.cm.get_cmap('Blues') - cmap = matplotlib.cm.get_cmap('Oranges') - # begin = 0.60 - # end = 0.90 - begin = 0.50 - end = 0.90 - rgbcolor = cmap(begin + (end-begin)*frac) - mat = body_material(*rgbcolor, oldrender=self.oldrender) - return mat - - def get_root(self, index): - return self.data[index].mean(0) - - def get_mean_root(self): - return self.data.mean((0, 1)) - - def load_in_blender(self, index, mat): - vertices = self.data[index] - faces = self.faces - 
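-        # Zero-pad the frame index so the Blender object names sort in frame
-        # order ("0000", "0001", ...).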
name = f"{str(index).zfill(4)}" - - from .tools import load_numpy_vertices_into_blender - load_numpy_vertices_into_blender(vertices, faces, name, mat) - - return name - - def __len__(self): - return self.N - - -def prepare_meshes(data, canonicalize=True, always_on_floor=False, is_smplx=False): - if canonicalize: - print("No canonicalization for now") - - # fitted mesh do not need fixing axis - # fix axis - if is_smplx: - data[..., 1] = - data[..., 1] - # data[..., 0] = - data[..., 0] - - - # Swap axis (gravity=Z instead of Y) - data = data[..., [2, 0, 1]] - - # Remove the floor - data[..., 2] -= data[..., 2].min() - - # Put all the body on the floor - if always_on_floor: - data[..., 2] -= data[..., 2].min(1)[:, None] - - return data diff --git a/spaces/OpenMotionLab/MotionGPT/pyrender/pyrender/font.py b/spaces/OpenMotionLab/MotionGPT/pyrender/pyrender/font.py deleted file mode 100644 index 5ac530d7b949f50314a0d9cf5d744bedcace0571..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/pyrender/pyrender/font.py +++ /dev/null @@ -1,272 +0,0 @@ -"""Font texture loader and processor. - -Author: Matthew Matl -""" -import freetype -import numpy as np -import os - -import OpenGL -from OpenGL.GL import * - -from .constants import TextAlign, FLOAT_SZ -from .texture import Texture -from .sampler import Sampler - - -class FontCache(object): - """A cache for fonts. - """ - - def __init__(self, font_dir=None): - self._font_cache = {} - self.font_dir = font_dir - if self.font_dir is None: - base_dir, _ = os.path.split(os.path.realpath(__file__)) - self.font_dir = os.path.join(base_dir, 'fonts') - - def get_font(self, font_name, font_pt): - # If it's a file, load it directly, else, try to load from font dir. - if os.path.isfile(font_name): - font_filename = font_name - _, font_name = os.path.split(font_name) - font_name, _ = os.path.split(font_name) - else: - font_filename = os.path.join(self.font_dir, font_name) + '.ttf' - - cid = OpenGL.contextdata.getContext() - key = (cid, font_name, int(font_pt)) - - if key not in self._font_cache: - self._font_cache[key] = Font(font_filename, font_pt) - return self._font_cache[key] - - def clear(self): - for key in self._font_cache: - self._font_cache[key].delete() - self._font_cache = {} - - -class Character(object): - """A single character, with its texture and attributes. - """ - - def __init__(self, texture, size, bearing, advance): - self.texture = texture - self.size = size - self.bearing = bearing - self.advance = advance - - -class Font(object): - """A font object. - - Parameters - ---------- - font_file : str - The file to load the font from. - font_pt : int - The height of the font in pixels. 
- """ - - def __init__(self, font_file, font_pt=40): - self.font_file = font_file - self.font_pt = int(font_pt) - self._face = freetype.Face(font_file) - self._face.set_pixel_sizes(0, font_pt) - self._character_map = {} - - for i in range(0, 128): - - # Generate texture - face = self._face - face.load_char(chr(i)) - buf = face.glyph.bitmap.buffer - src = (np.array(buf) / 255.0).astype(np.float32) - src = src.reshape((face.glyph.bitmap.rows, - face.glyph.bitmap.width)) - tex = Texture( - sampler=Sampler( - magFilter=GL_LINEAR, - minFilter=GL_LINEAR, - wrapS=GL_CLAMP_TO_EDGE, - wrapT=GL_CLAMP_TO_EDGE - ), - source=src, - source_channels='R', - ) - character = Character( - texture=tex, - size=np.array([face.glyph.bitmap.width, - face.glyph.bitmap.rows]), - bearing=np.array([face.glyph.bitmap_left, - face.glyph.bitmap_top]), - advance=face.glyph.advance.x - ) - self._character_map[chr(i)] = character - - self._vbo = None - self._vao = None - - @property - def font_file(self): - """str : The file the font was loaded from. - """ - return self._font_file - - @font_file.setter - def font_file(self, value): - self._font_file = value - - @property - def font_pt(self): - """int : The height of the font in pixels. - """ - return self._font_pt - - @font_pt.setter - def font_pt(self, value): - self._font_pt = int(value) - - def _add_to_context(self): - - self._vao = glGenVertexArrays(1) - glBindVertexArray(self._vao) - self._vbo = glGenBuffers(1) - glBindBuffer(GL_ARRAY_BUFFER, self._vbo) - glBufferData(GL_ARRAY_BUFFER, FLOAT_SZ * 6 * 4, None, GL_DYNAMIC_DRAW) - glEnableVertexAttribArray(0) - glVertexAttribPointer( - 0, 4, GL_FLOAT, GL_FALSE, 4 * FLOAT_SZ, ctypes.c_void_p(0) - ) - glBindVertexArray(0) - - glPixelStorei(GL_UNPACK_ALIGNMENT, 1) - for c in self._character_map: - ch = self._character_map[c] - if not ch.texture._in_context(): - ch.texture._add_to_context() - - def _remove_from_context(self): - for c in self._character_map: - ch = self._character_map[c] - ch.texture.delete() - if self._vao is not None: - glDeleteVertexArrays(1, [self._vao]) - glDeleteBuffers(1, [self._vbo]) - self._vao = None - self._vbo = None - - def _in_context(self): - return self._vao is not None - - def _bind(self): - glBindVertexArray(self._vao) - - def _unbind(self): - glBindVertexArray(0) - - def delete(self): - self._unbind() - self._remove_from_context() - - def render_string(self, text, x, y, scale=1.0, - align=TextAlign.BOTTOM_LEFT): - """Render a string to the current view buffer. - - Note - ---- - Assumes correct shader program already bound w/ uniforms set. - - Parameters - ---------- - text : str - The text to render. - x : int - Horizontal pixel location of text. - y : int - Vertical pixel location of text. - scale : int - Scaling factor for text. - align : int - One of the TextAlign options which specifies where the ``x`` - and ``y`` parameters lie on the text. For example, - :attr:`.TextAlign.BOTTOM_LEFT` means that ``x`` and ``y`` indicate - the position of the bottom-left corner of the textbox. 
- """ - glActiveTexture(GL_TEXTURE0) - glEnable(GL_BLEND) - glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) - glDisable(GL_DEPTH_TEST) - glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) - self._bind() - - # Determine width and height of text relative to x, y - width = 0.0 - height = 0.0 - for c in text: - ch = self._character_map[c] - height = max(height, ch.bearing[1] * scale) - width += (ch.advance >> 6) * scale - - # Determine offsets based on alignments - xoff = 0 - yoff = 0 - if align == TextAlign.BOTTOM_RIGHT: - xoff = -width - elif align == TextAlign.BOTTOM_CENTER: - xoff = -width / 2.0 - elif align == TextAlign.TOP_LEFT: - yoff = -height - elif align == TextAlign.TOP_RIGHT: - yoff = -height - xoff = -width - elif align == TextAlign.TOP_CENTER: - yoff = -height - xoff = -width / 2.0 - elif align == TextAlign.CENTER: - xoff = -width / 2.0 - yoff = -height / 2.0 - elif align == TextAlign.CENTER_LEFT: - yoff = -height / 2.0 - elif align == TextAlign.CENTER_RIGHT: - xoff = -width - yoff = -height / 2.0 - - x += xoff - y += yoff - - ch = None - for c in text: - ch = self._character_map[c] - xpos = x + ch.bearing[0] * scale - ypos = y - (ch.size[1] - ch.bearing[1]) * scale - w = ch.size[0] * scale - h = ch.size[1] * scale - - vertices = np.array([ - [xpos, ypos, 0.0, 0.0], - [xpos + w, ypos, 1.0, 0.0], - [xpos + w, ypos + h, 1.0, 1.0], - [xpos + w, ypos + h, 1.0, 1.0], - [xpos, ypos + h, 0.0, 1.0], - [xpos, ypos, 0.0, 0.0], - ], dtype=np.float32) - - ch.texture._bind() - - glBindBuffer(GL_ARRAY_BUFFER, self._vbo) - glBufferData( - GL_ARRAY_BUFFER, FLOAT_SZ * 6 * 4, vertices, GL_DYNAMIC_DRAW - ) - # TODO MAKE THIS MORE EFFICIENT, lgBufferSubData is broken - # glBufferSubData( - # GL_ARRAY_BUFFER, 0, 6 * 4 * FLOAT_SZ, - # np.ascontiguousarray(vertices.flatten) - # ) - glDrawArrays(GL_TRIANGLES, 0, 6) - x += (ch.advance >> 6) * scale - - self._unbind() - if ch: - ch.texture._unbind() diff --git a/spaces/Oumar199/Fake-Real-Face-Detection/fake_face_detection/utils/visualize_images.py b/spaces/Oumar199/Fake-Real-Face-Detection/fake_face_detection/utils/visualize_images.py deleted file mode 100644 index 0d54e7512d00df39c367dcf351a91152d2fcd823..0000000000000000000000000000000000000000 --- a/spaces/Oumar199/Fake-Real-Face-Detection/fake_face_detection/utils/visualize_images.py +++ /dev/null @@ -1,93 +0,0 @@ -from torch.utils.tensorboard import SummaryWriter -from PIL.JpegImagePlugin import JpegImageFile -import matplotlib.pyplot as plt -from typing import * -from math import * -import numpy as np -import random -import torch -import os - -# use a style with no grid -plt.style.use("_mpl-gallery-nogrid") - -def visualize_images(images_dict: Dict[str, Iterable[Union[JpegImageFile, torch.Tensor, np.ndarray]]], - log_directory: str = "fake_face_logs", - n_images: int = 40, - figsize = (15, 15), - seed: Union[int, None] = None, - show: bool = True - ): - """Visualize some images from a dictionary - - Args: - images_dict (Dict[str, Iterable[Union[JpegImageFile, torch.Tensor, np.ndarray]]]): The dictionary of the images with key indicating the tag - log_directory (str, optional): The tensorboard log directory. Defaults to "fake_face_logs". - n_images (int, optional): The number of images. Defaults to 40. - figsize (tuple, optional): The figure size. Defaults to (15, 15). - seed (Union[int, None], optional): The seed. Defaults to None. - show (bool): Indicate if we want to show the figure. Defaults to True. 
- """ - - assert len(images_dict) > 0 - - assert isinstance(images_dict, dict) - - # add seed - random.seed(seed) - - # verify if we must add a title for each image - add_titles = len(images_dict) > 1 - - images_ = [] - - # modify the dictionary to obtain a tuple of images with their corresponding tags - for key in images_dict: - - for image in images_dict[key]: - - images_.append((key, image)) - - # we take the number of images in the list if n_images is larger - if n_images > len(images_): n_images = len(images_) - - # choose random images - images = random.choices(images_, k = n_images) - - if isinstance(images[0], JpegImageFile): - - images = [np.array(image[1]) for image in images if type(image[1]) in [JpegImageFile, torch.Tensor, np.ndarray]] - - # calculate the number of rows and columns - n_rows = ceil(sqrt(n_images)) - - fig, axs = plt.subplots(nrows=n_rows, ncols=n_rows, figsize = figsize) - - # flat the axes - axs = axs.flat - - # trace images - for i in range(n_images): - - axs[i].imshow(images[i][1], interpolation = "nearest") - - if add_titles: axs[i].set_title(images[i][0]) - - axs[i].axis('off') - - # add padding to the figure - fig.tight_layout() - - # deleting no necessary plots - [fig.delaxes(axs[i]) for i in range(n_images, n_rows * n_rows)] - - # add figure to tensorboard - with SummaryWriter(os.path.join(log_directory, "images")) as writer: - - # identify the tag - tag = "_".join(list(images_dict)) if add_titles else list(images_dict.keys())[0] - - writer.add_figure(tag = tag, figure = fig) - - if show: return fig - diff --git a/spaces/Panel-Org/panel-template/Dockerfile b/spaces/Panel-Org/panel-template/Dockerfile deleted file mode 100644 index c33a0787f9bfc4eb7088822ae9e724bad601c068..0000000000000000000000000000000000000000 --- a/spaces/Panel-Org/panel-template/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM python:3.9 - -WORKDIR /code - -COPY ./requirements.txt /code/requirements.txt -RUN python3 -m pip install --no-cache-dir --upgrade pip -RUN python3 -m pip install --no-cache-dir --upgrade -r /code/requirements.txt - -COPY . . 
- -CMD ["panel", "serve", "/code/app.py", "--address", "0.0.0.0", "--port", "7860", "--allow-websocket-origin", "*"] - -RUN mkdir /.cache -RUN chmod 777 /.cache -RUN mkdir .chroma -RUN chmod 777 .chroma \ No newline at end of file diff --git a/spaces/ParagKesharDas360/MovieRecommadationApp/rating.py b/spaces/ParagKesharDas360/MovieRecommadationApp/rating.py deleted file mode 100644 index 5c1c589638678ea4b91529170b546b5367702b8e..0000000000000000000000000000000000000000 --- a/spaces/ParagKesharDas360/MovieRecommadationApp/rating.py +++ /dev/null @@ -1,375 +0,0 @@ -import streamlit as st -import pandas as pd -from datetime import datetime -import streamlit as st -import argparse -import csv -# st.set_page_config(page_title="Rate a Movie", page_icon=":movie_camera:", layout="wide", initial_sidebar_state="expanded") - -@st.cache -# Load movies.csv and ratings.csv files -def load_movies(): - movies = pd.read_csv('movies.csv') - return movies - -movies = load_movies() -ratings = pd.read_csv('ratings.csv') - -# Load login.csv file and create a dictionary to map usernames to new_user_ids -login = pd.read_csv('login.csv') - -# Create a function to get the movieId based on the movie title - -def find_movie_id_by_title(title): - with open('movies.csv', 'r',encoding='utf-8') as file: - reader = csv.reader(file) - next(reader) # skip header row - for row in reader: - if row[1] == title: - return row[0] - return None - -def find_movie_id_by_genres(title): - with open('movies.csv', 'r',encoding='utf-8') as file: - reader = csv.reader(file) - next(reader) # skip header row - for row in reader: - if row[1] == title: - return row[2] - return None - -def find_user_rating(user_id, movie_id): - user_ratings = ratings[(ratings['userId'] == user_id) & (ratings['movieId'] == movie_id)] - if len(user_ratings) > 0: - return user_ratings.index[0] - else: - return None - - -# st.set_page_config(page_title="Rate a Movie", page_icon=":movie_camera:", layout="wide", initial_sidebar_state="expanded") - -# Retrieve the username and user ID arguments -parser = argparse.ArgumentParser() -parser.add_argument("--username", type=str) -parser.add_argument("--user_id", type=int) -args = parser.parse_args() -username = args.username -user_id = args.user_id - -# Display the username and user ID -st.write(f"Welcome, {username} who's UserID: {user_id}!") - - -# Create the streamlit app -st.title('Rate a Movie') - -# Get the movie title from the user -# Get the new_user_id and movieId based on the inputs - -movie_title = st.selectbox("Movie Title", movies['title'].tolist()) -movie_id=find_movie_id_by_title(movie_title) -movie_genres=find_movie_id_by_genres(movie_title) -timestamp = str(int(datetime.now().timestamp())) -# st.text(movie_title+"'s movie_id is "+movie_id+" of genres "+movie_genres+timestamp) -row_index=find_user_rating(int(user_id),int(movie_id)) -if row_index: - prevoius_rating=ratings.iloc[row_index].loc['rating'] -else: - prevoius_rating=None -st.subheader("Movie Details") -st.info(f"Movie Title: {movie_title}") -st.info(f"Movie ID: {movie_id}") -st.info(f"Movie Genres: {movie_genres}") -st.info(f"Movie Rating:{prevoius_rating}") - -# Get the rating from the user -rating = st.slider('Rating', 1.0, 5.0, step=0.5) -col11, col22 = st.columns(2) - -rateBtn = col11.button("Rate Now") -unrateBtn = col22.button("Unrate Now") - -st.text("Row Index is "+str(row_index)) -if rateBtn: - if row_index is not None: - # If the user has already rated this movie, update the rating in the ratings DataFrame - ratings.at[row_index, 
'rating'] = rating - st.success(f"Rating has updated") - else: - # If the user has not yet rated this movie, add a new row to the ratings DataFrame - new_row = {'userId': user_id, 'movieId': movie_id, 'rating': rating, 'timestamp': int(datetime.now().timestamp())} - ratings = ratings.append(new_row, ignore_index=True) - st.success(f"New rating has added ") - - # Write the updated ratings DataFrame to the ratings.csv file - ratings.to_csv('ratings.csv', index=False) - - -if unrateBtn: - # If the "Unrate" button was clicked, delete the rating from the ratings DataFrame - if row_index is not None: - ratings.drop(row_index, inplace=True) - st.success(f"Rating has deleted") - else: - st.warning("No rating found to delete.") - - # Write the updated ratings DataFrame to the ratings.csv file - ratings.to_csv('ratings.csv', index=False) - -# st.experimental_rerun() -# st.experimental_rerun() - - -# import streamlit as st -# st.header("This is recommand System") - -import os - - -os.environ['TF_CPP_MIN_LOG_LEVEL']='2' - -import tensorflow as tf -from ipaddress import summarize_address_range -import streamlit as st -import pandas as pd -import numpy as np -import pickle -import keras.optimizers -import keras.regularizers -from keras import layers -from sklearn.feature_extraction.text import TfidfVectorizer -from sklearn.feature_extraction.text import CountVectorizer -from sklearn.metrics.pairwise import cosine_similarity -from sklearn.metrics.pairwise import linear_kernel -import matplotlib.pyplot as plt -import requests -from typing import List -# from key import api_key - -# api_key : str = "8265db1679663a7ea12ac168da84d2ee8" -st.sidebar.write("Sidebar") -res=st.sidebar.radio("Select Any Movie Recommandation System",options=("Content Based Movie Prediction","Collaborative Movie Prediction")) -st.header("MOVIE RECOMMADATION SYSTEM") - -if(res=="Content Based Movie Prediction"): - # movielist=pickle.load(open('movie1Dict.pkl','rb')) - # movie=pd.DataFrame(movielist) - # st.selectbox("Choose your Favorite Movie: ",movie["title"].values) - def fetch_poster(movie_id): - url = "https://api.themoviedb.org/3/movie/{}?api_key=8265bd1679663a7ea12ac168da84d2e8&language=en-US".format(movie_id) - data = requests.get(url) - data = data.json() - poster_path = data['poster_path'] - full_path = "https://image.tmdb.org/t/p/w500/" + poster_path - return full_path - - def recommend(movie): - index = movies[movies['title'] == movie].index[0] - distances = sorted(list(enumerate(similarity[index])), reverse=True, key=lambda x: x[1]) - recommended_movie_names = [] - recommended_movie_posters = [] - for i in distances[1:6]: - # fetch the movie poster - movie_id = movies.iloc[i[0]].movie_id - recommended_movie_posters.append(fetch_poster(movie_id)) - recommended_movie_names.append(movies.iloc[i[0]].title) - - return recommended_movie_names,recommended_movie_posters - - - # st.header('Movie Recommender System') - movie_dict=pickle.load(open('movie_list.pkl','rb')) - movies=pd.DataFrame(movie_dict) - - similarity=pickle.load(open('sim.pkl','rb')) - movies=pd.DataFrame(movie_dict) - - movie_list = movies['title'].values - selected_movie = st.selectbox( - "Type or select a movie from the dropdown", - movies["title"].values - ) - - if st.button('Show Recommendation'): - recommended_movie_names,recommended_movie_posters = recommend(selected_movie) - col1, col2, col3, col4, col5 = st.columns(5) - with col1: - st.text(recommended_movie_names[0]) - st.image(recommended_movie_posters[0]) - with col2: - 
            st.text(recommended_movie_names[1])
-            st.image(recommended_movie_posters[1])
-
-        with col3:
-            st.text(recommended_movie_names[2])
-            st.image(recommended_movie_posters[2])
-        with col4:
-            st.text(recommended_movie_names[3])
-            st.image(recommended_movie_posters[3])
-        with col5:
-            st.text(recommended_movie_names[4])
-            st.image(recommended_movie_posters[4])
-
-
-elif(res=="Collaborative Movie Prediction"):
-    st.text("Get movie predictions based on your rating history")
-    kk=user_id
-
-    pUser=kk
-    if st.button("Predict"):
-        df = pd.read_csv("ratings.csv")
-        user_ids = df["userId"].unique().tolist()
-        user2user_encoded = {x: i for i, x in enumerate(user_ids)}
-        userencoded2user = {i: x for i, x in enumerate(user_ids)}
-        movie_ids = df["movieId"].unique().tolist()
-        movie2movie_encoded = {x: i for i, x in enumerate(movie_ids)}
-        movie_encoded2movie = {i: x for i, x in enumerate(movie_ids)}
-        df["user"] = df["userId"].map(user2user_encoded)
-        df["movie"] = df["movieId"].map(movie2movie_encoded)
-        num_users = len(user2user_encoded)
-        num_movies = len(movie_encoded2movie)
-        # min and max ratings will be used to normalize the ratings later
-        min_rating = min(df["rating"])
-        max_rating = max(df["rating"])
-        # cast the ratings to float32
-        df["rating"] = df["rating"].values.astype(np.float32)
-
-        df = df.sample(frac=1, random_state=42)
-
-        x = df[["user", "movie"]].values
-
-        # Normalize the targets between 0 and 1. Makes it easy to train.
-        y = df["rating"].apply(lambda x: (x - min_rating) / (max_rating - min_rating)).values
-
-        # Train on 99% of the data and validate on the remaining 1%.
-        train_indices = int(0.99 * df.shape[0])
-
-        x_train, x_val, y_train, y_val = (
-            x[:train_indices],
-            x[train_indices:],
-            y[:train_indices],
-            y[train_indices:],
-        )
-
-        EMBEDDING_SIZE = 50
-
-        class RecommenderNet(keras.Model):
-            def __init__(self, num_users, num_movies, embedding_size, **kwargs):
-                super(RecommenderNet, self).__init__(**kwargs)
-                self.num_users = num_users
-                self.num_movies = num_movies
-                self.embedding_size = embedding_size
-                self.user_embedding = layers.Embedding(
-                    num_users,
-                    embedding_size,
-                    embeddings_initializer="he_normal",
-                    embeddings_regularizer=keras.regularizers.l2(1e-6),
-                )
-                self.user_bias = layers.Embedding(num_users, 1)
-                self.movie_embedding = layers.Embedding(
-                    num_movies,
-                    embedding_size,
-                    embeddings_initializer="he_normal",
-                    embeddings_regularizer=keras.regularizers.l2(1e-6),
-                )
-                self.movie_bias = layers.Embedding(num_movies, 1)
-
-            def call(self, inputs):
-                user_vector = self.user_embedding(inputs[:, 0])
-                user_bias = self.user_bias(inputs[:, 0])
-                movie_vector = self.movie_embedding(inputs[:, 1])
-                movie_bias = self.movie_bias(inputs[:, 1])
-                dot_user_movie = tf.tensordot(user_vector, movie_vector, 2)
-                # Add all the components (including bias)
-                x = dot_user_movie + user_bias + movie_bias
-                # The sigmoid activation forces the rating to between 0 and 1
-                return tf.nn.sigmoid(x)
-
-        model = RecommenderNet(num_users, num_movies, EMBEDDING_SIZE)
-
-        model.compile(
-            loss=tf.keras.losses.BinaryCrossentropy(),
-            optimizer=keras.optimizers.Adam(learning_rate=0.001),
-        )
-
-        history = model.fit(
-            x=x_train,
-            y=y_train,
-            batch_size=32,
-            epochs=1,
-            verbose=1,
-            validation_data=(x_val, y_val),
-        )
-        loss = history.history["loss"]
-        val_loss = history.history["val_loss"]
-        movie_df = pd.read_csv("movies.csv")
-
-        # Let us get a user and see the top recommendations.
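-        # The block below ranks every movie the user has not rated yet: encode
-        # (user, movie) pairs, predict a rating in [0, 1] with the trained
-        # RecommenderNet, and keep the ten highest-scoring movie ids.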
- - # Pick a user at random. - user_id = pUser - - # Get all movies watched by the user. - movies_watched_by_user = df[df.userId == user_id] - - # Get the movies not watched by the user. - movies_not_watched = movie_df[ - ~movie_df["movieId"].isin(movies_watched_by_user.movieId.values) - ]["movieId"] - - movies_not_watched = list( - set(movies_not_watched).intersection(set(movie2movie_encoded.keys())) - ) - movies_not_watched = [[movie2movie_encoded.get(x)] for x in movies_not_watched] - - user_encoder = user2user_encoded.get(user_id) - - user_movie_array = np.hstack( - ([[user_encoder]] * len(movies_not_watched), movies_not_watched) - ) - print(type(user_movie_array)) - ratings = model.predict(user_movie_array).flatten() - - top_ratings_indices = ratings.argsort()[-10:][::-1] - - recommended_movie_ids = [ - movie_encoded2movie.get(movies_not_watched[x][0]) for x in top_ratings_indices - ] - - print("Showing recommendations for user: {}".format(user_id)) - st.subheader("Showing recommendations for user:") - # st.header(f"",pUser) - st.text("{}".format(pUser)) - print("====" * 9) - # st.write("====" *9 ) - print("Movies with high ratings from user") - st.subheader("Movies with high ratings from user") - print("----" * 8) - st.write("----" * 8) - - top_movies_user = ( - movies_watched_by_user.sort_values(by="rating", ascending=False) - .head(5) - .movieId.values - ) - movie_df_rows = movie_df[movie_df["movieId"].isin(top_movies_user)] - for row in movie_df_rows.itertuples(): - print(row.title, ":", row.genres) - st.write(row.title, ":", row.genres) - - print("\n") - print("----" * 8) - st.write("----" * 8) - print("Top 10 movie recommendations") - st.subheader("Top 10 movie recommendations") - print("----" * 8) - st.write("----" * 8) - recommended_movies = movie_df[movie_df["movieId"].isin(recommended_movie_ids)] - for row in recommended_movies.itertuples(): - print(row.title, ":", row.genres) - st.write(row.title, ":", row.genres) - diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-64.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-64.go deleted file mode 100644 index 9ffdfa85c86126672701cb8c4b7fdd229a058452..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-64.go and /dev/null differ diff --git a/spaces/PeepDaSlan9/Gryphe-MythoMix-L2-13b/README.md b/spaces/PeepDaSlan9/Gryphe-MythoMix-L2-13b/README.md deleted file mode 100644 index 0a79f290269c6eec09b6d74ec9843e3f60fdb8d0..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/Gryphe-MythoMix-L2-13b/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Gryphe MythoMix L2 13b -emoji: 🐢 -colorFrom: purple -colorTo: blue -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/utils/dist.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/utils/dist.py deleted file mode 100644 index 2b25a7a205a0379414873fa6c2be2fc6f444f00a..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/utils/dist.py +++ /dev/null @@ -1,228 +0,0 @@ -# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -""" -Utilities related to distributed mode. - -By default, the reduce of metrics and such are done on GPU, since it's more straightforward (we reuse the NCCL backend) -If you want to reduce on CPU instead (required for big datasets like GQA), use the env variable MDETR_CPU_REDUCE=1 -""" -import functools -import io -import os - -import torch -import torch.distributed as dist - -_LOCAL_PROCESS_GROUP = None - - -@functools.lru_cache() -def _get_global_gloo_group(): - """ - Return a process group based on gloo backend, containing all the ranks - The result is cached. - """ - - if dist.get_backend() == "nccl": - return dist.new_group(backend="gloo") - - return dist.group.WORLD - - -def all_gather(data): - """ - Run all_gather on arbitrary picklable data (not necessarily tensors) - Args: - data: any picklable object - Returns: - list[data]: list of data gathered from each rank - """ - - world_size = get_world_size() - if world_size == 1: - return [data] - - cpu_group = None - if os.getenv("MDETR_CPU_REDUCE") == "1": - cpu_group = _get_global_gloo_group() - - buffer = io.BytesIO() - torch.save(data, buffer) - data_view = buffer.getbuffer() - device = "cuda" if cpu_group is None else "cpu" - tensor = torch.ByteTensor(data_view).to(device) - - # obtain Tensor size of each rank - local_size = torch.tensor([tensor.numel()], device=device, dtype=torch.long) - size_list = [torch.tensor([0], device=device, dtype=torch.long) for _ in range(world_size)] - if cpu_group is None: - dist.all_gather(size_list, local_size) - else: - print("gathering on cpu") - dist.all_gather(size_list, local_size, group=cpu_group) - size_list = [int(size.item()) for size in size_list] - max_size = max(size_list) - assert isinstance(local_size.item(), int) - local_size = int(local_size.item()) - - # receiving Tensor from all ranks - # we pad the tensor because torch all_gather does not support - # gathering tensors of different shapes - tensor_list = [] - for _ in size_list: - tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=device)) - if local_size != max_size: - padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device=device) - tensor = torch.cat((tensor, padding), dim=0) - if cpu_group is None: - dist.all_gather(tensor_list, tensor) - else: - dist.all_gather(tensor_list, tensor, group=cpu_group) - - data_list = [] - for size, tensor in zip(size_list, tensor_list): - tensor = torch.split(tensor, [size, max_size - size], dim=0)[0] - buffer = io.BytesIO(tensor.cpu().numpy()) - obj = torch.load(buffer) - data_list.append(obj) - - return data_list - - -def reduce_dict(input_dict, average=True): - """ - Args: - input_dict (dict): all the values will be reduced - average (bool): whether to do average or sum - Reduce the values in the dictionary from all processes so that all processes - have the averaged results. Returns a dict with the same fields as - input_dict, after reduction. 
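-
-    Example (illustrative; the loss names are hypothetical)::
-
-        loss_dict = {'loss_ce': loss_ce, 'loss_bbox': loss_bbox}
-        # every process ends up with the same averaged values
-        loss_dict_reduced = reduce_dict(loss_dict, average=True)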
- """ - world_size = get_world_size() - if world_size < 2: - return input_dict - with torch.no_grad(): - names = [] - values = [] - # sort the keys so that they are consistent across processes - for k in sorted(input_dict.keys()): - names.append(k) - values.append(input_dict[k]) - values = torch.stack(values, dim=0) - dist.all_reduce(values) - if average: - values /= world_size - reduced_dict = {k: v for k, v in zip(names, values)} - return reduced_dict - - -def setup_for_distributed(is_master): - """ - This function disables printing when not in master process - """ - import builtins as __builtin__ - - builtin_print = __builtin__.print - - def print(*args, **kwargs): - force = kwargs.pop("force", False) - if is_master or force: - builtin_print(*args, **kwargs) - - __builtin__.print = print - - -def is_dist_avail_and_initialized(): - """ - Returns: - True if distributed training is enabled - """ - if not dist.is_available(): - return False - if not dist.is_initialized(): - return False - return True - - -def get_world_size(): - """ - Returns: - The number of processes in the process group - """ - if not is_dist_avail_and_initialized(): - return 1 - return dist.get_world_size() - - -def get_rank(): - """ - Returns: - The rank of the current process within the global process group. - """ - if not is_dist_avail_and_initialized(): - return 0 - return dist.get_rank() - - -def get_local_rank() -> int: - """ - Returns: - The rank of the current process within the local (per-machine) process group. - """ - if not dist.is_available(): - return 0 - if not dist.is_initialized(): - return 0 - assert _LOCAL_PROCESS_GROUP is not None - return dist.get_rank(group=_LOCAL_PROCESS_GROUP) - - -def get_local_size() -> int: - """ - Returns: - The size of the per-machine process group, - i.e. the number of processes per machine. 
- """ - if not dist.is_available(): - return 1 - if not dist.is_initialized(): - return 1 - return dist.get_world_size(group=_LOCAL_PROCESS_GROUP) - - -def is_main_process(): - """Return true if the current process is the main one""" - return get_rank() == 0 - - -def save_on_master(*args, **kwargs): - """Utility function to save only from the main process""" - if is_main_process(): - torch.save(*args, **kwargs) - - -def init_distributed_mode(args): - """Initialize distributed training, if appropriate""" - if "RANK" in os.environ and "WORLD_SIZE" in os.environ: - args.rank = int(os.environ["RANK"]) - args.world_size = int(os.environ["WORLD_SIZE"]) - args.gpu = int(os.environ["LOCAL_RANK"]) - elif "SLURM_PROCID" in os.environ: - args.rank = int(os.environ["SLURM_PROCID"]) - args.gpu = args.rank % torch.cuda.device_count() - else: - print("Not using distributed mode") - args.distributed = False - return - - args.distributed = True - - torch.cuda.set_device(args.gpu) - args.dist_backend = "nccl" - print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True) - - dist.init_process_group( - backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank - ) - dist.barrier() - setup_for_distributed(args.rank == 0) diff --git a/spaces/Plachta/VITS-Umamusume-voice-synthesizer/text/thai.py b/spaces/Plachta/VITS-Umamusume-voice-synthesizer/text/thai.py deleted file mode 100644 index 998207c01a85c710a46db1ec8b62c39c2d94bc84..0000000000000000000000000000000000000000 --- a/spaces/Plachta/VITS-Umamusume-voice-synthesizer/text/thai.py +++ /dev/null @@ -1,44 +0,0 @@ -import re -from num_thai.thainumbers import NumThai - - -num = NumThai() - -# List of (Latin alphabet, Thai) pairs: -_latin_to_thai = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'เอ'), - ('b','บี'), - ('c','ซี'), - ('d','ดี'), - ('e','อี'), - ('f','เอฟ'), - ('g','จี'), - ('h','เอช'), - ('i','ไอ'), - ('j','เจ'), - ('k','เค'), - ('l','แอล'), - ('m','เอ็ม'), - ('n','เอ็น'), - ('o','โอ'), - ('p','พี'), - ('q','คิว'), - ('r','แอร์'), - ('s','เอส'), - ('t','ที'), - ('u','ยู'), - ('v','วี'), - ('w','ดับเบิลยู'), - ('x','เอ็กซ์'), - ('y','วาย'), - ('z','ซี') -]] - - -def num_to_thai(text): - return re.sub(r'(?:\d+(?:,?\d+)?)+(?:\.\d+(?:,?\d+)?)?', lambda x: ''.join(num.NumberToTextThai(float(x.group(0).replace(',', '')))), text) - -def latin_to_thai(text): - for regex, replacement in _latin_to_thai: - text = re.sub(regex, replacement, text) - return text diff --git a/spaces/RMXK/RVC_HFF/lib/infer_pack/modules/F0Predictor/F0Predictor.py b/spaces/RMXK/RVC_HFF/lib/infer_pack/modules/F0Predictor/F0Predictor.py deleted file mode 100644 index f56e49e7f0e6eab3babf0711cae2933371b9f9cc..0000000000000000000000000000000000000000 --- a/spaces/RMXK/RVC_HFF/lib/infer_pack/modules/F0Predictor/F0Predictor.py +++ /dev/null @@ -1,16 +0,0 @@ -class F0Predictor(object): - def compute_f0(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length] - """ - pass - - def compute_f0_uv(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length],uv:[signal_length//hop_length] - """ - pass diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/abc.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/abc.py deleted file mode 100644 index e6e498efabfab0dcf31cd7731f8f821cc423bc4f..0000000000000000000000000000000000000000 --- 
a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/abc.py +++ /dev/null @@ -1,33 +0,0 @@ -from abc import ABC - - -class RichRenderable(ABC): - """An abstract base class for Rich renderables. - - Note that there is no need to extend this class, the intended use is to check if an - object supports the Rich renderable protocol. For example:: - - if isinstance(my_object, RichRenderable): - console.print(my_object) - - """ - - @classmethod - def __subclasshook__(cls, other: type) -> bool: - """Check if this class supports the rich render protocol.""" - return hasattr(other, "__rich_console__") or hasattr(other, "__rich__") - - -if __name__ == "__main__": # pragma: no cover - from pip._vendor.rich.text import Text - - t = Text() - print(isinstance(Text, RichRenderable)) - print(isinstance(t, RichRenderable)) - - class Foo: - pass - - f = Foo() - print(isinstance(f, RichRenderable)) - print(isinstance("", RichRenderable)) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/spinner.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/spinner.py deleted file mode 100644 index 0879088e14c2af9224a6cde62d220539ba9c34e1..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/spinner.py +++ /dev/null @@ -1,136 +0,0 @@ -from typing import cast, List, Optional, TYPE_CHECKING, Union - -from ._spinners import SPINNERS -from .measure import Measurement -from .table import Table -from .text import Text - -if TYPE_CHECKING: - from .console import Console, ConsoleOptions, RenderResult, RenderableType - from .style import StyleType - - -class Spinner: - def __init__( - self, - name: str, - text: "RenderableType" = "", - *, - style: Optional["StyleType"] = None, - speed: float = 1.0, - ) -> None: - """A spinner animation. - - Args: - name (str): Name of spinner (run python -m rich.spinner). - text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "". - style (StyleType, optional): Style for spinner animation. Defaults to None. - speed (float, optional): Speed factor for animation. Defaults to 1.0. - - Raises: - KeyError: If name isn't one of the supported spinner animations. - """ - try: - spinner = SPINNERS[name] - except KeyError: - raise KeyError(f"no spinner called {name!r}") - self.text: "Union[RenderableType, Text]" = ( - Text.from_markup(text) if isinstance(text, str) else text - ) - self.frames = cast(List[str], spinner["frames"])[:] - self.interval = cast(float, spinner["interval"]) - self.start_time: Optional[float] = None - self.style = style - self.speed = speed - self.frame_no_offset: float = 0.0 - self._update_speed = 0.0 - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": - yield self.render(console.get_time()) - - def __rich_measure__( - self, console: "Console", options: "ConsoleOptions" - ) -> Measurement: - text = self.render(0) - return Measurement.get(console, options, text) - - def render(self, time: float) -> "RenderableType": - """Render the spinner for a given time. - - Args: - time (float): Time in seconds. - - Returns: - RenderableType: A renderable containing animation frame. 
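-
-        Example (illustrative; assumes an existing ``console``)::
-
-            spinner = Spinner('dots', text='Working...')
-            renderable = spinner.render(console.get_time())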
- """ - if self.start_time is None: - self.start_time = time - - frame_no = ((time - self.start_time) * self.speed) / ( - self.interval / 1000.0 - ) + self.frame_no_offset - frame = Text( - self.frames[int(frame_no) % len(self.frames)], style=self.style or "" - ) - - if self._update_speed: - self.frame_no_offset = frame_no - self.start_time = time - self.speed = self._update_speed - self._update_speed = 0.0 - - if not self.text: - return frame - elif isinstance(self.text, (str, Text)): - return Text.assemble(frame, " ", self.text) - else: - table = Table.grid(padding=1) - table.add_row(frame, self.text) - return table - - def update( - self, - *, - text: "RenderableType" = "", - style: Optional["StyleType"] = None, - speed: Optional[float] = None, - ) -> None: - """Updates attributes of a spinner after it has been started. - - Args: - text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "". - style (StyleType, optional): Style for spinner animation. Defaults to None. - speed (float, optional): Speed factor for animation. Defaults to None. - """ - if text: - self.text = Text.from_markup(text) if isinstance(text, str) else text - if style: - self.style = style - if speed: - self._update_speed = speed - - -if __name__ == "__main__": # pragma: no cover - from time import sleep - - from .columns import Columns - from .panel import Panel - from .live import Live - - all_spinners = Columns( - [ - Spinner(spinner_name, text=Text(repr(spinner_name), style="green")) - for spinner_name in sorted(SPINNERS.keys()) - ], - column_first=True, - expand=True, - ) - - with Live( - Panel(all_spinners, title="Spinners", border_style="blue"), - refresh_per_second=20, - ) as live: - while True: - sleep(0.1) diff --git a/spaces/RaulS/D-Pose/README.md b/spaces/RaulS/D-Pose/README.md deleted file mode 100644 index 05b9be0a47017854fc1f3b8ae824ee83f3625c5f..0000000000000000000000000000000000000000 --- a/spaces/RaulS/D-Pose/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: D-Pose -emoji: 🦾 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.0.17 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/RedBaron5/PatentSolver/App/bin/FindTechnologies.py b/spaces/RedBaron5/PatentSolver/App/bin/FindTechnologies.py deleted file mode 100644 index da4afaab78de41bee535475cc4659b1a31a85f0c..0000000000000000000000000000000000000000 --- a/spaces/RedBaron5/PatentSolver/App/bin/FindTechnologies.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -* -import sys -import os -import math -import xlsxwriter -from textblob import TextBlob as tb - -class FindTechnologies(object): - - def __init__(self): - - print("Starting") - - def tf(word, blob): - return (float)(blob.noun_phrases.count(word)) / (float)(len(blob.noun_phrases)) - - - def n_containing(word, bloblist): - return sum(1 for blob in bloblist if word in blob.noun_phrases) - - - def idf(word, bloblist): - return math.log(len(bloblist) / (float)(1 + n_containing(word, bloblist))) - - - def tfidf(word, blob, bloblist): - return tf(word, blob) * idf(word, bloblist) - - - # Create an excel file for validation purpose - - def get_technologies(self): - folder_path = "C:/Users/asouili01/Documents/PatSemBeta-v3/Data/input/Gaggenau/" - stopwords = open('C:/Users/asouili01/Documents/PIXSEB/Ressources/stopwords.txt', 'r').read().split('\r\n') - bloblist = [] - - filenamelist = [] - 
- for path, dirs, files in os.walk(folder_path): - for filename in files: - print(filename) - filenamelist.append(filename) - name, extension = filename.split('.') - filepath = folder_path + "/" + filename - filehandler = open(filepath, "r",encoding="utf-8") - - content = filehandler.read() - filteredtext = [t for t in content if t.lower() not in stopwords] - filteredcontent = ''.join(filteredtext) - blob = 'blob_' + name.lower() - print (blob) - blob = tb(filteredcontent.lower()) - bloblist.append(blob) - - print(bloblist) - - for i, blob in enumerate(bloblist): - print("Top words in document {}".format(i + 1)) - scores = {word: tfidf(word, blob, bloblist) for word in blob.noun_phrases} - sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True) - for word, score in sorted_words[:5]: - print("\tWord: {}, TF-IDF: {}".format(word, round(score, 10))) - diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/grid_head.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/grid_head.py deleted file mode 100644 index 83058cbdda934ebfc3a76088e1820848ac01b78b..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/grid_head.py +++ /dev/null @@ -1,359 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, kaiming_init, normal_init - -from mmdet.models.builder import HEADS, build_loss - - -@HEADS.register_module() -class GridHead(nn.Module): - - def __init__(self, - grid_points=9, - num_convs=8, - roi_feat_size=14, - in_channels=256, - conv_kernel_size=3, - point_feat_channels=64, - deconv_kernel_size=4, - class_agnostic=False, - loss_grid=dict( - type='CrossEntropyLoss', use_sigmoid=True, - loss_weight=15), - conv_cfg=None, - norm_cfg=dict(type='GN', num_groups=36)): - super(GridHead, self).__init__() - self.grid_points = grid_points - self.num_convs = num_convs - self.roi_feat_size = roi_feat_size - self.in_channels = in_channels - self.conv_kernel_size = conv_kernel_size - self.point_feat_channels = point_feat_channels - self.conv_out_channels = self.point_feat_channels * self.grid_points - self.class_agnostic = class_agnostic - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN': - assert self.conv_out_channels % norm_cfg['num_groups'] == 0 - - assert self.grid_points >= 4 - self.grid_size = int(np.sqrt(self.grid_points)) - if self.grid_size * self.grid_size != self.grid_points: - raise ValueError('grid_points must be a square number') - - # the predicted heatmap is half of whole_map_size - if not isinstance(self.roi_feat_size, int): - raise ValueError('Only square RoIs are supporeted in Grid R-CNN') - self.whole_map_size = self.roi_feat_size * 4 - - # compute point-wise sub-regions - self.sub_regions = self.calc_sub_regions() - - self.convs = [] - for i in range(self.num_convs): - in_channels = ( - self.in_channels if i == 0 else self.conv_out_channels) - stride = 2 if i == 0 else 1 - padding = (self.conv_kernel_size - 1) // 2 - self.convs.append( - ConvModule( - in_channels, - self.conv_out_channels, - self.conv_kernel_size, - stride=stride, - padding=padding, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - bias=True)) - self.convs = nn.Sequential(*self.convs) - - self.deconv1 = nn.ConvTranspose2d( - self.conv_out_channels, - self.conv_out_channels, - 
kernel_size=deconv_kernel_size, - stride=2, - padding=(deconv_kernel_size - 2) // 2, - groups=grid_points) - self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels) - self.deconv2 = nn.ConvTranspose2d( - self.conv_out_channels, - grid_points, - kernel_size=deconv_kernel_size, - stride=2, - padding=(deconv_kernel_size - 2) // 2, - groups=grid_points) - - # find the 4-neighbor of each grid point - self.neighbor_points = [] - grid_size = self.grid_size - for i in range(grid_size): # i-th column - for j in range(grid_size): # j-th row - neighbors = [] - if i > 0: # left: (i - 1, j) - neighbors.append((i - 1) * grid_size + j) - if j > 0: # up: (i, j - 1) - neighbors.append(i * grid_size + j - 1) - if j < grid_size - 1: # down: (i, j + 1) - neighbors.append(i * grid_size + j + 1) - if i < grid_size - 1: # right: (i + 1, j) - neighbors.append((i + 1) * grid_size + j) - self.neighbor_points.append(tuple(neighbors)) - # total edges in the grid - self.num_edges = sum([len(p) for p in self.neighbor_points]) - - self.forder_trans = nn.ModuleList() # first-order feature transition - self.sorder_trans = nn.ModuleList() # second-order feature transition - for neighbors in self.neighbor_points: - fo_trans = nn.ModuleList() - so_trans = nn.ModuleList() - for _ in range(len(neighbors)): - # each transition module consists of a 5x5 depth-wise conv and - # 1x1 conv. - fo_trans.append( - nn.Sequential( - nn.Conv2d( - self.point_feat_channels, - self.point_feat_channels, - 5, - stride=1, - padding=2, - groups=self.point_feat_channels), - nn.Conv2d(self.point_feat_channels, - self.point_feat_channels, 1))) - so_trans.append( - nn.Sequential( - nn.Conv2d( - self.point_feat_channels, - self.point_feat_channels, - 5, - 1, - 2, - groups=self.point_feat_channels), - nn.Conv2d(self.point_feat_channels, - self.point_feat_channels, 1))) - self.forder_trans.append(fo_trans) - self.sorder_trans.append(so_trans) - - self.loss_grid = build_loss(loss_grid) - - def init_weights(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): - # TODO: compare mode = "fan_in" or "fan_out" - kaiming_init(m) - for m in self.modules(): - if isinstance(m, nn.ConvTranspose2d): - normal_init(m, std=0.001) - nn.init.constant_(self.deconv2.bias, -np.log(0.99 / 0.01)) - - def forward(self, x): - assert x.shape[-1] == x.shape[-2] == self.roi_feat_size - # RoI feature transformation, downsample 2x - x = self.convs(x) - - c = self.point_feat_channels - # first-order fusion - x_fo = [None for _ in range(self.grid_points)] - for i, points in enumerate(self.neighbor_points): - x_fo[i] = x[:, i * c:(i + 1) * c] - for j, point_idx in enumerate(points): - x_fo[i] = x_fo[i] + self.forder_trans[i][j]( - x[:, point_idx * c:(point_idx + 1) * c]) - - # second-order fusion - x_so = [None for _ in range(self.grid_points)] - for i, points in enumerate(self.neighbor_points): - x_so[i] = x[:, i * c:(i + 1) * c] - for j, point_idx in enumerate(points): - x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx]) - - # predicted heatmap with fused features - x2 = torch.cat(x_so, dim=1) - x2 = self.deconv1(x2) - x2 = F.relu(self.norm1(x2), inplace=True) - heatmap = self.deconv2(x2) - - # predicted heatmap with original features (applicable during training) - if self.training: - x1 = x - x1 = self.deconv1(x1) - x1 = F.relu(self.norm1(x1), inplace=True) - heatmap_unfused = self.deconv2(x1) - else: - heatmap_unfused = heatmap - - return dict(fused=heatmap, unfused=heatmap_unfused) - - def calc_sub_regions(self): - 
"""Compute point specific representation regions. - - See Grid R-CNN Plus (https://arxiv.org/abs/1906.05688) for details. - """ - # to make it consistent with the original implementation, half_size - # is computed as 2 * quarter_size, which is smaller - half_size = self.whole_map_size // 4 * 2 - sub_regions = [] - for i in range(self.grid_points): - x_idx = i // self.grid_size - y_idx = i % self.grid_size - if x_idx == 0: - sub_x1 = 0 - elif x_idx == self.grid_size - 1: - sub_x1 = half_size - else: - ratio = x_idx / (self.grid_size - 1) - 0.25 - sub_x1 = max(int(ratio * self.whole_map_size), 0) - - if y_idx == 0: - sub_y1 = 0 - elif y_idx == self.grid_size - 1: - sub_y1 = half_size - else: - ratio = y_idx / (self.grid_size - 1) - 0.25 - sub_y1 = max(int(ratio * self.whole_map_size), 0) - sub_regions.append( - (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size)) - return sub_regions - - def get_targets(self, sampling_results, rcnn_train_cfg): - # mix all samples (across images) together. - pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results], - dim=0).cpu() - pos_gt_bboxes = torch.cat( - [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu() - assert pos_bboxes.shape == pos_gt_bboxes.shape - - # expand pos_bboxes to 2x of original size - x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2 - y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2 - x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2 - y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2 - pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) - pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1) - pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1) - - num_rois = pos_bboxes.shape[0] - map_size = self.whole_map_size - # this is not the final target shape - targets = torch.zeros((num_rois, self.grid_points, map_size, map_size), - dtype=torch.float) - - # pre-compute interpolation factors for all grid points. - # the first item is the factor of x-dim, and the second is y-dim. - # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1) - factors = [] - for j in range(self.grid_points): - x_idx = j // self.grid_size - y_idx = j % self.grid_size - factors.append((1 - x_idx / (self.grid_size - 1), - 1 - y_idx / (self.grid_size - 1))) - - radius = rcnn_train_cfg.pos_radius - radius2 = radius**2 - for i in range(num_rois): - # ignore small bboxes - if (pos_bbox_ws[i] <= self.grid_size - or pos_bbox_hs[i] <= self.grid_size): - continue - # for each grid point, mark a small circle as positive - for j in range(self.grid_points): - factor_x, factor_y = factors[j] - gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + ( - 1 - factor_x) * pos_gt_bboxes[i, 2] - gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + ( - 1 - factor_y) * pos_gt_bboxes[i, 3] - - cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] * - map_size) - cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] * - map_size) - - for x in range(cx - radius, cx + radius + 1): - for y in range(cy - radius, cy + radius + 1): - if x >= 0 and x < map_size and y >= 0 and y < map_size: - if (x - cx)**2 + (y - cy)**2 <= radius2: - targets[i, j, y, x] = 1 - # reduce the target heatmap size by a half - # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688). 
- sub_targets = [] - for i in range(self.grid_points): - sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i] - sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2]) - sub_targets = torch.cat(sub_targets, dim=1) - sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device) - return sub_targets - - def loss(self, grid_pred, grid_targets): - loss_fused = self.loss_grid(grid_pred['fused'], grid_targets) - loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets) - loss_grid = loss_fused + loss_unfused - return dict(loss_grid=loss_grid) - - def get_bboxes(self, det_bboxes, grid_pred, img_metas): - # TODO: refactoring - assert det_bboxes.shape[0] == grid_pred.shape[0] - det_bboxes = det_bboxes.cpu() - cls_scores = det_bboxes[:, [4]] - det_bboxes = det_bboxes[:, :4] - grid_pred = grid_pred.sigmoid().cpu() - - R, c, h, w = grid_pred.shape - half_size = self.whole_map_size // 4 * 2 - assert h == w == half_size - assert c == self.grid_points - - # find the point with max scores in the half-sized heatmap - grid_pred = grid_pred.view(R * c, h * w) - pred_scores, pred_position = grid_pred.max(dim=1) - xs = pred_position % w - ys = pred_position // w - - # get the position in the whole heatmap instead of half-sized heatmap - for i in range(self.grid_points): - xs[i::self.grid_points] += self.sub_regions[i][0] - ys[i::self.grid_points] += self.sub_regions[i][1] - - # reshape to (num_rois, grid_points) - pred_scores, xs, ys = tuple( - map(lambda x: x.view(R, c), [pred_scores, xs, ys])) - - # get expanded pos_bboxes - widths = (det_bboxes[:, 2] - det_bboxes[:, 0]).unsqueeze(-1) - heights = (det_bboxes[:, 3] - det_bboxes[:, 1]).unsqueeze(-1) - x1 = (det_bboxes[:, 0, None] - widths / 2) - y1 = (det_bboxes[:, 1, None] - heights / 2) - # map the grid point to the absolute coordinates - abs_xs = (xs.float() + 0.5) / w * widths + x1 - abs_ys = (ys.float() + 0.5) / h * heights + y1 - - # get the grid points indices that fall on the bbox boundaries - x1_inds = [i for i in range(self.grid_size)] - y1_inds = [i * self.grid_size for i in range(self.grid_size)] - x2_inds = [ - self.grid_points - self.grid_size + i - for i in range(self.grid_size) - ] - y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)] - - # voting of all grid points on some boundary - bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum( - dim=1, keepdim=True) / ( - pred_scores[:, x1_inds].sum(dim=1, keepdim=True)) - bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum( - dim=1, keepdim=True) / ( - pred_scores[:, y1_inds].sum(dim=1, keepdim=True)) - bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum( - dim=1, keepdim=True) / ( - pred_scores[:, x2_inds].sum(dim=1, keepdim=True)) - bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum( - dim=1, keepdim=True) / ( - pred_scores[:, y2_inds].sum(dim=1, keepdim=True)) - - bbox_res = torch.cat( - [bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2, cls_scores], dim=1) - bbox_res[:, [0, 2]].clamp_(min=0, max=img_metas[0]['img_shape'][1]) - bbox_res[:, [1, 3]].clamp_(min=0, max=img_metas[0]['img_shape'][0]) - - return bbox_res diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/core/seg/sampler/ohem_pixel_sampler.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/core/seg/sampler/ohem_pixel_sampler.py deleted file mode 100644 index 88bb10d44026ba9f21756eaea9e550841cd59b9f..0000000000000000000000000000000000000000 --- 
a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/core/seg/sampler/ohem_pixel_sampler.py +++ /dev/null @@ -1,76 +0,0 @@ -import torch -import torch.nn.functional as F - -from ..builder import PIXEL_SAMPLERS -from .base_pixel_sampler import BasePixelSampler - - -@PIXEL_SAMPLERS.register_module() -class OHEMPixelSampler(BasePixelSampler): - """Online Hard Example Mining Sampler for segmentation. - - Args: - context (nn.Module): The context of sampler, subclass of - :obj:`BaseDecodeHead`. - thresh (float, optional): The threshold for hard example selection. - Below which, are prediction with low confidence. If not - specified, the hard examples will be pixels of top ``min_kept`` - loss. Default: None. - min_kept (int, optional): The minimum number of predictions to keep. - Default: 100000. - """ - - def __init__(self, context, thresh=None, min_kept=100000): - super(OHEMPixelSampler, self).__init__() - self.context = context - assert min_kept > 1 - self.thresh = thresh - self.min_kept = min_kept - - def sample(self, seg_logit, seg_label): - """Sample pixels that have high loss or with low prediction confidence. - - Args: - seg_logit (torch.Tensor): segmentation logits, shape (N, C, H, W) - seg_label (torch.Tensor): segmentation label, shape (N, 1, H, W) - - Returns: - torch.Tensor: segmentation weight, shape (N, H, W) - """ - with torch.no_grad(): - assert seg_logit.shape[2:] == seg_label.shape[2:] - assert seg_label.shape[1] == 1 - seg_label = seg_label.squeeze(1).long() - batch_kept = self.min_kept * seg_label.size(0) - valid_mask = seg_label != self.context.ignore_index - seg_weight = seg_logit.new_zeros(size=seg_label.size()) - valid_seg_weight = seg_weight[valid_mask] - if self.thresh is not None: - seg_prob = F.softmax(seg_logit, dim=1) - - tmp_seg_label = seg_label.clone().unsqueeze(1) - tmp_seg_label[tmp_seg_label == self.context.ignore_index] = 0 - seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1) - sort_prob, sort_indices = seg_prob[valid_mask].sort() - - if sort_prob.numel() > 0: - min_threshold = sort_prob[min(batch_kept, - sort_prob.numel() - 1)] - else: - min_threshold = 0.0 - threshold = max(min_threshold, self.thresh) - valid_seg_weight[seg_prob[valid_mask] < threshold] = 1. - else: - losses = self.context.loss_decode( - seg_logit, - seg_label, - weight=None, - ignore_index=self.context.ignore_index, - reduction_override='none') - # faster than topk according to https://github.com/pytorch/pytorch/issues/22812 # noqa - _, sort_indices = losses[valid_mask].sort(descending=True) - valid_seg_weight[sort_indices[:batch_kept]] = 1. - - seg_weight[valid_mask] = valid_seg_weight - - return seg_weight diff --git a/spaces/Rohit001/emotion_detection/app.py b/spaces/Rohit001/emotion_detection/app.py deleted file mode 100644 index 00a5d6ac67d0c53daa441f39789d86ef269b5a93..0000000000000000000000000000000000000000 --- a/spaces/Rohit001/emotion_detection/app.py +++ /dev/null @@ -1,75 +0,0 @@ -from flask import Flask,render_template -from flask_socketio import SocketIO,emit -import base64 -import numpy as np -import cv2 -import time -from deepface import DeepFace - - -app = Flask(__name__) -app.config['SECRET_KEY'] = 'secret!' 
-socket = SocketIO(app,async_mode="eventlet") - - -def base64_to_image(base64_string): - # Extract the base64 encoded binary data from the input string - base64_data = base64_string.split(",")[1] - # Decode the base64 data to bytes - image_bytes = base64.b64decode(base64_data) - # Convert the bytes to numpy array - image_array = np.frombuffer(image_bytes, dtype=np.uint8) - # Decode the numpy array as an image using OpenCV - image = cv2.imdecode(image_array, cv2.IMREAD_COLOR) - return image - -def music_link(emo): - if emo == "fear": - res = '<iframe style="border-radius:12px" src="https://open.spotify.com/embed/playlist/54i4ygGRUiT04Ro6ZcsXqo?utm_source=generator&theme=0" width="100%" height="352" frameBorder="0" allowfullscreen="" allow="autoplay; clipboard-write; encrypted-media; fullscreen; picture-in-picture" loading="lazy"></iframe>' - elif emo == "angry": - res = '<iframe style="border-radius:12px" src="https://open.spotify.com/embed/playlist/0dsl5hbFdVT7scb7Vakkwa?utm_source=generator" width="100%" height="352" frameBorder="0" allowfullscreen="" allow="autoplay; clipboard-write; encrypted-media; fullscreen; picture-in-picture" loading="lazy"></iframe>' - elif emo == 'neutral': - res = '<iframe style="border-radius:12px" src="https://open.spotify.com/embed/playlist/37i9dQZEVXbLZ52XmnySJg?utm_source=generator" width="100%" height="352" frameBorder="0" allowfullscreen="" allow="autoplay; clipboard-write; encrypted-media; fullscreen; picture-in-picture" loading="lazy"></iframe>' - elif emo =='sad': - res= '<iframe style="border-radius:12px" src="https://open.spotify.com/embed/playlist/37i9dQZF1EVKuMoAJjoTIw?utm_source=generator" width="100%" height="352" frameBorder="0" allowfullscreen="" allow="autoplay; clipboard-write; encrypted-media; fullscreen; picture-in-picture" loading="lazy"></iframe>' - elif emo == 'disgust': - res = '<iframe style="border-radius:12px" src="https://open.spotify.com/embed/playlist/3a8ssl2IKbhSmEzzIPYvbC?utm_source=generator" width="100%" height="352" frameBorder="0" allowfullscreen="" allow="autoplay; clipboard-write; encrypted-media; fullscreen; picture-in-picture" loading="lazy"></iframe>' - elif emo == 'happy': - res = '<iframe style="border-radius:12px" src="https://open.spotify.com/embed/playlist/37i9dQZF1EVJSvZp5AOML2?utm_source=generator" width="100%" height="352" frameBorder="0" allowfullscreen="" allow="autoplay; clipboard-write; encrypted-media; fullscreen; picture-in-picture" loading="lazy"></iframe>' - elif emo == 'surprise': - res = '<iframe style="border-radius:12px" src="https://open.spotify.com/embed/playlist/37i9dQZF1DX5cZuAHLNjGz?utm_source=generator" width="100%" height="352" frameBorder="0" allowfullscreen="" allow="autoplay; clipboard-write; encrypted-media; fullscreen; picture-in-picture" loading="lazy"></iframe>' - else: - res = '<iframe style="border-radius:12px" src="https://open.spotify.com/embed/playlist/37i9dQZEVXbMDoHDwVN2tF?utm_source=generator" width="100%" height="352" frameBorder="0" allowfullscreen="" allow="autoplay; clipboard-write; encrypted-media; fullscreen; picture-in-picture" loading="lazy"></iframe>' - - return res - - - - -@socket.on("connect") -def test_connect(): - print("Connected") - emit("my response", {"data": "Connected"}) - -@socket.on("image") -def receive_image(image): - # Decode the base64-encoded image data - image = base64_to_image(image) - image = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA) - # emit("processed_image", image) - # Predicts the model - cv2.imwrite("./res.jpg",image) - objs = 
DeepFace.analyze(img_path = "./res.jpg", - actions = ['emotion']) - time.sleep(3) - emo = objs[0]['dominant_emotion'] - res = music_link(emo) - emit("result",{"emo":str(emo),"res":res}) - -@app.route("/") -def home(): - return render_template("index.html") - -if __name__ == '__main__': - # app.run(debug=True) - socket.run(app, debug=True,port=7860,host="0.0.0.0") diff --git a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/metrics/perceptual_path_length.py b/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/metrics/perceptual_path_length.py deleted file mode 100644 index d070f45a04efed7e9492fddb85078be306753282..0000000000000000000000000000000000000000 --- a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/metrics/perceptual_path_length.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Perceptual Path Length (PPL) from the paper "A Style-Based Generator -Architecture for Generative Adversarial Networks". Matches the original -implementation by Karras et al. at -https://github.com/NVlabs/stylegan/blob/master/metrics/perceptual_path_length.py""" - -import copy -import numpy as np -import torch -import dnnlib -from . import metric_utils - -#---------------------------------------------------------------------------- - -# Spherical interpolation of a batch of vectors. -def slerp(a, b, t): - a = a / a.norm(dim=-1, keepdim=True) - b = b / b.norm(dim=-1, keepdim=True) - d = (a * b).sum(dim=-1, keepdim=True) - p = t * torch.acos(d) - c = b - d * a - c = c / c.norm(dim=-1, keepdim=True) - d = a * torch.cos(p) + c * torch.sin(p) - d = d / d.norm(dim=-1, keepdim=True) - return d - -#---------------------------------------------------------------------------- - -class PPLSampler(torch.nn.Module): - def __init__(self, G, G_kwargs, epsilon, space, sampling, crop, vgg16): - assert space in ['z', 'w'] - assert sampling in ['full', 'end'] - super().__init__() - self.G = copy.deepcopy(G) - self.G_kwargs = G_kwargs - self.epsilon = epsilon - self.space = space - self.sampling = sampling - self.crop = crop - self.vgg16 = copy.deepcopy(vgg16) - - def forward(self, c): - # Generate random latents and interpolation t-values. - t = torch.rand([c.shape[0]], device=c.device) * (1 if self.sampling == 'full' else 0) - z0, z1 = torch.randn([c.shape[0] * 2, self.G.z_dim], device=c.device).chunk(2) - - # Interpolate in W or Z. - if self.space == 'w': - w0, w1 = self.G.mapping(z=torch.cat([z0,z1]), c=torch.cat([c,c])).chunk(2) - wt0 = w0.lerp(w1, t.unsqueeze(1).unsqueeze(2)) - wt1 = w0.lerp(w1, t.unsqueeze(1).unsqueeze(2) + self.epsilon) - else: # space == 'z' - zt0 = slerp(z0, z1, t.unsqueeze(1)) - zt1 = slerp(z0, z1, t.unsqueeze(1) + self.epsilon) - wt0, wt1 = self.G.mapping(z=torch.cat([zt0,zt1]), c=torch.cat([c,c])).chunk(2) - - # Randomize noise buffers. - for name, buf in self.G.named_buffers(): - if name.endswith('.noise_const'): - buf.copy_(torch.randn_like(buf)) - - # Generate images. - img = self.G.synthesis(ws=torch.cat([wt0,wt1]), noise_mode='const', force_fp32=True, **self.G_kwargs) - - # Center crop. 
- if self.crop: - assert img.shape[2] == img.shape[3] - c = img.shape[2] // 8 - img = img[:, :, c*3 : c*7, c*2 : c*6] - - # Downsample to 256x256. - factor = self.G.img_resolution // 256 - if factor > 1: - img = img.reshape([-1, img.shape[1], img.shape[2] // factor, factor, img.shape[3] // factor, factor]).mean([3, 5]) - - # Scale dynamic range from [-1,1] to [0,255]. - img = (img + 1) * (255 / 2) - if self.G.img_channels == 1: - img = img.repeat([1, 3, 1, 1]) - - # Evaluate differential LPIPS. - lpips_t0, lpips_t1 = self.vgg16(img, resize_images=False, return_lpips=True).chunk(2) - dist = (lpips_t0 - lpips_t1).square().sum(1) / self.epsilon ** 2 - return dist - -#---------------------------------------------------------------------------- - -def compute_ppl(opts, num_samples, epsilon, space, sampling, crop, batch_size, jit=False): - dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs) - vgg16_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt' - vgg16 = metric_utils.get_feature_detector(vgg16_url, num_gpus=opts.num_gpus, rank=opts.rank, verbose=opts.progress.verbose) - - # Setup sampler. - sampler = PPLSampler(G=opts.G, G_kwargs=opts.G_kwargs, epsilon=epsilon, space=space, sampling=sampling, crop=crop, vgg16=vgg16) - sampler.eval().requires_grad_(False).to(opts.device) - if jit: - c = torch.zeros([batch_size, opts.G.c_dim], device=opts.device) - sampler = torch.jit.trace(sampler, [c], check_trace=False) - - # Sampling loop. - dist = [] - progress = opts.progress.sub(tag='ppl sampling', num_items=num_samples) - for batch_start in range(0, num_samples, batch_size * opts.num_gpus): - progress.update(batch_start) - c = [dataset.get_label(np.random.randint(len(dataset))) for _i in range(batch_size)] - c = torch.from_numpy(np.stack(c)).pin_memory().to(opts.device) - x = sampler(c) - for src in range(opts.num_gpus): - y = x.clone() - if opts.num_gpus > 1: - torch.distributed.broadcast(y, src=src) - dist.append(y) - progress.update(num_samples) - - # Compute PPL. - if opts.rank != 0: - return float('nan') - dist = torch.cat(dist)[:num_samples].cpu().numpy() - lo = np.percentile(dist, 1, interpolation='lower') - hi = np.percentile(dist, 99, interpolation='higher') - ppl = np.extract(np.logical_and(dist >= lo, dist <= hi), dist).mean() - return float(ppl) - -#---------------------------------------------------------------------------- diff --git a/spaces/SIGGRAPH2022/sketch2pose/src/spin/hmr.py b/spaces/SIGGRAPH2022/sketch2pose/src/spin/hmr.py deleted file mode 100644 index 851352bb1516a9fae9180370795e05f89be0d467..0000000000000000000000000000000000000000 --- a/spaces/SIGGRAPH2022/sketch2pose/src/spin/hmr.py +++ /dev/null @@ -1,196 +0,0 @@ -import math - -import numpy as np -import torch -import torch.nn as nn -import torchvision.models.resnet as resnet - - -def rot6d_to_rotmat(x): - """Convert 6D rotation representation to 3x3 rotation matrix. 
- Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019 - Input: - (B,6) Batch of 6-D rotation representations - Output: - (B,3,3) Batch of corresponding rotation matrices - """ - - x = x.view(-1, 3, 2) - a1 = x[:, :, 0] - a2 = x[:, :, 1] - b1 = nn.functional.normalize(a1) - b2 = nn.functional.normalize( - a2 - torch.einsum("bi,bi->b", b1, a2).unsqueeze(-1) * b1 - ) - - b3 = torch.cross(b1, b2) - - return torch.stack((b1, b2, b3), dim=-1) - - -class Bottleneck(nn.Module): - """Redefinition of Bottleneck residual block - Adapted from the official PyTorch implementation - """ - - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(Bottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) - self.bn1 = nn.BatchNorm2d(planes) - self.conv2 = nn.Conv2d( - planes, planes, kernel_size=3, stride=stride, padding=1, bias=False - ) - self.bn2 = nn.BatchNorm2d(planes) - self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * 4) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class HMR(nn.Module): - """SMPL Iterative Regressor with ResNet50 backbone""" - - def __init__(self, block, layers, smpl_mean_params): - self.inplanes = 64 - super(HMR, self).__init__() - self.n_shape = 10 - self.n_cam = 3 - self.n_joints = 24 - npose = self.n_joints * 6 - self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) - self.bn1 = nn.BatchNorm2d(64) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - self.layer1 = self._make_layer(block, 64, layers[0]) - self.layer2 = self._make_layer(block, 128, layers[1], stride=2) - self.layer3 = self._make_layer(block, 256, layers[2], stride=2) - self.layer4 = self._make_layer(block, 512, layers[3], stride=2) - self.avgpool = nn.AvgPool2d(7, stride=1) - self.fc1 = nn.Linear(512 * block.expansion + npose + self.n_shape + self.n_cam, 1024) - self.drop1 = nn.Dropout() - self.fc2 = nn.Linear(1024, 1024) - self.drop2 = nn.Dropout() - self.decpose = nn.Linear(1024, npose) - self.decshape = nn.Linear(1024, self.n_shape) - self.deccam = nn.Linear(1024, self.n_cam) - nn.init.xavier_uniform_(self.decpose.weight, gain=0.01) - nn.init.xavier_uniform_(self.decshape.weight, gain=0.01) - nn.init.xavier_uniform_(self.deccam.weight, gain=0.01) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2.0 / n)) - elif isinstance(m, nn.BatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - - mean_params = np.load(smpl_mean_params) - init_pose = torch.from_numpy(mean_params["pose"][:]).unsqueeze(0) - init_shape = torch.from_numpy( - mean_params["shape"][:].astype("float32") - ).unsqueeze(0) - init_cam = torch.from_numpy(mean_params["cam"]).unsqueeze(0) - self.register_buffer("init_pose", init_pose) - self.register_buffer("init_shape", init_shape) - self.register_buffer("init_cam", init_cam) - - def _make_layer(self, block, planes, blocks, stride=1): - 
downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d( - self.inplanes, - planes * block.expansion, - kernel_size=1, - stride=stride, - bias=False, - ), - nn.BatchNorm2d(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample)) - self.inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append(block(self.inplanes, planes)) - - return nn.Sequential(*layers) - - def forward(self, x, init_pose=None, init_shape=None, init_cam=None, n_iter=3): - - batch_size = x.shape[0] - - if init_pose is None: - init_pose = self.init_pose.expand(batch_size, -1) - if init_shape is None: - init_shape = self.init_shape.expand(batch_size, -1) - if init_cam is None: - init_cam = self.init_cam.expand(batch_size, -1) - - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - - x1 = self.layer1(x) - x2 = self.layer2(x1) - x3 = self.layer3(x2) - x4 = self.layer4(x3) - - xf = self.avgpool(x4) - xf = xf.view(xf.size(0), -1) - - pred_pose = init_pose - pred_shape = init_shape - pred_cam = init_cam - for _ in range(n_iter): - xc = torch.cat([xf, pred_pose, pred_shape, pred_cam], 1) - xc = self.fc1(xc) - xc = self.drop1(xc) - xc = self.fc2(xc) - xc = self.drop2(xc) - pred_pose = self.decpose(xc) + pred_pose - pred_shape = self.decshape(xc) + pred_shape - pred_cam = self.deccam(xc) + pred_cam - - pred_rotmat = rot6d_to_rotmat(pred_pose).view(batch_size, self.n_joints, 3, 3) - - return pred_rotmat, pred_shape, pred_cam - - -def hmr(smpl_mean_params, pretrained=True, **kwargs): - """Constructs an HMR model with ResNet50 backbone. - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = HMR(Bottleneck, [3, 4, 6, 3], smpl_mean_params, **kwargs) - if pretrained: - resnet_imagenet = resnet.resnet50(pretrained=True) - model.load_state_dict(resnet_imagenet.state_dict(), strict=False) - return model diff --git a/spaces/SQSora/VITS-Umamusume-voice-synthesizer/ONNXVITS_inference.py b/spaces/SQSora/VITS-Umamusume-voice-synthesizer/ONNXVITS_inference.py deleted file mode 100644 index 258b618cd338322365dfa25bec468a0a3f70ccd1..0000000000000000000000000000000000000000 --- a/spaces/SQSora/VITS-Umamusume-voice-synthesizer/ONNXVITS_inference.py +++ /dev/null @@ -1,36 +0,0 @@ -import logging -logging.getLogger('numba').setLevel(logging.WARNING) -import IPython.display as ipd -import torch -import commons -import utils -import ONNXVITS_infer -from text import text_to_sequence - -def get_text(text, hps): - text_norm = text_to_sequence(text, hps.symbols, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - return text_norm - -hps = utils.get_hparams_from_file("../vits/pretrained_models/uma87.json") - -net_g = ONNXVITS_infer.SynthesizerTrn( - len(hps.symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model) -_ = net_g.eval() - -_ = utils.load_checkpoint("../vits/pretrained_models/uma_1153000.pth", net_g) - -text1 = get_text("おはようございます。", hps) -stn_tst = text1 -with torch.no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = torch.LongTensor([stn_tst.size(0)]) - sid = torch.LongTensor([0]) - audio = net_g.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][0,0].data.cpu().float().numpy() -print(audio) \ No newline at end of file 
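The inference script above only prints the raw waveform array; a natural follow-up is writing it to disk. A minimal sketch, assuming the standard VITS config layout where the sample rate is exposed as hps.data.sampling_rate (treat the exact field name and the output filename as assumptions):

from scipy.io import wavfile

# `audio` is the float32 numpy waveform produced by net_g.infer above;
# hps.data.sampling_rate is assumed to hold the model's sample rate.
wavfile.write("uma_sample.wav", int(hps.data.sampling_rate), audio)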
diff --git a/spaces/SUSSYMANBI/nerijs-pixel-art-xl-sdxl/README.md b/spaces/SUSSYMANBI/nerijs-pixel-art-xl-sdxl/README.md deleted file mode 100644 index 95f13ae8993a89a695069eb63d619b64f9f441fe..0000000000000000000000000000000000000000 --- a/spaces/SUSSYMANBI/nerijs-pixel-art-xl-sdxl/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Nerijs Pixel Art Xl Sdxl -emoji: 📚 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.47.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Silentlin/DiffSinger/tasks/base_task.py b/spaces/Silentlin/DiffSinger/tasks/base_task.py deleted file mode 100644 index b74d25c85ce8a86865c5d5a09f3f92579ffb2074..0000000000000000000000000000000000000000 --- a/spaces/Silentlin/DiffSinger/tasks/base_task.py +++ /dev/null @@ -1,360 +0,0 @@ -import glob -import re -import subprocess -from datetime import datetime - -import matplotlib - -matplotlib.use('Agg') - -from utils.hparams import hparams, set_hparams -import random -import sys -import numpy as np -import torch.distributed as dist -from pytorch_lightning.loggers import TensorBoardLogger -from utils.pl_utils import LatestModelCheckpoint, BaseTrainer, data_loader, DDP -from torch import nn -import torch.utils.data -import utils -import logging -import os - -torch.multiprocessing.set_sharing_strategy(os.getenv('TORCH_SHARE_STRATEGY', 'file_system')) - -log_format = '%(asctime)s %(message)s' -logging.basicConfig(stream=sys.stdout, level=logging.INFO, - format=log_format, datefmt='%m/%d %I:%M:%S %p') - - -class BaseDataset(torch.utils.data.Dataset): - def __init__(self, shuffle): - super().__init__() - self.hparams = hparams - self.shuffle = shuffle - self.sort_by_len = hparams['sort_by_len'] - self.sizes = None - - @property - def _sizes(self): - return self.sizes - - def __getitem__(self, index): - raise NotImplementedError - - def collater(self, samples): - raise NotImplementedError - - def __len__(self): - return len(self._sizes) - - def num_tokens(self, index): - return self.size(index) - - def size(self, index): - """Return an example's size as a float or tuple. This value is used when - filtering a dataset with ``--max-positions``.""" - size = min(self._sizes[index], hparams['max_frames']) - return size - - def ordered_indices(self): - """Return an ordered list of indices. Batches will be constructed based - on this order.""" - if self.shuffle: - indices = np.random.permutation(len(self)) - if self.sort_by_len: - indices = indices[np.argsort(np.array(self._sizes)[indices], kind='mergesort')] - # 先random, 然后稳定排序, 保证排序后同长度的数据顺序是依照random permutation的 (被其随机打乱). 
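                # i.e. shuffle first, then stable-sort by length, so that
                # same-length items keep the randomly permuted relative order.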
- else: - indices = np.arange(len(self)) - return indices - - @property - def num_workers(self): - return int(os.getenv('NUM_WORKERS', hparams['ds_workers'])) - - -class BaseTask(nn.Module): - def __init__(self, *args, **kwargs): - # dataset configs - super(BaseTask, self).__init__(*args, **kwargs) - self.current_epoch = 0 - self.global_step = 0 - self.loaded_optimizer_states_dict = {} - self.trainer = None - self.logger = None - self.on_gpu = False - self.use_dp = False - self.use_ddp = False - self.example_input_array = None - - self.max_tokens = hparams['max_tokens'] - self.max_sentences = hparams['max_sentences'] - self.max_eval_tokens = hparams['max_eval_tokens'] - if self.max_eval_tokens == -1: - hparams['max_eval_tokens'] = self.max_eval_tokens = self.max_tokens - self.max_eval_sentences = hparams['max_eval_sentences'] - if self.max_eval_sentences == -1: - hparams['max_eval_sentences'] = self.max_eval_sentences = self.max_sentences - - self.model = None - self.training_losses_meter = None - - ########### - # Training, validation and testing - ########### - def build_model(self): - raise NotImplementedError - - def load_ckpt(self, ckpt_base_dir, current_model_name=None, model_name='model', force=True, strict=True): - # This function is updated on 2021.12.13 - if current_model_name is None: - current_model_name = model_name - utils.load_ckpt(self.__getattr__(current_model_name), ckpt_base_dir, current_model_name, force, strict) - - def on_epoch_start(self): - self.training_losses_meter = {'total_loss': utils.AvgrageMeter()} - - def _training_step(self, sample, batch_idx, optimizer_idx): - """ - - :param sample: - :param batch_idx: - :return: total loss: torch.Tensor, loss_log: dict - """ - raise NotImplementedError - - def training_step(self, sample, batch_idx, optimizer_idx=-1): - loss_ret = self._training_step(sample, batch_idx, optimizer_idx) - self.opt_idx = optimizer_idx - if loss_ret is None: - return {'loss': None} - total_loss, log_outputs = loss_ret - log_outputs = utils.tensors_to_scalars(log_outputs) - for k, v in log_outputs.items(): - if k not in self.training_losses_meter: - self.training_losses_meter[k] = utils.AvgrageMeter() - if not np.isnan(v): - self.training_losses_meter[k].update(v) - self.training_losses_meter['total_loss'].update(total_loss.item()) - - try: - log_outputs['lr'] = self.scheduler.get_lr() - if isinstance(log_outputs['lr'], list): - log_outputs['lr'] = log_outputs['lr'][0] - except: - pass - - # log_outputs['all_loss'] = total_loss.item() - progress_bar_log = log_outputs - tb_log = {f'tr/{k}': v for k, v in log_outputs.items()} - return { - 'loss': total_loss, - 'progress_bar': progress_bar_log, - 'log': tb_log - } - - def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx): - optimizer.step() - optimizer.zero_grad() - if self.scheduler is not None: - self.scheduler.step(self.global_step // hparams['accumulate_grad_batches']) - - def on_epoch_end(self): - loss_outputs = {k: round(v.avg, 4) for k, v in self.training_losses_meter.items()} - print(f"\n==============\n " - f"Epoch {self.current_epoch} ended. Steps: {self.global_step}. 
{loss_outputs}" - f"\n==============\n") - - def validation_step(self, sample, batch_idx): - """ - - :param sample: - :param batch_idx: - :return: output: dict - """ - raise NotImplementedError - - def _validation_end(self, outputs): - """ - - :param outputs: - :return: loss_output: dict - """ - raise NotImplementedError - - def validation_end(self, outputs): - loss_output = self._validation_end(outputs) - print(f"\n==============\n " - f"valid results: {loss_output}" - f"\n==============\n") - return { - 'log': {f'val/{k}': v for k, v in loss_output.items()}, - 'val_loss': loss_output['total_loss'] - } - - def build_scheduler(self, optimizer): - raise NotImplementedError - - def build_optimizer(self, model): - raise NotImplementedError - - def configure_optimizers(self): - optm = self.build_optimizer(self.model) - self.scheduler = self.build_scheduler(optm) - return [optm] - - def test_start(self): - pass - - def test_step(self, sample, batch_idx): - return self.validation_step(sample, batch_idx) - - def test_end(self, outputs): - return self.validation_end(outputs) - - ########### - # Running configuration - ########### - - @classmethod - def start(cls): - set_hparams() - os.environ['MASTER_PORT'] = str(random.randint(15000, 30000)) - random.seed(hparams['seed']) - np.random.seed(hparams['seed']) - task = cls() - work_dir = hparams['work_dir'] - trainer = BaseTrainer(checkpoint_callback=LatestModelCheckpoint( - filepath=work_dir, - verbose=True, - monitor='val_loss', - mode='min', - num_ckpt_keep=hparams['num_ckpt_keep'], - save_best=hparams['save_best'], - period=1 if hparams['save_ckpt'] else 100000 - ), - logger=TensorBoardLogger( - save_dir=work_dir, - name='lightning_logs', - version='lastest' - ), - gradient_clip_val=hparams['clip_grad_norm'], - val_check_interval=hparams['val_check_interval'], - row_log_interval=hparams['log_interval'], - max_updates=hparams['max_updates'], - num_sanity_val_steps=hparams['num_sanity_val_steps'] if not hparams[ - 'validate'] else 10000, - accumulate_grad_batches=hparams['accumulate_grad_batches']) - if not hparams['infer']: # train - t = datetime.now().strftime('%Y%m%d%H%M%S') - code_dir = f'{work_dir}/codes/{t}' - subprocess.check_call(f'mkdir -p "{code_dir}"', shell=True) - for c in hparams['save_codes']: - subprocess.check_call(f'cp -r "{c}" "{code_dir}/"', shell=True) - print(f"| Copied codes to {code_dir}.") - trainer.checkpoint_callback.task = task - trainer.fit(task) - else: - trainer.test(task) - - def configure_ddp(self, model, device_ids): - model = DDP( - model, - device_ids=device_ids, - find_unused_parameters=True - ) - if dist.get_rank() != 0 and not hparams['debug']: - sys.stdout = open(os.devnull, "w") - sys.stderr = open(os.devnull, "w") - random.seed(hparams['seed']) - np.random.seed(hparams['seed']) - return model - - def training_end(self, *args, **kwargs): - return None - - def init_ddp_connection(self, proc_rank, world_size): - set_hparams(print_hparams=False) - # guarantees unique ports across jobs from same grid search - default_port = 12910 - # if user gave a port number, use that one instead - try: - default_port = os.environ['MASTER_PORT'] - except Exception: - os.environ['MASTER_PORT'] = str(default_port) - - # figure out the root node addr - root_node = '127.0.0.2' - root_node = self.trainer.resolve_root_node_address(root_node) - os.environ['MASTER_ADDR'] = root_node - dist.init_process_group('nccl', rank=proc_rank, world_size=world_size) - - @data_loader - def train_dataloader(self): - return None - - @data_loader - 
def test_dataloader(self): - return None - - @data_loader - def val_dataloader(self): - return None - - def on_load_checkpoint(self, checkpoint): - pass - - def on_save_checkpoint(self, checkpoint): - pass - - def on_sanity_check_start(self): - pass - - def on_train_start(self): - pass - - def on_train_end(self): - pass - - def on_batch_start(self, batch): - pass - - def on_batch_end(self): - pass - - def on_pre_performance_check(self): - pass - - def on_post_performance_check(self): - pass - - def on_before_zero_grad(self, optimizer): - pass - - def on_after_backward(self): - pass - - def backward(self, loss, optimizer): - loss.backward() - - def grad_norm(self, norm_type): - results = {} - total_norm = 0 - for name, p in self.named_parameters(): - if p.requires_grad: - try: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm ** norm_type - norm = param_norm ** (1 / norm_type) - - grad = round(norm.data.cpu().numpy().flatten()[0], 3) - results['grad_{}_norm_{}'.format(norm_type, name)] = grad - except Exception: - # this param had no grad - pass - - total_norm = total_norm ** (1. / norm_type) - grad = round(total_norm.data.cpu().numpy().flatten()[0], 3) - results['grad_{}_norm_total'.format(norm_type)] = grad - return results diff --git a/spaces/Smotto/Vocal-Isolator/src/models/MDX_net/kimvocal.py b/spaces/Smotto/Vocal-Isolator/src/models/MDX_net/kimvocal.py deleted file mode 100644 index 5880a0065e0ad2345a2156876ef0ab975980645b..0000000000000000000000000000000000000000 --- a/spaces/Smotto/Vocal-Isolator/src/models/MDX_net/kimvocal.py +++ /dev/null @@ -1,73 +0,0 @@ -# Standard Library Imports - -# Third Party Imports -import torch -import onnxruntime as ort - -# Local Imports -from src.models.MDX_net.mdx_net import Conv_TDF_net_trimm -from src.loader import Loader - -# Global Variables -from src.constants import EXECUTION_PROVIDER_LIST, COMPUTATION_DEVICE, ONNX_MODEL_PATH - - -class KimVocal: - """ - TODO: Put something here for flexibility purposes (model types). - """ - - def __init__(self): - pass - - def demix_vocals(self, music_tensor, sample_rate, model, streamlit_progressbar): - """ - Removing vocals using a ONNX model. - - Args: - music_tensor (torch.Tensor): Input tensor. - model (torch.nn): Model used for inferring. - - Returns: - torch.Tensor: Output tensor after passing through the network. - """ - number_of_samples = music_tensor.shape[1] - overlap = model.overlap - # Calculate chunk_size and gen_size based on the sample rate - chunk_size = model.chunk_size - gen_size = chunk_size - 2 * overlap - pad_size = gen_size - number_of_samples % gen_size - mix_padded = torch.cat( - [torch.zeros(2, overlap), music_tensor, torch.zeros(2, pad_size + overlap)], - 1, - ) - - # Start running the session for the model - ort_session = ort.InferenceSession(ONNX_MODEL_PATH, providers=EXECUTION_PROVIDER_LIST) - - # TODO: any way to optimize against silence? I think that's what skips are for, gotta double check. 
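        # Window layout: every forward pass sees `chunk_size` samples, but
        # only the central `gen_size = chunk_size - 2 * overlap` samples are
        # kept after trimming `overlap` from each side, so consecutive
        # windows (advanced by gen_size) tile the padded mix exactly; the
        # trailing pad_size samples are dropped from the final concatenation.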
- # process one chunk at a time (batch_size=1) - demixed_chunks = [] - i = 0 - while i < number_of_samples + pad_size: - # Progress Bar - streamlit_progressbar.progress(i / (number_of_samples + pad_size)) - - # Computation - chunk = mix_padded[:, i : i + chunk_size] - x = model.stft(chunk.unsqueeze(0).to(COMPUTATION_DEVICE)) - with torch.no_grad(): - x = torch.tensor(ort_session.run(None, {"input": x.cpu().numpy()})[0]) - x = model.stft.inverse(x).squeeze(0) - x = x[..., overlap:-overlap] - demixed_chunks.append(x) - i += gen_size - - vocals_output = torch.cat(demixed_chunks, -1)[..., :-pad_size].cpu() - - return vocals_output - - -if __name__ == "__main__": - kimvocal = KimVocal() - kimvocal.main() diff --git a/spaces/Sowmyashetty/MyAichatbot/app.py b/spaces/Sowmyashetty/MyAichatbot/app.py deleted file mode 100644 index a362dcc7d0ddd1eee86961f1bc3db6d894fbd3d5..0000000000000000000000000000000000000000 --- a/spaces/Sowmyashetty/MyAichatbot/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') - -template = """You are a helpful assistant to answer all user queries. -{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -def get_text_response(user_message,history): - response = llm_chain.predict(user_message = user_message) - return response - -demo = gr.ChatInterface(get_text_response) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. 
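For a quick local check of the chain above without launching the Gradio UI, the handler can be called directly; a minimal sketch (assumes OPENAI_API_KEY is set in the environment and that get_text_response is defined as above — the history argument is unused by the handler itself, since state lives in ConversationBufferMemory):

for message in ["Hi, I'm Alice.", "What name did I just give you?"]:
    print(get_text_response(message, []))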
diff --git a/spaces/SrikanthPhalgun/Cifar10_ERAV1_GradCam_Demo/Cifar_datamodule.py b/spaces/SrikanthPhalgun/Cifar10_ERAV1_GradCam_Demo/Cifar_datamodule.py deleted file mode 100644 index 121011bcfda3f0f54a6206adc06147725542f54a..0000000000000000000000000000000000000000 --- a/spaces/SrikanthPhalgun/Cifar10_ERAV1_GradCam_Demo/Cifar_datamodule.py +++ /dev/null @@ -1,64 +0,0 @@ -import albumentations as A -from pytorch_lightning import LightningDataModule -from torch.utils.data import DataLoader, Dataset -from torchvision import datasets -from transformation import * - - - - -class CIFAR10DataModule(LightningDataModule): - def __init__( - self, - data_dir: str = "./data", - batch_size: int = 512, - num_workers: int = 0, - pin_memory: bool = False, - train_augments: A.Compose | None = None, - ): - super().__init__() - - # this line allows to access init params with 'self.hparams' attribute - # also ensures init params will be stored in ckpt - self.save_hyperparameters(logger=False) - - self.train_transforms = ( - image_transform("train") if train_augments is None else train_augments - ) - self.val_transforms = image_transform("val") - - self.data_train: Optional[Dataset] = None - self.data_val: Optional[Dataset] = None - - @property - def num_classes(self): - return 10 - - def prepare_data(self): - # download - datasets.CIFAR10(self.hparams.data_dir, train=True, download=True) - datasets.CIFAR10(self.hparams.data_dir, train=False, download=True) - - def setup(self, stage=None) -> None: - # load only if not loaded already - if not self.data_train and not self.data_val: - self.data_train = datasets.CIFAR10(self.hparams.data_dir, train=True) - self.data_val = datasets.CIFAR10(self.hparams.data_dir, train=False) - - def train_dataloader(self): - return DataLoader( - dataset=CIFAR10(self.data_train, self.train_transforms), - batch_size=self.hparams.batch_size, - num_workers=self.hparams.num_workers, - pin_memory=self.hparams.pin_memory, - shuffle=True, - ) - - def val_dataloader(self): - return DataLoader( - dataset=CIFAR10(self.data_val, self.val_transforms), - batch_size=self.hparams.batch_size, - num_workers=self.hparams.num_workers, - pin_memory=self.hparams.pin_memory, - shuffle=False, - ) \ No newline at end of file diff --git a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/grids/compression/encodec_musicgen_32khz.py b/spaces/SuYuanS/AudioCraft_Plus/audiocraft/grids/compression/encodec_musicgen_32khz.py deleted file mode 100644 index 9da31daa5f009f46e753601a51a06391594b8f9b..0000000000000000000000000000000000000000 --- a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/grids/compression/encodec_musicgen_32khz.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Grid search file, simply list all the exp you want in `explorer`. -Any new exp added there will be scheduled. -You can cancel and experiment by commenting its line. - -This grid shows how to train a MusicGen EnCodec model at 32 kHz. 
-""" - -from ._explorers import CompressionExplorer -from ...environment import AudioCraftEnvironment - - -@CompressionExplorer -def explorer(launcher): - partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global']) - launcher.slurm_(gpus=8, partition=partitions) - # use configuration for MusicGen's EnCodec model trained on monophonic audio sampled at 32 kHz - # MusicGen's EnCodec is trained with a total stride of 640 leading to a frame rate of 50 hz - launcher.bind_(solver='compression/encodec_musicgen_32khz') - # replace this by the desired music dataset - launcher.bind_(dset='internal/music_400k_32khz') - # launch xp - launcher() - launcher({ - 'metrics.visqol.bin': '/data/home/jadecopet/local/usr/opt/visqol', - 'label': 'visqol', - 'evaluate.metrics.visqol': True - }) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/usage.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/usage.py deleted file mode 100644 index 53219bceb2562805537fe37632e19be6fb56760e..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/usage.py +++ /dev/null @@ -1,341 +0,0 @@ -# -*- coding: utf-8 -*- -"""Usage information for the main IPython applications. -""" -#----------------------------------------------------------------------------- -# Copyright (C) 2008-2011 The IPython Development Team -# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu> -# -# Distributed under the terms of the BSD License. The full license is in -# the file COPYING, distributed as part of this software. -#----------------------------------------------------------------------------- - -import sys -from IPython.core import release - -cl_usage = """\ -========= - IPython -========= - -Tools for Interactive Computing in Python -========================================= - - A Python shell with automatic history (input and output), dynamic object - introspection, easier configuration, command completion, access to the - system shell and more. IPython can also be embedded in running programs. - - -Usage - - ipython [subcommand] [options] [-c cmd | -m mod | file] [--] [arg] ... - - If invoked with no options, it executes the file and exits, passing the - remaining arguments to the script, just as if you had specified the same - command with python. You may need to specify `--` before args to be passed - to the script, to prevent IPython from attempting to parse them. If you - specify the option `-i` before the filename, it will enter an interactive - IPython session after running the script, rather than exiting. Files ending - in .py will be treated as normal Python, but files ending in .ipy can - contain special IPython syntax (magic commands, shell expansions, etc.). - - Almost all configuration in IPython is available via the command-line. Do - `ipython --help-all` to see all available options. For persistent - configuration, look into your `ipython_config.py` configuration file for - details. - - This file is typically installed in the `IPYTHONDIR` directory, and there - is a separate configuration directory for each profile. The default profile - directory will be located in $IPYTHONDIR/profile_default. IPYTHONDIR - defaults to to `$HOME/.ipython`. For Windows users, $HOME resolves to - C:\\Users\\YourUserName in most instances. 
- - To initialize a profile with the default configuration file, do:: - - $> ipython profile create - - and start editing `IPYTHONDIR/profile_default/ipython_config.py` - - In IPython's documentation, we will refer to this directory as - `IPYTHONDIR`, you can change its default location by creating an - environment variable with this name and setting it to the desired path. - - For more information, see the manual available in HTML and PDF in your - installation, or online at https://ipython.org/documentation.html. -""" - -interactive_usage = """ -IPython -- An enhanced Interactive Python -========================================= - -IPython offers a fully compatible replacement for the standard Python -interpreter, with convenient shell features, special commands, command -history mechanism and output results caching. - -At your system command line, type 'ipython -h' to see the command line -options available. This document only describes interactive features. - -GETTING HELP ------------- - -Within IPython you have various way to access help: - - ? -> Introduction and overview of IPython's features (this screen). - object? -> Details about 'object'. - object?? -> More detailed, verbose information about 'object'. - %quickref -> Quick reference of all IPython specific syntax and magics. - help -> Access Python's own help system. - -If you are in terminal IPython you can quit this screen by pressing `q`. - - -MAIN FEATURES -------------- - -* Access to the standard Python help with object docstrings and the Python - manuals. Simply type 'help' (no quotes) to invoke it. - -* Magic commands: type %magic for information on the magic subsystem. - -* System command aliases, via the %alias command or the configuration file(s). - -* Dynamic object information: - - Typing ?word or word? prints detailed information about an object. Certain - long strings (code, etc.) get snipped in the center for brevity. - - Typing ??word or word?? gives access to the full information without - snipping long strings. Strings that are longer than the screen are printed - through the less pager. - - The ?/?? system gives access to the full source code for any object (if - available), shows function prototypes and other useful information. - - If you just want to see an object's docstring, type '%pdoc object' (without - quotes, and without % if you have automagic on). - -* Tab completion in the local namespace: - - At any time, hitting tab will complete any available python commands or - variable names, and show you a list of the possible completions if there's - no unambiguous one. It will also complete filenames in the current directory. - -* Search previous command history in multiple ways: - - - Start typing, and then use arrow keys up/down or (Ctrl-p/Ctrl-n) to search - through the history items that match what you've typed so far. - - - Hit Ctrl-r: opens a search prompt. Begin typing and the system searches - your history for lines that match what you've typed so far, completing as - much as it can. - - - %hist: search history by index. - -* Persistent command history across sessions. - -* Logging of input with the ability to save and restore a working session. - -* System shell with !. Typing !ls will run 'ls' in the current directory. - -* The reload command does a 'deep' reload of a module: changes made to the - module since you imported will actually be available without having to exit. - -* Verbose and colored exception traceback printouts. 
See the magic xmode and - xcolor functions for details (just type %magic). - -* Input caching system: - - IPython offers numbered prompts (In/Out) with input and output caching. All - input is saved and can be retrieved as variables (besides the usual arrow - key recall). - - The following GLOBAL variables always exist (so don't overwrite them!): - _i: stores previous input. - _ii: next previous. - _iii: next-next previous. - _ih : a list of all input _ih[n] is the input from line n. - - Additionally, global variables named _i<n> are dynamically created (<n> - being the prompt counter), such that _i<n> == _ih[<n>] - - For example, what you typed at prompt 14 is available as _i14 and _ih[14]. - - You can create macros which contain multiple input lines from this history, - for later re-execution, with the %macro function. - - The history function %hist allows you to see any part of your input history - by printing a range of the _i variables. Note that inputs which contain - magic functions (%) appear in the history with a prepended comment. This is - because they aren't really valid Python code, so you can't exec them. - -* Output caching system: - - For output that is returned from actions, a system similar to the input - cache exists but using _ instead of _i. Only actions that produce a result - (NOT assignments, for example) are cached. If you are familiar with - Mathematica, IPython's _ variables behave exactly like Mathematica's % - variables. - - The following GLOBAL variables always exist (so don't overwrite them!): - _ (one underscore): previous output. - __ (two underscores): next previous. - ___ (three underscores): next-next previous. - - Global variables named _<n> are dynamically created (<n> being the prompt - counter), such that the result of output <n> is always available as _<n>. - - Finally, a global dictionary named _oh exists with entries for all lines - which generated output. - -* Directory history: - - Your history of visited directories is kept in the global list _dh, and the - magic %cd command can be used to go to any entry in that list. - -* Auto-parentheses and auto-quotes (adapted from Nathan Gray's LazyPython) - - 1. Auto-parentheses - - Callable objects (i.e. functions, methods, etc) can be invoked like - this (notice the commas between the arguments):: - - In [1]: callable_ob arg1, arg2, arg3 - - and the input will be translated to this:: - - callable_ob(arg1, arg2, arg3) - - This feature is off by default (in rare cases it can produce - undesirable side-effects), but you can activate it at the command-line - by starting IPython with `--autocall 1`, set it permanently in your - configuration file, or turn on at runtime with `%autocall 1`. - - You can force auto-parentheses by using '/' as the first character - of a line. For example:: - - In [1]: /globals # becomes 'globals()' - - Note that the '/' MUST be the first character on the line! This - won't work:: - - In [2]: print /globals # syntax error - - In most cases the automatic algorithm should work, so you should - rarely need to explicitly invoke /. One notable exception is if you - are trying to call a function with a list of tuples as arguments (the - parenthesis will confuse IPython):: - - In [1]: zip (1,2,3),(4,5,6) # won't work - - but this will work:: - - In [2]: /zip (1,2,3),(4,5,6) - ------> zip ((1,2,3),(4,5,6)) - Out[2]= [(1, 4), (2, 5), (3, 6)] - - IPython tells you that it has altered your command line by - displaying the new command line preceded by -->. 
e.g.:: - - In [18]: callable list - -------> callable (list) - - 2. Auto-Quoting - - You can force auto-quoting of a function's arguments by using ',' as - the first character of a line. For example:: - - In [1]: ,my_function /home/me # becomes my_function("/home/me") - - If you use ';' instead, the whole argument is quoted as a single - string (while ',' splits on whitespace):: - - In [2]: ,my_function a b c # becomes my_function("a","b","c") - In [3]: ;my_function a b c # becomes my_function("a b c") - - Note that the ',' MUST be the first character on the line! This - won't work:: - - In [4]: x = ,my_function /home/me # syntax error -""" - -interactive_usage_min = """\ -An enhanced console for Python. -Some of its features are: -- Tab completion in the local namespace. -- Logging of input, see command-line options. -- System shell escape via ! , eg !ls. -- Magic commands, starting with a % (like %ls, %pwd, %cd, etc.) -- Keeps track of locally defined variables via %who, %whos. -- Show object information with a ? eg ?x or x? (use ?? for more info). -""" - -quick_reference = r""" -IPython -- An enhanced Interactive Python - Quick Reference Card -================================================================ - -obj?, obj?? : Get help, or more help for object (also works as - ?obj, ??obj). -?foo.*abc* : List names in 'foo' containing 'abc' in them. -%magic : Information about IPython's 'magic' % functions. - -Magic functions are prefixed by % or %%, and typically take their arguments -without parentheses, quotes or even commas for convenience. Line magics take a -single % and cell magics are prefixed with two %%. - -Example magic function calls: - -%alias d ls -F : 'd' is now an alias for 'ls -F' -alias d ls -F : Works if 'alias' not a python name -alist = %alias : Get list of aliases to 'alist' -cd /usr/share : Obvious. cd -<tab> to choose from visited dirs. -%cd?? : See help AND source for magic %cd -%timeit x=10 : time the 'x=10' statement with high precision. -%%timeit x=2**100 -x**100 : time 'x**100' with a setup of 'x=2**100'; setup code is not - counted. This is an example of a cell magic. - -System commands: - -!cp a.txt b/ : System command escape, calls os.system() -cp a.txt b/ : after %rehashx, most system commands work without ! -cp ${f}.txt $bar : Variable expansion in magics and system commands -files = !ls /usr : Capture system command output -files.s, files.l, files.n: "a b c", ['a','b','c'], 'a\nb\nc' - -History: - -_i, _ii, _iii : Previous, next previous, next next previous input -_i4, _ih[2:5] : Input history line 4, lines 2-4 -exec(_i81) : Execute input history line #81 again -%rep 81 : Edit input history line #81 -_, __, ___ : previous, next previous, next next previous output -_dh : Directory history -_oh : Output history -%hist : Command history of current session. -%hist -g foo : Search command history of (almost) all sessions for 'foo'. -%hist -g : Command history of (almost) all sessions. -%hist 1/2-8 : Command history containing lines 2-8 of session 1. -%hist 1/ ~2/ : Command history of session 1 and 2 sessions before current. -%hist ~8/1-~6/5 : Command history from line 1 of 8 sessions ago to - line 5 of 6 sessions ago. -%edit 0/ : Open editor to execute code with history of current session. - -Autocall: - -f 1,2 : f(1,2) # Off by default, enable with %autocall magic. -/f 1,2 : f(1,2) (forced autoparen) -,f 1 2 : f("1","2") -;f 1 2 : f("1 2") - -Remember: TAB completion works in many contexts, not just file names -or python names. 
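-
-A tiny illustrative session tying the autocall prefixes above together
-(``f`` here is just a user-defined function, not an IPython name)::
-
-  In [1]: def f(*args): return args
-  In [2]: /f 1,2
-  ------> f(1,2)
-  Out[2]: (1, 2)
-  In [3]: ,f a b            # becomes f("a","b")
-  Out[3]: ('a', 'b')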
- -The following magic functions are currently available: - -""" - -default_banner_parts = ["Python %s\n"%sys.version.split("\n")[0], - "Type 'copyright', 'credits' or 'license' for more information\n" , - "IPython {version} -- An enhanced Interactive Python. Type '?' for help.\n".format(version=release.version), -] - -default_banner = ''.join(default_banner_parts) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/ImageOps.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/ImageOps.py deleted file mode 100644 index 301c593c79055dd9ce3dda76c8dcf5bc525faf2c..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/ImageOps.py +++ /dev/null @@ -1,621 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# standard image operations -# -# History: -# 2001-10-20 fl Created -# 2001-10-23 fl Added autocontrast operator -# 2001-12-18 fl Added Kevin's fit operator -# 2004-03-14 fl Fixed potential division by zero in equalize -# 2005-05-05 fl Fixed equalize for low number of values -# -# Copyright (c) 2001-2004 by Secret Labs AB -# Copyright (c) 2001-2004 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - -import functools -import operator -import re - -from . import Image, ImagePalette - -# -# helpers - - -def _border(border): - if isinstance(border, tuple): - if len(border) == 2: - left, top = right, bottom = border - elif len(border) == 4: - left, top, right, bottom = border - else: - left = top = right = bottom = border - return left, top, right, bottom - - -def _color(color, mode): - if isinstance(color, str): - from . import ImageColor - - color = ImageColor.getcolor(color, mode) - return color - - -def _lut(image, lut): - if image.mode == "P": - # FIXME: apply to lookup table, not image data - msg = "mode P support coming soon" - raise NotImplementedError(msg) - elif image.mode in ("L", "RGB"): - if image.mode == "RGB" and len(lut) == 256: - lut = lut + lut + lut - return image.point(lut) - else: - msg = "not supported for this image mode" - raise OSError(msg) - - -# -# actions - - -def autocontrast(image, cutoff=0, ignore=None, mask=None, preserve_tone=False): - """ - Maximize (normalize) image contrast. This function calculates a - histogram of the input image (or mask region), removes ``cutoff`` percent of the - lightest and darkest pixels from the histogram, and remaps the image - so that the darkest pixel becomes black (0), and the lightest - becomes white (255). - - :param image: The image to process. - :param cutoff: The percent to cut off from the histogram on the low and - high ends. Either a tuple of (low, high), or a single - number for both. - :param ignore: The background pixel value (use None for no background). - :param mask: Histogram used in contrast operation is computed using pixels - within the mask. If no mask is given the entire image is used - for histogram computation. - :param preserve_tone: Preserve image tone in Photoshop-like style autocontrast. - - .. versionadded:: 8.2.0 - - :return: An image. 
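-
-    Example (an illustrative call; ``photo.jpg`` is a placeholder path,
-    not a file shipped with this module)::
-
-        from PIL import Image, ImageOps
-
-        im = Image.open("photo.jpg")
-        # clip the darkest and lightest 2% of pixels before remapping
-        out = ImageOps.autocontrast(im, cutoff=2, preserve_tone=True)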
- """ - if preserve_tone: - histogram = image.convert("L").histogram(mask) - else: - histogram = image.histogram(mask) - - lut = [] - for layer in range(0, len(histogram), 256): - h = histogram[layer : layer + 256] - if ignore is not None: - # get rid of outliers - try: - h[ignore] = 0 - except TypeError: - # assume sequence - for ix in ignore: - h[ix] = 0 - if cutoff: - # cut off pixels from both ends of the histogram - if not isinstance(cutoff, tuple): - cutoff = (cutoff, cutoff) - # get number of pixels - n = 0 - for ix in range(256): - n = n + h[ix] - # remove cutoff% pixels from the low end - cut = n * cutoff[0] // 100 - for lo in range(256): - if cut > h[lo]: - cut = cut - h[lo] - h[lo] = 0 - else: - h[lo] -= cut - cut = 0 - if cut <= 0: - break - # remove cutoff% samples from the high end - cut = n * cutoff[1] // 100 - for hi in range(255, -1, -1): - if cut > h[hi]: - cut = cut - h[hi] - h[hi] = 0 - else: - h[hi] -= cut - cut = 0 - if cut <= 0: - break - # find lowest/highest samples after preprocessing - for lo in range(256): - if h[lo]: - break - for hi in range(255, -1, -1): - if h[hi]: - break - if hi <= lo: - # don't bother - lut.extend(list(range(256))) - else: - scale = 255.0 / (hi - lo) - offset = -lo * scale - for ix in range(256): - ix = int(ix * scale + offset) - if ix < 0: - ix = 0 - elif ix > 255: - ix = 255 - lut.append(ix) - return _lut(image, lut) - - -def colorize(image, black, white, mid=None, blackpoint=0, whitepoint=255, midpoint=127): - """ - Colorize grayscale image. - This function calculates a color wedge which maps all black pixels in - the source image to the first color and all white pixels to the - second color. If ``mid`` is specified, it uses three-color mapping. - The ``black`` and ``white`` arguments should be RGB tuples or color names; - optionally you can use three-color mapping by also specifying ``mid``. - Mapping positions for any of the colors can be specified - (e.g. ``blackpoint``), where these parameters are the integer - value corresponding to where the corresponding color should be mapped. - These parameters must have logical order, such that - ``blackpoint <= midpoint <= whitepoint`` (if ``mid`` is specified). - - :param image: The image to colorize. - :param black: The color to use for black input pixels. - :param white: The color to use for white input pixels. - :param mid: The color to use for midtone input pixels. - :param blackpoint: an int value [0, 255] for the black mapping. - :param whitepoint: an int value [0, 255] for the white mapping. - :param midpoint: an int value [0, 255] for the midtone mapping. - :return: An image. 
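-
-    Example (illustrative; ``gray`` is assumed to be an already-opened
-    mode "L" image, as the assertions below require)::
-
-        duotone = ImageOps.colorize(
-            gray, black="navy", white="ivory", mid="gray",
-            blackpoint=10, midpoint=127, whitepoint=245,
-        )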
- """ - - # Initial asserts - assert image.mode == "L" - if mid is None: - assert 0 <= blackpoint <= whitepoint <= 255 - else: - assert 0 <= blackpoint <= midpoint <= whitepoint <= 255 - - # Define colors from arguments - black = _color(black, "RGB") - white = _color(white, "RGB") - if mid is not None: - mid = _color(mid, "RGB") - - # Empty lists for the mapping - red = [] - green = [] - blue = [] - - # Create the low-end values - for i in range(0, blackpoint): - red.append(black[0]) - green.append(black[1]) - blue.append(black[2]) - - # Create the mapping (2-color) - if mid is None: - range_map = range(0, whitepoint - blackpoint) - - for i in range_map: - red.append(black[0] + i * (white[0] - black[0]) // len(range_map)) - green.append(black[1] + i * (white[1] - black[1]) // len(range_map)) - blue.append(black[2] + i * (white[2] - black[2]) // len(range_map)) - - # Create the mapping (3-color) - else: - range_map1 = range(0, midpoint - blackpoint) - range_map2 = range(0, whitepoint - midpoint) - - for i in range_map1: - red.append(black[0] + i * (mid[0] - black[0]) // len(range_map1)) - green.append(black[1] + i * (mid[1] - black[1]) // len(range_map1)) - blue.append(black[2] + i * (mid[2] - black[2]) // len(range_map1)) - for i in range_map2: - red.append(mid[0] + i * (white[0] - mid[0]) // len(range_map2)) - green.append(mid[1] + i * (white[1] - mid[1]) // len(range_map2)) - blue.append(mid[2] + i * (white[2] - mid[2]) // len(range_map2)) - - # Create the high-end values - for i in range(0, 256 - whitepoint): - red.append(white[0]) - green.append(white[1]) - blue.append(white[2]) - - # Return converted image - image = image.convert("RGB") - return _lut(image, red + green + blue) - - -def contain(image, size, method=Image.Resampling.BICUBIC): - """ - Returns a resized version of the image, set to the maximum width and height - within the requested size, while maintaining the original aspect ratio. - - :param image: The image to resize and crop. - :param size: The requested output size in pixels, given as a - (width, height) tuple. - :param method: Resampling method to use. Default is - :py:attr:`~PIL.Image.Resampling.BICUBIC`. - See :ref:`concept-filters`. - :return: An image. - """ - - im_ratio = image.width / image.height - dest_ratio = size[0] / size[1] - - if im_ratio != dest_ratio: - if im_ratio > dest_ratio: - new_height = round(image.height / image.width * size[0]) - if new_height != size[1]: - size = (size[0], new_height) - else: - new_width = round(image.width / image.height * size[1]) - if new_width != size[0]: - size = (new_width, size[1]) - return image.resize(size, resample=method) - - -def pad(image, size, method=Image.Resampling.BICUBIC, color=None, centering=(0.5, 0.5)): - """ - Returns a resized and padded version of the image, expanded to fill the - requested aspect ratio and size. - - :param image: The image to resize and crop. - :param size: The requested output size in pixels, given as a - (width, height) tuple. - :param method: Resampling method to use. Default is - :py:attr:`~PIL.Image.Resampling.BICUBIC`. - See :ref:`concept-filters`. - :param color: The background color of the padded image. - :param centering: Control the position of the original image within the - padded version. - - (0.5, 0.5) will keep the image centered - (0, 0) will keep the image aligned to the top left - (1, 1) will keep the image aligned to the bottom - right - :return: An image. 
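-
-    Example (illustrative; ``im`` is any already-opened image)::
-
-        # letterbox onto a square canvas, original image centered on black
-        thumb = ImageOps.pad(im, (512, 512), color="black")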
- """ - - resized = contain(image, size, method) - if resized.size == size: - out = resized - else: - out = Image.new(image.mode, size, color) - if resized.palette: - out.putpalette(resized.getpalette()) - if resized.width != size[0]: - x = round((size[0] - resized.width) * max(0, min(centering[0], 1))) - out.paste(resized, (x, 0)) - else: - y = round((size[1] - resized.height) * max(0, min(centering[1], 1))) - out.paste(resized, (0, y)) - return out - - -def crop(image, border=0): - """ - Remove border from image. The same amount of pixels are removed - from all four sides. This function works on all image modes. - - .. seealso:: :py:meth:`~PIL.Image.Image.crop` - - :param image: The image to crop. - :param border: The number of pixels to remove. - :return: An image. - """ - left, top, right, bottom = _border(border) - return image.crop((left, top, image.size[0] - right, image.size[1] - bottom)) - - -def scale(image, factor, resample=Image.Resampling.BICUBIC): - """ - Returns a rescaled image by a specific factor given in parameter. - A factor greater than 1 expands the image, between 0 and 1 contracts the - image. - - :param image: The image to rescale. - :param factor: The expansion factor, as a float. - :param resample: Resampling method to use. Default is - :py:attr:`~PIL.Image.Resampling.BICUBIC`. - See :ref:`concept-filters`. - :returns: An :py:class:`~PIL.Image.Image` object. - """ - if factor == 1: - return image.copy() - elif factor <= 0: - msg = "the factor must be greater than 0" - raise ValueError(msg) - else: - size = (round(factor * image.width), round(factor * image.height)) - return image.resize(size, resample) - - -def deform(image, deformer, resample=Image.Resampling.BILINEAR): - """ - Deform the image. - - :param image: The image to deform. - :param deformer: A deformer object. Any object that implements a - ``getmesh`` method can be used. - :param resample: An optional resampling filter. Same values possible as - in the PIL.Image.transform function. - :return: An image. - """ - return image.transform( - image.size, Image.Transform.MESH, deformer.getmesh(image), resample - ) - - -def equalize(image, mask=None): - """ - Equalize the image histogram. This function applies a non-linear - mapping to the input image, in order to create a uniform - distribution of grayscale values in the output image. - - :param image: The image to equalize. - :param mask: An optional mask. If given, only the pixels selected by - the mask are included in the analysis. - :return: An image. - """ - if image.mode == "P": - image = image.convert("RGB") - h = image.histogram(mask) - lut = [] - for b in range(0, len(h), 256): - histo = [_f for _f in h[b : b + 256] if _f] - if len(histo) <= 1: - lut.extend(list(range(256))) - else: - step = (functools.reduce(operator.add, histo) - histo[-1]) // 255 - if not step: - lut.extend(list(range(256))) - else: - n = step // 2 - for i in range(256): - lut.append(n // step) - n = n + h[i + b] - return _lut(image, lut) - - -def expand(image, border=0, fill=0): - """ - Add border to the image - - :param image: The image to expand. - :param border: Border width, in pixels. - :param fill: Pixel fill value (a color value). Default is 0 (black). - :return: An image. 
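-
-    Example (illustrative; ``im`` is any already-opened image)::
-
-        framed = ImageOps.expand(im, border=20, fill="white")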
- """ - left, top, right, bottom = _border(border) - width = left + image.size[0] + right - height = top + image.size[1] + bottom - color = _color(fill, image.mode) - if image.palette: - palette = ImagePalette.ImagePalette(palette=image.getpalette()) - if isinstance(color, tuple): - color = palette.getcolor(color) - else: - palette = None - out = Image.new(image.mode, (width, height), color) - if palette: - out.putpalette(palette.palette) - out.paste(image, (left, top)) - return out - - -def fit(image, size, method=Image.Resampling.BICUBIC, bleed=0.0, centering=(0.5, 0.5)): - """ - Returns a resized and cropped version of the image, cropped to the - requested aspect ratio and size. - - This function was contributed by Kevin Cazabon. - - :param image: The image to resize and crop. - :param size: The requested output size in pixels, given as a - (width, height) tuple. - :param method: Resampling method to use. Default is - :py:attr:`~PIL.Image.Resampling.BICUBIC`. - See :ref:`concept-filters`. - :param bleed: Remove a border around the outside of the image from all - four edges. The value is a decimal percentage (use 0.01 for - one percent). The default value is 0 (no border). - Cannot be greater than or equal to 0.5. - :param centering: Control the cropping position. Use (0.5, 0.5) for - center cropping (e.g. if cropping the width, take 50% off - of the left side, and therefore 50% off the right side). - (0.0, 0.0) will crop from the top left corner (i.e. if - cropping the width, take all of the crop off of the right - side, and if cropping the height, take all of it off the - bottom). (1.0, 0.0) will crop from the bottom left - corner, etc. (i.e. if cropping the width, take all of the - crop off the left side, and if cropping the height take - none from the top, and therefore all off the bottom). - :return: An image. 
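-
-    Example (illustrative; ``im`` is any already-opened image)::
-
-        # trim a 1% bleed from every edge, keep the top of the frame
-        avatar = ImageOps.fit(im, (128, 128), bleed=0.01, centering=(0.5, 0.0))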
- """ - - # by Kevin Cazabon, Feb 17/2000 - # kevin@cazabon.com - # https://www.cazabon.com - - # ensure centering is mutable - centering = list(centering) - - if not 0.0 <= centering[0] <= 1.0: - centering[0] = 0.5 - if not 0.0 <= centering[1] <= 1.0: - centering[1] = 0.5 - - if not 0.0 <= bleed < 0.5: - bleed = 0.0 - - # calculate the area to use for resizing and cropping, subtracting - # the 'bleed' around the edges - - # number of pixels to trim off on Top and Bottom, Left and Right - bleed_pixels = (bleed * image.size[0], bleed * image.size[1]) - - live_size = ( - image.size[0] - bleed_pixels[0] * 2, - image.size[1] - bleed_pixels[1] * 2, - ) - - # calculate the aspect ratio of the live_size - live_size_ratio = live_size[0] / live_size[1] - - # calculate the aspect ratio of the output image - output_ratio = size[0] / size[1] - - # figure out if the sides or top/bottom will be cropped off - if live_size_ratio == output_ratio: - # live_size is already the needed ratio - crop_width = live_size[0] - crop_height = live_size[1] - elif live_size_ratio >= output_ratio: - # live_size is wider than what's needed, crop the sides - crop_width = output_ratio * live_size[1] - crop_height = live_size[1] - else: - # live_size is taller than what's needed, crop the top and bottom - crop_width = live_size[0] - crop_height = live_size[0] / output_ratio - - # make the crop - crop_left = bleed_pixels[0] + (live_size[0] - crop_width) * centering[0] - crop_top = bleed_pixels[1] + (live_size[1] - crop_height) * centering[1] - - crop = (crop_left, crop_top, crop_left + crop_width, crop_top + crop_height) - - # resize the image and return it - return image.resize(size, method, box=crop) - - -def flip(image): - """ - Flip the image vertically (top to bottom). - - :param image: The image to flip. - :return: An image. - """ - return image.transpose(Image.Transpose.FLIP_TOP_BOTTOM) - - -def grayscale(image): - """ - Convert the image to grayscale. - - :param image: The image to convert. - :return: An image. - """ - return image.convert("L") - - -def invert(image): - """ - Invert (negate) the image. - - :param image: The image to invert. - :return: An image. - """ - lut = [] - for i in range(256): - lut.append(255 - i) - return image.point(lut) if image.mode == "1" else _lut(image, lut) - - -def mirror(image): - """ - Flip image horizontally (left to right). - - :param image: The image to mirror. - :return: An image. - """ - return image.transpose(Image.Transpose.FLIP_LEFT_RIGHT) - - -def posterize(image, bits): - """ - Reduce the number of bits for each color channel. - - :param image: The image to posterize. - :param bits: The number of bits to keep for each channel (1-8). - :return: An image. - """ - lut = [] - mask = ~(2 ** (8 - bits) - 1) - for i in range(256): - lut.append(i & mask) - return _lut(image, lut) - - -def solarize(image, threshold=128): - """ - Invert all pixel values above a threshold. - - :param image: The image to solarize. - :param threshold: All pixels above this greyscale level are inverted. - :return: An image. - """ - lut = [] - for i in range(256): - if i < threshold: - lut.append(i) - else: - lut.append(255 - i) - return _lut(image, lut) - - -def exif_transpose(image): - """ - If an image has an EXIF Orientation tag, other than 1, return a new image - that is transposed accordingly. The new image will have the orientation - data removed. - - Otherwise, return a copy of the image. - - :param image: The image to transpose. - :return: An image. 
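-
-    Example (illustrative; a camera JPEG saved with EXIF Orientation
-    tag 6 comes back rotated upright, ``photo.jpg`` being a placeholder
-    path)::
-
-        from PIL import Image, ImageOps
-
-        upright = ImageOps.exif_transpose(Image.open("photo.jpg"))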
- """ - exif = image.getexif() - orientation = exif.get(0x0112) - method = { - 2: Image.Transpose.FLIP_LEFT_RIGHT, - 3: Image.Transpose.ROTATE_180, - 4: Image.Transpose.FLIP_TOP_BOTTOM, - 5: Image.Transpose.TRANSPOSE, - 6: Image.Transpose.ROTATE_270, - 7: Image.Transpose.TRANSVERSE, - 8: Image.Transpose.ROTATE_90, - }.get(orientation) - if method is not None: - transposed_image = image.transpose(method) - transposed_exif = transposed_image.getexif() - if 0x0112 in transposed_exif: - del transposed_exif[0x0112] - if "exif" in transposed_image.info: - transposed_image.info["exif"] = transposed_exif.tobytes() - elif "Raw profile type exif" in transposed_image.info: - transposed_image.info[ - "Raw profile type exif" - ] = transposed_exif.tobytes().hex() - elif "XML:com.adobe.xmp" in transposed_image.info: - for pattern in ( - r'tiff:Orientation="([0-9])"', - r"<tiff:Orientation>([0-9])</tiff:Orientation>", - ): - transposed_image.info["XML:com.adobe.xmp"] = re.sub( - pattern, "", transposed_image.info["XML:com.adobe.xmp"] - ) - return transposed_image - return image.copy() diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driverc/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driverc/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/__init__.py deleted file mode 100644 index daf9f90949c26e43019da8103a86d1a81f1133f1..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/__init__.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See LICENSE in the project root -# for license information. - -import contextlib -from importlib import import_module -import os -import sys - -from . import _util - - -VENDORED_ROOT = os.path.dirname(os.path.abspath(__file__)) -# TODO: Move the "pydevd" git submodule to the debugpy/_vendored directory -# and then drop the following fallback. -if "pydevd" not in os.listdir(VENDORED_ROOT): - VENDORED_ROOT = os.path.dirname(VENDORED_ROOT) - - -def list_all(resolve=False): - """Return the list of vendored projects.""" - # TODO: Derive from os.listdir(VENDORED_ROOT)? - projects = ["pydevd"] - if not resolve: - return projects - return [project_root(name) for name in projects] - - -def project_root(project): - """Return the path the root dir of the vendored project. - - If "project" is an empty string then the path prefix for vendored - projects (e.g. "debugpy/_vendored/") will be returned. - """ - if not project: - project = "" - return os.path.join(VENDORED_ROOT, project) - - -def iter_project_files(project, relative=False, **kwargs): - """Yield (dirname, basename, filename) for all files in the project.""" - if relative: - with _util.cwd(VENDORED_ROOT): - for result in _util.iter_all_files(project, **kwargs): - yield result - else: - root = project_root(project) - for result in _util.iter_all_files(root, **kwargs): - yield result - - -def iter_packaging_files(project): - """Yield the filenames for all files in the project. - - The filenames are relative to "debugpy/_vendored". This is most - useful for the "package data" in a setup.py. - """ - # TODO: Use default filters? __pycache__ and .pyc? 
- prune_dir = None - exclude_file = None - try: - mod = import_module("._{}_packaging".format(project), __name__) - except ImportError: - pass - else: - prune_dir = getattr(mod, "prune_dir", prune_dir) - exclude_file = getattr(mod, "exclude_file", exclude_file) - results = iter_project_files( - project, relative=True, prune_dir=prune_dir, exclude_file=exclude_file - ) - for _, _, filename in results: - yield filename - - -def prefix_matcher(*prefixes): - """Return a module match func that matches any of the given prefixes.""" - assert prefixes - - def match(name, module): - for prefix in prefixes: - if name.startswith(prefix): - return True - else: - return False - - return match - - -def check_modules(project, match, root=None): - """Verify that only vendored modules have been imported.""" - if root is None: - root = project_root(project) - extensions = [] - unvendored = {} - for modname, mod in list(sys.modules.items()): - if not match(modname, mod): - continue - try: - filename = getattr(mod, "__file__", None) - except: # In theory it's possible that any error is raised when accessing __file__ - filename = None - if not filename: # extension module - extensions.append(modname) - elif not filename.startswith(root): - unvendored[modname] = filename - return unvendored, extensions - - -@contextlib.contextmanager -def vendored(project, root=None): - """A context manager under which the vendored project will be imported.""" - if root is None: - root = project_root(project) - # Add the vendored project directory, so that it gets tried first. - sys.path.insert(0, root) - try: - yield root - finally: - sys.path.remove(root) - - -def preimport(project, modules, **kwargs): - """Import each of the named modules out of the vendored project.""" - with vendored(project, **kwargs): - for name in modules: - import_module(name) diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/evaluation/coco_evaluator.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/evaluation/coco_evaluator.py deleted file mode 100644 index c26107ee1bdec1e6f83831d6c9a0aaaf0b9cedf1..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/evaluation/coco_evaluator.py +++ /dev/null @@ -1,563 +0,0 @@ -# ------------------------------------------------------------------------------ -# Reference: https://github.com/facebookresearch/detectron2/blob/main/detectron2/evaluation/coco_evaluation.py -# Modified by Jitesh Jain (https://github.com/praeclarumjj3) -# ------------------------------------------------------------------------------ - -import contextlib -import copy -import io -import itertools -import json -import logging -import numpy as np -import os -import pickle -from collections import OrderedDict -import annotator.oneformer.pycocotools.mask as mask_util -import torch -from annotator.oneformer.pycocotools.coco import COCO -from annotator.oneformer.pycocotools.cocoeval import COCOeval -from tabulate import tabulate - -import annotator.oneformer.detectron2.utils.comm as comm -from annotator.oneformer.detectron2.config import CfgNode -from annotator.oneformer.detectron2.data import MetadataCatalog -from annotator.oneformer.detectron2.data.datasets.coco import convert_to_coco_json -from annotator.oneformer.detectron2.structures import Boxes, BoxMode, pairwise_iou -from annotator.oneformer.detectron2.utils.file_io import PathManager -from annotator.oneformer.detectron2.utils.logger import create_small_table - -from .evaluator import 
DatasetEvaluator - -try: - from annotator.oneformer.detectron2.evaluation.fast_eval_api import COCOeval_opt -except ImportError: - COCOeval_opt = COCOeval - - -class COCOEvaluator(DatasetEvaluator): - """ - Evaluate AP for instance detection/segmentation, AP - for keypoint detection outputs using COCO's metrics. - See http://cocodataset.org/#detection-eval and - http://cocodataset.org/#keypoints-eval to understand its metrics. - The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means - the metric cannot be computed (e.g. due to no predictions made). - - In addition to COCO, this evaluator is able to support any bounding box detection, - instance segmentation, or keypoint detection dataset. - """ - - def __init__( - self, - dataset_name, - tasks=None, - distributed=True, - output_dir=None, - *, - max_dets_per_image=None, - use_fast_impl=True, - kpt_oks_sigmas=(), - allow_cached_coco=True, - ): - """ - Args: - dataset_name (str): name of the dataset to be evaluated. - It must have either the following corresponding metadata: - - "json_file": the path to the COCO format annotation - - Or it must be in detectron2's standard dataset format - so it can be converted to COCO format automatically. - tasks (tuple[str]): tasks that can be evaluated under the given - configuration. A task is one of "bbox", "segm", "keypoints". - By default, will infer this automatically from predictions. - distributed (True): if True, will collect results from all ranks and run evaluation - in the main process. - Otherwise, will only evaluate the results in the current process. - output_dir (str): optional, an output directory to dump all - results predicted on the dataset. The dump contains two files: - - 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and - contains all the results in the format they are produced by the model. - 2. "coco_instances_results.json" a json file in COCO's result format. - max_dets_per_image (int): limit on the maximum number of detections per image. - By default in COCO, this limit is to 100, but this can be customized - to be greater, as is needed in evaluation metrics AP fixed and AP pool - (see https://arxiv.org/pdf/2102.01066.pdf) - This doesn't affect keypoint evaluation. - use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. - Although the results should be very close to the official implementation in COCO - API, it is still recommended to compute results with the official API for use in - papers. The faster implementation also uses more RAM. - kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS. - See http://cocodataset.org/#keypoints-eval - When empty, it will use the defaults in COCO. - Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS. - allow_cached_coco (bool): Whether to use cached coco json from previous validation - runs. You should set this to False if you need to use different validation data. - Defaults to True. - """ - self._logger = logging.getLogger(__name__) - self._distributed = distributed - self._output_dir = output_dir - - if use_fast_impl and (COCOeval_opt is COCOeval): - self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.") - use_fast_impl = False - self._use_fast_impl = use_fast_impl - - # COCOeval requires the limit on the number of detections per image (maxDets) to be a list - # with at least 3 elements. 
The default maxDets in COCOeval is [1, 10, 100], in which the - # 3rd element (100) is used as the limit on the number of detections per image when - # evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval, - # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults. - if max_dets_per_image is None: - max_dets_per_image = [1, 10, 100] - else: - max_dets_per_image = [1, 10, max_dets_per_image] - self._max_dets_per_image = max_dets_per_image - - if tasks is not None and isinstance(tasks, CfgNode): - kpt_oks_sigmas = ( - tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas - ) - self._logger.warn( - "COCO Evaluator instantiated using config, this is deprecated behavior." - " Please pass in explicit arguments instead." - ) - self._tasks = None # Infering it from predictions should be better - else: - self._tasks = tasks - - self._cpu_device = torch.device("cpu") - - self._metadata = MetadataCatalog.get(dataset_name) - if not hasattr(self._metadata, "json_file"): - if output_dir is None: - raise ValueError( - "output_dir must be provided to COCOEvaluator " - "for datasets not in COCO format." - ) - self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...") - - cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") - self._metadata.json_file = cache_path - convert_to_coco_json(dataset_name, cache_path, allow_cached=allow_cached_coco) - - json_file = PathManager.get_local_path(self._metadata.json_file) - with contextlib.redirect_stdout(io.StringIO()): - self._coco_api = COCO(json_file) - - # Test set json files do not contain annotations (evaluation must be - # performed using the COCO evaluation server). - self._do_evaluation = "annotations" in self._coco_api.dataset - if self._do_evaluation: - self._kpt_oks_sigmas = kpt_oks_sigmas - - def reset(self): - self._predictions = [] - - def process(self, inputs, outputs): - """ - Args: - inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). - It is a list of dict. Each dict corresponds to an image and - contains keys like "height", "width", "file_name", "image_id". - outputs: the outputs of a COCO model. It is a list of dicts with key - "instances" that contains :class:`Instances`. - """ - for input, output in zip(inputs, outputs): - prediction = {"image_id": input["image_id"]} - - if "instances" in output: - instances = output["instances"].to(self._cpu_device) - prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) - if len(prediction) > 1: - self._predictions.append(prediction) - - def evaluate(self, img_ids=None): - """ - Args: - img_ids: a list of image IDs to evaluate on. 
Default to None for the whole dataset - """ - if self._distributed: - comm.synchronize() - predictions = comm.gather(self._predictions, dst=0) - predictions = list(itertools.chain(*predictions)) - - if not comm.is_main_process(): - return {} - else: - predictions = self._predictions - - if len(predictions) == 0: - self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") - return {} - - if self._output_dir: - PathManager.mkdirs(self._output_dir) - file_path = os.path.join(self._output_dir, "instances_predictions.pth") - with PathManager.open(file_path, "wb") as f: - torch.save(predictions, f) - - self._results = OrderedDict() - if "instances" in predictions[0]: - self._eval_predictions(predictions, img_ids=img_ids) - # Copy so the caller can do whatever with results - return copy.deepcopy(self._results) - - def _tasks_from_predictions(self, predictions): - """ - Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions. - """ - for pred in predictions: - if "segmentation" in pred: - tasks = {"segm"} - if "keypoints" in pred: - tasks.add("keypoints") - return sorted(tasks) - - def _eval_predictions(self, predictions, img_ids=None): - """ - Evaluate predictions. Fill self._results with the metrics of the tasks. - """ - self._logger.info("Preparing results for COCO format ...") - coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) - tasks = self._tasks or self._tasks_from_predictions(coco_results) - - # unmap the category ids for COCO - if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): - dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id - all_contiguous_ids = list(dataset_id_to_contiguous_id.values()) - num_classes = len(all_contiguous_ids) - assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1 - - reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} - for result in coco_results: - category_id = result["category_id"] - assert category_id < num_classes, ( - f"A prediction has class={category_id}, " - f"but the dataset only has {num_classes} classes and " - f"predicted class id should be in [0, {num_classes - 1}]." - ) - result["category_id"] = reverse_id_mapping[category_id] - - if self._output_dir: - file_path = os.path.join(self._output_dir, "coco_instances_results.json") - self._logger.info("Saving results to {}".format(file_path)) - with PathManager.open(file_path, "w") as f: - f.write(json.dumps(coco_results)) - f.flush() - - if not self._do_evaluation: - self._logger.info("Annotations are not available for evaluation.") - return - - self._logger.info( - "Evaluating predictions with {} COCO API...".format( - "unofficial" if self._use_fast_impl else "official" - ) - ) - for task in sorted(tasks): - assert task in {"segm", "keypoints"}, f"Got unknown task: {task}!" - coco_eval = ( - _evaluate_predictions_on_coco( - self._coco_api, - coco_results, - task, - kpt_oks_sigmas=self._kpt_oks_sigmas, - use_fast_impl=self._use_fast_impl, - img_ids=img_ids, - max_dets_per_image=self._max_dets_per_image, - ) - if len(coco_results) > 0 - else None # cocoapi does not handle empty results very well - ) - - res = self._derive_coco_results( - coco_eval, task, class_names=self._metadata.get("thing_classes") - ) - self._results[task] = res - - def _derive_coco_results(self, coco_eval, iou_type, class_names=None): - """ - Derive the desired score numbers from summarized COCOeval. - - Args: - coco_eval (None or COCOEval): None represents no predictions from model. 
- iou_type (str): - class_names (None or list[str]): if provided, will use it to predict - per-category AP. - - Returns: - a dict of {metric name: score} - """ - - metrics = { - "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"], - "keypoints": ["AP", "AP50", "AP75", "APm", "APl"], - }[iou_type] - - if coco_eval is None: - self._logger.warn("No predictions from the model!") - return {metric: float("nan") for metric in metrics} - - # the standard metrics - results = { - metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") - for idx, metric in enumerate(metrics) - } - self._logger.info( - "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) - ) - if not np.isfinite(sum(results.values())): - self._logger.info("Some metrics cannot be computed and is shown as NaN.") - - if class_names is None or len(class_names) <= 1: - return results - # Compute per-category AP - # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa - precisions = coco_eval.eval["precision"] - # precision has dims (iou, recall, cls, area range, max dets) - assert len(class_names) == precisions.shape[2] - - results_per_category = [] - for idx, name in enumerate(class_names): - # area range index 0: all area ranges - # max dets index -1: typically 100 per image - precision = precisions[:, :, idx, 0, -1] - precision = precision[precision > -1] - ap = np.mean(precision) if precision.size else float("nan") - results_per_category.append(("{}".format(name), float(ap * 100))) - - # tabulate it - N_COLS = min(6, len(results_per_category) * 2) - results_flatten = list(itertools.chain(*results_per_category)) - results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) - table = tabulate( - results_2d, - tablefmt="pipe", - floatfmt=".3f", - headers=["category", "AP"] * (N_COLS // 2), - numalign="left", - ) - self._logger.info("Per-category {} AP: \n".format(iou_type) + table) - - results.update({"AP-" + name: ap for name, ap in results_per_category}) - return results - - -def instances_to_coco_json(instances, img_id): - """ - Dump an "Instances" object to a COCO-format json that's used for evaluation. - - Args: - instances (Instances): - img_id (int): the image id - - Returns: - list[dict]: list of json annotations in COCO format. - """ - num_instance = len(instances) - if num_instance == 0: - return [] - - scores = instances.scores.tolist() - classes = instances.pred_classes.tolist() - - has_mask = instances.has("pred_masks") - if has_mask: - # use RLE to encode the masks, because they are too large and takes memory - # since this evaluator stores outputs of the entire dataset - rles = [ - mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0] - for mask in instances.pred_masks - ] - for rle in rles: - # "counts" is an array encoded by mask_util as a byte-stream. Python3's - # json writer which always produces strings cannot serialize a bytestream - # unless you decode it. Thankfully, utf-8 works out (which is also what - # the annotator.oneformer.pycocotools/_mask.pyx does). 
- rle["counts"] = rle["counts"].decode("utf-8") - - has_keypoints = instances.has("pred_keypoints") - if has_keypoints: - keypoints = instances.pred_keypoints - - results = [] - for k in range(num_instance): - result = { - "image_id": img_id, - "category_id": classes[k], - "score": scores[k], - } - if has_mask: - result["segmentation"] = rles[k] - if has_keypoints: - # In COCO annotations, - # keypoints coordinates are pixel indices. - # However our predictions are floating point coordinates. - # Therefore we subtract 0.5 to be consistent with the annotation format. - # This is the inverse of data loading logic in `datasets/coco.py`. - keypoints[k][:, :2] -= 0.5 - result["keypoints"] = keypoints[k].flatten().tolist() - results.append(result) - return results - -def _evaluate_predictions_on_coco( - coco_gt, - coco_results, - iou_type, - kpt_oks_sigmas=None, - use_fast_impl=True, - img_ids=None, - max_dets_per_image=None, -): - """ - Evaluate the coco results using COCOEval API. - """ - assert len(coco_results) > 0 - - if iou_type == "segm": - coco_results = copy.deepcopy(coco_results) - # When evaluating mask AP, if the results contain bbox, cocoapi will - # use the box area as the area of the instance, instead of the mask area. - # This leads to a different definition of small/medium/large. - # We remove the bbox field to let mask AP use mask area. - for c in coco_results: - c.pop("bbox", None) - - coco_dt = coco_gt.loadRes(coco_results) - coco_eval = (COCOeval_opt if use_fast_impl else COCOeval)(coco_gt, coco_dt, iou_type) - # For COCO, the default max_dets_per_image is [1, 10, 100]. - if max_dets_per_image is None: - max_dets_per_image = [1, 10, 100] # Default from COCOEval - else: - assert ( - len(max_dets_per_image) >= 3 - ), "COCOeval requires maxDets (and max_dets_per_image) to have length at least 3" - # In the case that user supplies a custom input for max_dets_per_image, - # apply COCOevalMaxDets to evaluate AP with the custom input. - if max_dets_per_image[2] != 100: - coco_eval = COCOevalMaxDets(coco_gt, coco_dt, iou_type) - if iou_type != "keypoints": - coco_eval.params.maxDets = max_dets_per_image - - if img_ids is not None: - coco_eval.params.imgIds = img_ids - - if iou_type == "keypoints": - # Use the COCO default keypoint OKS sigmas unless overrides are specified - if kpt_oks_sigmas: - assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "annotator.oneformer.pycocotools is too old!" - coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas) - # COCOAPI requires every detection and every gt to have keypoints, so - # we just take the first entry from both - num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3 - num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3 - num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas) - assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, ( - f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. " - f"Ground truth contains {num_keypoints_gt} keypoints. " - f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. " - "They have to agree with each other. For meaning of OKS, please refer to " - "http://cocodataset.org/#keypoints-eval." 
- ) - - coco_eval.evaluate() - coco_eval.accumulate() - coco_eval.summarize() - - return coco_eval - - -class COCOevalMaxDets(COCOeval): - """ - Modified version of COCOeval for evaluating AP with a custom - maxDets (by default for COCO, maxDets is 100) - """ - - def summarize(self): - """ - Compute and display summary metrics for evaluation results given - a custom value for max_dets_per_image - """ - - def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100): - p = self.params - iStr = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}" - titleStr = "Average Precision" if ap == 1 else "Average Recall" - typeStr = "(AP)" if ap == 1 else "(AR)" - iouStr = ( - "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1]) - if iouThr is None - else "{:0.2f}".format(iouThr) - ) - - aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng] - mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets] - if ap == 1: - # dimension of precision: [TxRxKxAxM] - s = self.eval["precision"] - # IoU - if iouThr is not None: - t = np.where(iouThr == p.iouThrs)[0] - s = s[t] - s = s[:, :, :, aind, mind] - else: - # dimension of recall: [TxKxAxM] - s = self.eval["recall"] - if iouThr is not None: - t = np.where(iouThr == p.iouThrs)[0] - s = s[t] - s = s[:, :, aind, mind] - if len(s[s > -1]) == 0: - mean_s = -1 - else: - mean_s = np.mean(s[s > -1]) - print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s)) - return mean_s - - def _summarizeDets(): - stats = np.zeros((12,)) - # Evaluate AP using the custom limit on maximum detections per image - stats[0] = _summarize(1, maxDets=self.params.maxDets[2]) - stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2]) - stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2]) - stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2]) - stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2]) - stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2]) - stats[6] = _summarize(0, maxDets=self.params.maxDets[0]) - stats[7] = _summarize(0, maxDets=self.params.maxDets[1]) - stats[8] = _summarize(0, maxDets=self.params.maxDets[2]) - stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2]) - stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2]) - stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2]) - return stats - - def _summarizeKps(): - stats = np.zeros((10,)) - stats[0] = _summarize(1, maxDets=20) - stats[1] = _summarize(1, maxDets=20, iouThr=0.5) - stats[2] = _summarize(1, maxDets=20, iouThr=0.75) - stats[3] = _summarize(1, maxDets=20, areaRng="medium") - stats[4] = _summarize(1, maxDets=20, areaRng="large") - stats[5] = _summarize(0, maxDets=20) - stats[6] = _summarize(0, maxDets=20, iouThr=0.5) - stats[7] = _summarize(0, maxDets=20, iouThr=0.75) - stats[8] = _summarize(0, maxDets=20, areaRng="medium") - stats[9] = _summarize(0, maxDets=20, areaRng="large") - return stats - - if not self.eval: - raise Exception("Please run accumulate() first") - iouType = self.params.iouType - if iouType == "segm": - summarize = _summarizeDets - elif iouType == "keypoints": - summarize = _summarizeKps - self.stats = summarize() - - def __str__(self): - self.summarize() \ No newline at end of file diff --git a/spaces/TEnngal/bingo/src/components/ui/tooltip.tsx b/spaces/TEnngal/bingo/src/components/ui/tooltip.tsx deleted file mode 100644 index 
af1d48beb90dd5ae311796539843700871052cae..0000000000000000000000000000000000000000 --- a/spaces/TEnngal/bingo/src/components/ui/tooltip.tsx +++ /dev/null @@ -1,30 +0,0 @@ -'use client' - -import * as React from 'react' -import * as TooltipPrimitive from '@radix-ui/react-tooltip' - -import { cn } from '@/lib/utils' - -const TooltipProvider = TooltipPrimitive.Provider - -const Tooltip = TooltipPrimitive.Root - -const TooltipTrigger = TooltipPrimitive.Trigger - -const TooltipContent = React.forwardRef< - React.ElementRef<typeof TooltipPrimitive.Content>, - React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content> ->(({ className, sideOffset = 4, ...props }, ref) => ( - <TooltipPrimitive.Content - ref={ref} - sideOffset={sideOffset} - className={cn( - 'z-50 overflow-hidden rounded-md border bg-popover px-3 py-1.5 text-xs font-medium text-popover-foreground shadow-md animate-in fade-in-50 data-[side=bottom]:slide-in-from-top-1 data-[side=left]:slide-in-from-right-1 data-[side=right]:slide-in-from-left-1 data-[side=top]:slide-in-from-bottom-1', - className - )} - {...props} - /> -)) -TooltipContent.displayName = TooltipPrimitive.Content.displayName - -export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider } diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/enums.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/enums.py deleted file mode 100644 index 5e3e198233698f2b007489dd299cecb87d971067..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/enums.py +++ /dev/null @@ -1,85 +0,0 @@ -""" -All of the Enums that are used throughout the chardet package. - -:author: Dan Blanchard (dan.blanchard@gmail.com) -""" - -from enum import Enum, Flag - - -class InputState: - """ - This enum represents the different states a universal detector can be in. - """ - - PURE_ASCII = 0 - ESC_ASCII = 1 - HIGH_BYTE = 2 - - -class LanguageFilter(Flag): - """ - This enum represents the different language filters we can apply to a - ``UniversalDetector``. - """ - - NONE = 0x00 - CHINESE_SIMPLIFIED = 0x01 - CHINESE_TRADITIONAL = 0x02 - JAPANESE = 0x04 - KOREAN = 0x08 - NON_CJK = 0x10 - ALL = 0x1F - CHINESE = CHINESE_SIMPLIFIED | CHINESE_TRADITIONAL - CJK = CHINESE | JAPANESE | KOREAN - - -class ProbingState(Enum): - """ - This enum represents the different states a prober can be in. - """ - - DETECTING = 0 - FOUND_IT = 1 - NOT_ME = 2 - - -class MachineState: - """ - This enum represents the different states a state machine can be in. - """ - - START = 0 - ERROR = 1 - ITS_ME = 2 - - -class SequenceLikelihood: - """ - This enum represents the likelihood of a character following the previous one. - """ - - NEGATIVE = 0 - UNLIKELY = 1 - LIKELY = 2 - POSITIVE = 3 - - @classmethod - def get_num_categories(cls) -> int: - """:returns: The number of likelihood categories in the enum.""" - return 4 - - -class CharacterCategory: - """ - This enum represents the different categories language models for - ``SingleByteCharsetProber`` put characters into. - - Anything less than CONTROL is considered a letter. 
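-
-    For example, a prober can treat any category value below ``CONTROL``
-    as a letter (an illustrative check, not code taken from this package)::
-
-        if category < CharacterCategory.CONTROL:
-            pass  # count the character toward the language model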
- """ - - UNDEFINED = 255 - LINE_BREAK = 254 - SYMBOL = 253 - DIGIT = 252 - CONTROL = 251 diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/packaging/utils.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/packaging/utils.py deleted file mode 100644 index 33c613b749a49d6035c0e549389e92c3d68a83ad..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/packaging/utils.py +++ /dev/null @@ -1,141 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import re -from typing import FrozenSet, NewType, Tuple, Union, cast - -from .tags import Tag, parse_tag -from .version import InvalidVersion, Version - -BuildTag = Union[Tuple[()], Tuple[int, str]] -NormalizedName = NewType("NormalizedName", str) - - -class InvalidWheelFilename(ValueError): - """ - An invalid wheel filename was found, users should refer to PEP 427. - """ - - -class InvalidSdistFilename(ValueError): - """ - An invalid sdist filename was found, users should refer to the packaging user guide. - """ - - -_canonicalize_regex = re.compile(r"[-_.]+") -# PEP 427: The build number must start with a digit. -_build_tag_regex = re.compile(r"(\d+)(.*)") - - -def canonicalize_name(name: str) -> NormalizedName: - # This is taken from PEP 503. - value = _canonicalize_regex.sub("-", name).lower() - return cast(NormalizedName, value) - - -def canonicalize_version( - version: Union[Version, str], *, strip_trailing_zero: bool = True -) -> str: - """ - This is very similar to Version.__str__, but has one subtle difference - with the way it handles the release segment. 
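-
-    Illustrative results, following the rules implemented below::
-
-        canonicalize_version("1.0.0")      # -> "1" (trailing zeros stripped)
-        canonicalize_version("2.0.post3")  # -> "2.post3"
-        canonicalize_version("not!valid")  # not PEP 440, returned unchanged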
- """ - if isinstance(version, str): - try: - parsed = Version(version) - except InvalidVersion: - # Legacy versions cannot be normalized - return version - else: - parsed = version - - parts = [] - - # Epoch - if parsed.epoch != 0: - parts.append(f"{parsed.epoch}!") - - # Release segment - release_segment = ".".join(str(x) for x in parsed.release) - if strip_trailing_zero: - # NB: This strips trailing '.0's to normalize - release_segment = re.sub(r"(\.0)+$", "", release_segment) - parts.append(release_segment) - - # Pre-release - if parsed.pre is not None: - parts.append("".join(str(x) for x in parsed.pre)) - - # Post-release - if parsed.post is not None: - parts.append(f".post{parsed.post}") - - # Development release - if parsed.dev is not None: - parts.append(f".dev{parsed.dev}") - - # Local version segment - if parsed.local is not None: - parts.append(f"+{parsed.local}") - - return "".join(parts) - - -def parse_wheel_filename( - filename: str, -) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]: - if not filename.endswith(".whl"): - raise InvalidWheelFilename( - f"Invalid wheel filename (extension must be '.whl'): {filename}" - ) - - filename = filename[:-4] - dashes = filename.count("-") - if dashes not in (4, 5): - raise InvalidWheelFilename( - f"Invalid wheel filename (wrong number of parts): {filename}" - ) - - parts = filename.split("-", dashes - 2) - name_part = parts[0] - # See PEP 427 for the rules on escaping the project name - if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: - raise InvalidWheelFilename(f"Invalid project name: {filename}") - name = canonicalize_name(name_part) - version = Version(parts[1]) - if dashes == 5: - build_part = parts[2] - build_match = _build_tag_regex.match(build_part) - if build_match is None: - raise InvalidWheelFilename( - f"Invalid build number: {build_part} in '{filename}'" - ) - build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) - else: - build = () - tags = parse_tag(parts[-1]) - return (name, version, build, tags) - - -def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]: - if filename.endswith(".tar.gz"): - file_stem = filename[: -len(".tar.gz")] - elif filename.endswith(".zip"): - file_stem = filename[: -len(".zip")] - else: - raise InvalidSdistFilename( - f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" - f" {filename}" - ) - - # We are requiring a PEP 440 version, which cannot contain dashes, - # so we split on the last dash. 
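-    # e.g. "my-package-1.0.tar.gz" -> file_stem "my-package-1.0", which
-    # rpartition("-") splits into ("my-package", "-", "1.0").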
- name_part, sep, version_part = file_stem.rpartition("-") - if not sep: - raise InvalidSdistFilename(f"Invalid sdist filename: {filename}") - - name = canonicalize_name(name_part) - version = Version(version_part) - return (name, version) diff --git a/spaces/TencentARC/Caption-Anything/app.py b/spaces/TencentARC/Caption-Anything/app.py deleted file mode 100644 index 53d8bfb5d2ae3c84cc6c030a77ba5b5f8d885a8f..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/Caption-Anything/app.py +++ /dev/null @@ -1,599 +0,0 @@ -import os -import json -import gradio as gr -import numpy as np -from gradio import processing_utils - -from packaging import version -from PIL import Image, ImageDraw -import functools - -from caption_anything.model import CaptionAnything -from caption_anything.utils.image_editing_utils import create_bubble_frame -from caption_anything.utils.utils import mask_painter, seg_model_map, prepare_segmenter, image_resize -from caption_anything.utils.parser import parse_augment -from caption_anything.captioner import build_captioner -from caption_anything.text_refiner import build_text_refiner -from caption_anything.segmenter import build_segmenter -from caption_anything.utils.chatbot import ConversationBot, build_chatbot_tools, get_new_image_name -from segment_anything import sam_model_registry -import easyocr - -args = parse_augment() -args.segmenter = "huge" -args.segmenter_checkpoint = "sam_vit_h_4b8939.pth" -args.clip_filter = True -if args.segmenter_checkpoint is None: - _, segmenter_checkpoint = prepare_segmenter(args.segmenter) -else: - segmenter_checkpoint = args.segmenter_checkpoint - -shared_captioner = build_captioner(args.captioner, args.device, args) -shared_sam_model = sam_model_registry[seg_model_map[args.segmenter]](checkpoint=segmenter_checkpoint).to(args.device) -ocr_lang = ["ch_tra", "en"] -shared_ocr_reader = easyocr.Reader(ocr_lang) -tools_dict = {e.split('_')[0].strip(): e.split('_')[1].strip() for e in args.chat_tools_dict.split(',')} -shared_chatbot_tools = build_chatbot_tools(tools_dict) - - -class ImageSketcher(gr.Image): - """ - Fix the bug of gradio.Image that cannot upload with tool == 'sketch'. - """ - - is_template = True # Magic to make this work with gradio.Block, don't remove unless you know what you're doing. 
- - def __init__(self, **kwargs): - super().__init__(tool="sketch", **kwargs) - - def preprocess(self, x): - if self.tool == 'sketch' and self.source in ["upload", "webcam"]: - assert isinstance(x, dict) - if x['mask'] is None: - decode_image = processing_utils.decode_base64_to_image(x['image']) - width, height = decode_image.size - mask = np.zeros((height, width, 4), dtype=np.uint8) - mask[..., -1] = 255 - mask = self.postprocess(mask) - x['mask'] = mask - return super().preprocess(x) - - -def build_caption_anything_with_models(args, api_key="", captioner=None, sam_model=None, ocr_reader=None, text_refiner=None, - session_id=None): - segmenter = build_segmenter(args.segmenter, args.device, args, model=sam_model) - captioner = captioner - if session_id is not None: - print('Init caption anything for session {}'.format(session_id)) - return CaptionAnything(args, api_key, captioner=captioner, segmenter=segmenter, ocr_reader=ocr_reader, text_refiner=text_refiner) - - -def init_openai_api_key(api_key=""): - text_refiner = None - visual_chatgpt = None - if api_key and len(api_key) > 30: - try: - text_refiner = build_text_refiner(args.text_refiner, args.device, args, api_key) - assert len(text_refiner.llm('hi')) > 0 # test - visual_chatgpt = ConversationBot(shared_chatbot_tools, api_key) - except Exception: - text_refiner = None - visual_chatgpt = None - openai_available = text_refiner is not None - if openai_available: - return [gr.update(visible=True)]*6 + [gr.update(visible=False)]*2 + [text_refiner, visual_chatgpt, None] - else: - return [gr.update(visible=False)]*6 + [gr.update(visible=True)]*2 + [text_refiner, visual_chatgpt, 'Your OpenAI API Key is not available'] - -def init_wo_openai_api_key(): - return [gr.update(visible=False)]*4 + [gr.update(visible=True)]*2 + [gr.update(visible=False)]*2 + [None, None, None] - -def get_click_prompt(chat_input, click_state, click_mode): - inputs = json.loads(chat_input) - if click_mode == 'Continuous': - points = click_state[0] - labels = click_state[1] - for input in inputs: - points.append(input[:2]) - labels.append(input[2]) - elif click_mode == 'Single': - points = [] - labels = [] - for input in inputs: - points.append(input[:2]) - labels.append(input[2]) - click_state[0] = points - click_state[1] = labels - else: - raise NotImplementedError - - prompt = { - "prompt_type": ["click"], - "input_point": click_state[0], - "input_label": click_state[1], - "multimask_output": "True", - } - return prompt - - -def update_click_state(click_state, caption, click_mode): - if click_mode == 'Continuous': - click_state[2].append(caption) - elif click_mode == 'Single': - click_state[2] = [caption] - else: - raise NotImplementedError - -def chat_input_callback(*args): - visual_chatgpt, chat_input, click_state, state, aux_state = args - if visual_chatgpt is not None: - return visual_chatgpt.run_text(chat_input, state, aux_state) - else: - response = "Text refiner is not initialized, please input your OpenAI API key."
- state = state + [(chat_input, response)] - return state, state - - - -def upload_callback(image_input, state, visual_chatgpt=None): - - if isinstance(image_input, dict): # if upload from sketcher_input, input contains image and mask - image_input, mask = image_input['image'], image_input['mask'] - - click_state = [[], [], []] - image_input = image_resize(image_input, res=1024) - - model = build_caption_anything_with_models( - args, - api_key="", - captioner=shared_captioner, - sam_model=shared_sam_model, - ocr_reader=shared_ocr_reader, - session_id=iface.app_id - ) - model.segmenter.set_image(image_input) - image_embedding = model.image_embedding - original_size = model.original_size - input_size = model.input_size - - if visual_chatgpt is not None: - print('upload_callback: add caption to chatGPT memory') - new_image_path = get_new_image_name('chat_image', func_name='upload') - image_input.save(new_image_path) - visual_chatgpt.current_image = new_image_path - img_caption = model.captioner.inference(image_input, filter=False, args={'text_prompt':''})['caption'] - Human_prompt = f'\nHuman: The description of the image with path {new_image_path} is: {img_caption}. This information helps you to understand this image, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n' - AI_prompt = "Received." - visual_chatgpt.global_prompt = Human_prompt + 'AI: ' + AI_prompt - visual_chatgpt.agent.memory.buffer = visual_chatgpt.agent.memory.buffer + visual_chatgpt.global_prompt - state = [(None, 'Received new image, resize it to width {} and height {}: '.format(image_input.size[0], image_input.size[1]))] - - return state, state, image_input, click_state, image_input, image_input, image_embedding, \ - original_size, input_size - - -def inference_click(image_input, point_prompt, click_mode, enable_wiki, language, sentiment, factuality, - length, image_embedding, state, click_state, original_size, input_size, text_refiner, visual_chatgpt, - evt: gr.SelectData): - click_index = evt.index - - if point_prompt == 'Positive': - coordinate = "[[{}, {}, 1]]".format(str(click_index[0]), str(click_index[1])) - else: - coordinate = "[[{}, {}, 0]]".format(str(click_index[0]), str(click_index[1])) - - prompt = get_click_prompt(coordinate, click_state, click_mode) - input_points = prompt['input_point'] - input_labels = prompt['input_label'] - - controls = {'length': length, - 'sentiment': sentiment, - 'factuality': factuality, - 'language': language} - - model = build_caption_anything_with_models( - args, - api_key="", - captioner=shared_captioner, - sam_model=shared_sam_model, - ocr_reader=shared_ocr_reader, - text_refiner=text_refiner, - session_id=iface.app_id - ) - - model.setup(image_embedding, original_size, input_size, is_image_set=True) - - enable_wiki = True if enable_wiki in ['True', 'TRUE', 'true', True, 'Yes', 'YES', 'yes'] else False - out = model.inference(image_input, prompt, controls, disable_gpt=True, enable_wiki=enable_wiki, verbose=True, args={'clip_filter': False})[0] - - state = state + [("Image point: {}, Input label: {}".format(prompt["input_point"], prompt["input_label"]), None)] - state = state + [(None, "raw_caption: {}".format(out['generated_captions']['raw_caption']))] - update_click_state(click_state, out['generated_captions']['raw_caption'], click_mode) - text = out['generated_captions']['raw_caption'] - input_mask = np.array(out['mask'].convert('P')) - image_input = mask_painter(np.array(image_input), 
input_mask) - origin_image_input = image_input - image_input = create_bubble_frame(image_input, text, (click_index[0], click_index[1]), input_mask, - input_points=input_points, input_labels=input_labels) - x, y = input_points[-1] - - if visual_chatgpt is not None: - print('inference_click: add caption to chatGPT memory') - new_crop_save_path = get_new_image_name('chat_image', func_name='crop') - Image.open(out["crop_save_path"]).save(new_crop_save_path) - point_prompt = f'You should primarly use tools on the selected regional image (description: {text}, path: {new_crop_save_path}), which is a part of the whole image (path: {visual_chatgpt.current_image}). If human mentioned some objects not in the selected region, you can use tools on the whole image.' - visual_chatgpt.point_prompt = point_prompt - - yield state, state, click_state, image_input - if not args.disable_gpt and model.text_refiner: - refined_caption = model.text_refiner.inference(query=text, controls=controls, context=out['context_captions'], - enable_wiki=enable_wiki) - # new_cap = 'Original: ' + text + '. Refined: ' + refined_caption['caption'] - new_cap = refined_caption['caption'] - if refined_caption['wiki']: - state = state + [(None, "Wiki: {}".format(refined_caption['wiki']))] - state = state + [(None, f"caption: {new_cap}")] - refined_image_input = create_bubble_frame(origin_image_input, new_cap, (click_index[0], click_index[1]), - input_mask, - input_points=input_points, input_labels=input_labels) - yield state, state, click_state, refined_image_input - - -def get_sketch_prompt(mask: Image.Image): - """ - Get the prompt for the sketcher. - TODO: This is a temporary solution. We should cluster the sketch and get the bounding box of each cluster. - """ - - mask = np.asarray(mask)[..., 0] - - # Get the bounding box of the sketch - y, x = np.where(mask != 0) - x1, y1 = np.min(x), np.min(y) - x2, y2 = np.max(x), np.max(y) - - prompt = { - 'prompt_type': ['box'], - 'input_boxes': [ - [x1, y1, x2, y2] - ] - } - - return prompt - - -def inference_traject(sketcher_image, enable_wiki, language, sentiment, factuality, length, image_embedding, state, - original_size, input_size, text_refiner): - image_input, mask = sketcher_image['image'], sketcher_image['mask'] - - prompt = get_sketch_prompt(mask) - boxes = prompt['input_boxes'] - - controls = {'length': length, - 'sentiment': sentiment, - 'factuality': factuality, - 'language': language} - - model = build_caption_anything_with_models( - args, - api_key="", - captioner=shared_captioner, - sam_model=shared_sam_model, - ocr_reader=shared_ocr_reader, - text_refiner=text_refiner, - session_id=iface.app_id - ) - - model.setup(image_embedding, original_size, input_size, is_image_set=True) - - enable_wiki = True if enable_wiki in ['True', 'TRUE', 'true', True, 'Yes', 'YES', 'yes'] else False - out = model.inference(image_input, prompt, controls, disable_gpt=True, enable_wiki=enable_wiki)[0] - - # Update components and states - state.append((f'Box: {boxes}', None)) - state.append((None, f'raw_caption: {out["generated_captions"]["raw_caption"]}')) - text = out['generated_captions']['raw_caption'] - input_mask = np.array(out['mask'].convert('P')) - image_input = mask_painter(np.array(image_input), input_mask) - - origin_image_input = image_input - - fake_click_index = (int((boxes[0][0] + boxes[0][2]) / 2), int((boxes[0][1] + boxes[0][3]) / 2)) - image_input = create_bubble_frame(image_input, text, fake_click_index, input_mask) - - yield state, state, image_input - - if not args.disable_gpt 
and model.text_refiner: - refined_caption = model.text_refiner.inference(query=text, controls=controls, context=out['context_captions'], - enable_wiki=enable_wiki) - - new_cap = refined_caption['caption'] - if refined_caption['wiki']: - state = state + [(None, "Wiki: {}".format(refined_caption['wiki']))] - state = state + [(None, f"caption: {new_cap}")] - refined_image_input = create_bubble_frame(origin_image_input, new_cap, fake_click_index, input_mask) - - yield state, state, refined_image_input - -def clear_chat_memory(visual_chatgpt, keep_global=False): - if visual_chatgpt is not None: - visual_chatgpt.memory.clear() - visual_chatgpt.point_prompt = "" - if keep_global: - visual_chatgpt.agent.memory.buffer = visual_chatgpt.global_prompt - else: - visual_chatgpt.current_image = None - visual_chatgpt.global_prompt = "" - -def cap_everything(image_input, visual_chatgpt, text_refiner): - - model = build_caption_anything_with_models( - args, - api_key="", - captioner=shared_captioner, - sam_model=shared_sam_model, - ocr_reader=shared_ocr_reader, - text_refiner=text_refiner, - session_id=iface.app_id - ) - paragraph = model.inference_cap_everything(image_input, verbose=True) - # state = state + [(None, f"Caption Everything: {paragraph}")] - Human_prompt = f'\nThe description of the image with path {visual_chatgpt.current_image} is:\n{paragraph}\nThis information helps you to understand this image, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n' - AI_prompt = "Received." - visual_chatgpt.global_prompt = Human_prompt + 'AI: ' + AI_prompt - visual_chatgpt.agent.memory.buffer = visual_chatgpt.agent.memory.buffer + visual_chatgpt.global_prompt - return paragraph - - -def get_style(): - current_version = version.parse(gr.__version__) - if current_version <= version.parse('3.24.1'): - style = ''' - #image_sketcher{min-height:500px} - #image_sketcher [data-testid="image"], #image_sketcher [data-testid="image"] > div{min-height: 500px} - #image_upload{min-height:500px} - #image_upload [data-testid="image"], #image_upload [data-testid="image"] > div{min-height: 500px} - ''' - elif current_version <= version.parse('3.27'): - style = ''' - #image_sketcher{min-height:500px} - #image_upload{min-height:500px} - ''' - else: - style = None - - return style - - -def create_ui(): - title = """<p><h1 align="center">Caption-Anything</h1></p> - """ - description = """<p>Gradio demo for Caption Anything, image to dense captioning generation with various language styles. To use it, simply upload your image, or click one of the examples to load them. 
Code: <a href="https://github.com/ttengwang/Caption-Anything">https://github.com/ttengwang/Caption-Anything</a> <a href="https://huggingface.co/spaces/TencentARC/Caption-Anything?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>""" - - examples = [ - ["test_images/img35.webp"], - ["test_images/img2.jpg"], - ["test_images/img5.jpg"], - ["test_images/img12.jpg"], - ["test_images/img14.jpg"], - ["test_images/qingming3.jpeg"], - ["test_images/img1.jpg"], - ] - - with gr.Blocks( - css=get_style() - ) as iface: - state = gr.State([]) - click_state = gr.State([[], [], []]) - # chat_state = gr.State([]) - origin_image = gr.State(None) - image_embedding = gr.State(None) - text_refiner = gr.State(None) - visual_chatgpt = gr.State(None) - original_size = gr.State(None) - input_size = gr.State(None) - # img_caption = gr.State(None) - aux_state = gr.State([]) - - gr.Markdown(title) - gr.Markdown(description) - - with gr.Row(): - with gr.Column(scale=1.0): - with gr.Column(visible=False) as modules_not_need_gpt: - with gr.Tab("Click"): - image_input = gr.Image(type="pil", interactive=True, elem_id="image_upload") - example_image = gr.Image(type="pil", interactive=False, visible=False) - with gr.Row(scale=1.0): - with gr.Row(scale=0.4): - point_prompt = gr.Radio( - choices=["Positive", "Negative"], - value="Positive", - label="Point Prompt", - interactive=True) - click_mode = gr.Radio( - choices=["Continuous", "Single"], - value="Continuous", - label="Clicking Mode", - interactive=True) - with gr.Row(scale=0.4): - clear_button_click = gr.Button(value="Clear Clicks", interactive=True) - clear_button_image = gr.Button(value="Clear Image", interactive=True) - with gr.Tab("Trajectory (beta)"): - sketcher_input = ImageSketcher(type="pil", interactive=True, brush_radius=20, - elem_id="image_sketcher") - with gr.Row(): - submit_button_sketcher = gr.Button(value="Submit", interactive=True) - - with gr.Column(visible=False) as modules_need_gpt1: - with gr.Row(scale=1.0): - language = gr.Dropdown( - ['English', 'Chinese', 'French', "Spanish", "Arabic", "Portuguese", "Cantonese"], - value="English", label="Language", interactive=True) - sentiment = gr.Radio( - choices=["Positive", "Natural", "Negative"], - value="Natural", - label="Sentiment", - interactive=True, - ) - with gr.Row(scale=1.0): - factuality = gr.Radio( - choices=["Factual", "Imagination"], - value="Factual", - label="Factuality", - interactive=True, - ) - length = gr.Slider( - minimum=10, - maximum=80, - value=10, - step=1, - interactive=True, - label="Generated Caption Length", - ) - enable_wiki = gr.Radio( - choices=["Yes", "No"], - value="No", - label="Enable Wiki", - interactive=True) - # with gr.Column(visible=True) as modules_not_need_gpt3: - gr.Examples( - examples=examples, - inputs=[example_image], - ) - with gr.Column(scale=0.5): - with gr.Column(visible=True) as module_key_input: - openai_api_key = gr.Textbox( - placeholder="Input openAI API key", - show_label=False, - label="OpenAI API Key", - lines=1, - type="password") - with gr.Row(scale=0.5): - enable_chatGPT_button = gr.Button(value="Run with ChatGPT", interactive=True, variant='primary') - disable_chatGPT_button = gr.Button(value="Run without ChatGPT (Faster)", interactive=True, - variant='primary') - with gr.Column(visible=False) as module_notification_box: - notification_box = gr.Textbox(lines=1, label="Notification", max_lines=5, show_label=False) - with gr.Column(visible=False) as 
modules_need_gpt2: - paragraph_output = gr.Textbox(lines=7, label="Describe Everything", max_lines=7) - with gr.Column(visible=False) as modules_need_gpt0: - cap_everything_button = gr.Button(value="Caption Everything in a Paragraph", interactive=True) - with gr.Column(visible=False) as modules_not_need_gpt2: - chatbot = gr.Chatbot(label="Chatbox", ).style(height=550, scale=0.5) - with gr.Column(visible=False) as modules_need_gpt3: - chat_input = gr.Textbox(show_label=False, placeholder="Enter text and press Enter").style( - container=False) - with gr.Row(): - clear_button_text = gr.Button(value="Clear Text", interactive=True) - submit_button_text = gr.Button(value="Submit", interactive=True, variant="primary") - - openai_api_key.submit(init_openai_api_key, inputs=[openai_api_key], - outputs=[modules_need_gpt0, modules_need_gpt1, modules_need_gpt2, modules_need_gpt3, modules_not_need_gpt, - modules_not_need_gpt2, module_key_input, module_notification_box, text_refiner, visual_chatgpt, notification_box]) - enable_chatGPT_button.click(init_openai_api_key, inputs=[openai_api_key], - outputs=[modules_need_gpt0, modules_need_gpt1, modules_need_gpt2, modules_need_gpt3, - modules_not_need_gpt, - modules_not_need_gpt2, module_key_input, module_notification_box, text_refiner, visual_chatgpt, notification_box]) - disable_chatGPT_button.click(init_wo_openai_api_key, - outputs=[modules_need_gpt0, modules_need_gpt1, modules_need_gpt2, modules_need_gpt3, - modules_not_need_gpt, - modules_not_need_gpt2, module_key_input, module_notification_box, text_refiner, visual_chatgpt, notification_box]) - - enable_chatGPT_button.click( - lambda: (None, [], [], [[], [], []], "", "", ""), - [], - [image_input, chatbot, state, click_state, paragraph_output, origin_image], - queue=False, - show_progress=False - ) - openai_api_key.submit( - lambda: (None, [], [], [[], [], []], "", "", ""), - [], - [image_input, chatbot, state, click_state, paragraph_output, origin_image], - queue=False, - show_progress=False - ) - - cap_everything_button.click(cap_everything, [origin_image, visual_chatgpt, text_refiner], [paragraph_output]) - - clear_button_click.click( - lambda x: ([[], [], []], x), - [origin_image], - [click_state, image_input], - queue=False, - show_progress=False - ) - clear_button_click.click(functools.partial(clear_chat_memory, keep_global=True), inputs=[visual_chatgpt]) - clear_button_image.click( - lambda: (None, [], [], [[], [], []], "", "", ""), - [], - [image_input, chatbot, state, click_state, paragraph_output, origin_image], - queue=False, - show_progress=False - ) - clear_button_image.click(clear_chat_memory, inputs=[visual_chatgpt]) - clear_button_text.click( - lambda: ([], [], [[], [], [], []]), - [], - [chatbot, state, click_state], - queue=False, - show_progress=False - ) - clear_button_text.click(clear_chat_memory, inputs=[visual_chatgpt]) - - image_input.clear( - lambda: (None, [], [], [[], [], []], "", "", ""), - [], - [image_input, chatbot, state, click_state, paragraph_output, origin_image], - queue=False, - show_progress=False - ) - - image_input.clear(clear_chat_memory, inputs=[visual_chatgpt]) - - - image_input.upload(upload_callback, [image_input, state, visual_chatgpt], - [chatbot, state, origin_image, click_state, image_input, sketcher_input, - image_embedding, original_size, input_size]) - sketcher_input.upload(upload_callback, [sketcher_input, state, visual_chatgpt], - [chatbot, state, origin_image, click_state, image_input, sketcher_input, - image_embedding, original_size, input_size]) - 
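The reset wiring above follows one pattern: each clear control fans a single no-argument lambda out to the full tuple of stateful outputs, so every piece of session state is reset in one event. A trimmed sketch of that pattern, assuming gradio 3.x:

```python
import gradio as gr

with gr.Blocks() as demo:
    state = gr.State([])                   # chat history
    click_state = gr.State([[], [], []])   # points, labels, captions
    image_input = gr.Image(type="pil")
    clear_btn = gr.Button("Clear Image")

    # One lambda resets every stateful output at once, mirroring the app above.
    clear_btn.click(
        lambda: (None, [], [[], [], []]),
        inputs=[],
        outputs=[image_input, state, click_state],
        queue=False,
    )

# demo.launch()
```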
chat_input.submit(chat_input_callback, [visual_chatgpt, chat_input, click_state, state, aux_state], - [chatbot, state, aux_state]) - chat_input.submit(lambda: "", None, chat_input) - submit_button_text.click(chat_input_callback, [visual_chatgpt, chat_input, click_state, state, aux_state], - [chatbot, state, aux_state]) - submit_button_text.click(lambda: "", None, chat_input) - example_image.change(upload_callback, [example_image, state, visual_chatgpt], - [chatbot, state, origin_image, click_state, image_input, sketcher_input, - image_embedding, original_size, input_size]) - example_image.change(clear_chat_memory, inputs=[visual_chatgpt]) - # select coordinate - image_input.select( - inference_click, - inputs=[ - origin_image, point_prompt, click_mode, enable_wiki, language, sentiment, factuality, length, - image_embedding, state, click_state, original_size, input_size, text_refiner, visual_chatgpt - ], - outputs=[chatbot, state, click_state, image_input], - show_progress=False, queue=True - ) - - submit_button_sketcher.click( - inference_traject, - inputs=[ - sketcher_input, enable_wiki, language, sentiment, factuality, length, image_embedding, state, - original_size, input_size, text_refiner - ], - outputs=[chatbot, state, sketcher_input], - show_progress=False, queue=True - ) - - return iface - - -if __name__ == '__main__': - iface = create_ui() - iface.queue(concurrency_count=5, api_open=False, max_size=10) - iface.launch(server_name="0.0.0.0", enable_queue=True) diff --git a/spaces/Tonic/indiansummer/README.md b/spaces/Tonic/indiansummer/README.md deleted file mode 100644 index 4e4bf471600f39bf369f1521d79fd5113a3f504f..0000000000000000000000000000000000000000 --- a/spaces/Tonic/indiansummer/README.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -tags: -- gradio-theme -title: indiansummer -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.22.1b1 -app_file: app.py -pinned: false -license: apache-2.0 -emoji: ☀️ ---- -# GreenBlast -an a11y w3c accessibility-first theme that's not boring for 🤗hugging face 👊🏻🔥🚀 -## Description -A light/mode dark/mode template that's accessible and exciting ! -## Contributions -Thanks to [@freddyaboulton](https://huggingface.co/freddyaboulton) for adding this gradio theme! \ No newline at end of file diff --git a/spaces/Wootang01/grammar_corrector/README.md b/spaces/Wootang01/grammar_corrector/README.md deleted file mode 100644 index c291ef6d619459fa59d087f9e4129ddc03f83d74..0000000000000000000000000000000000000000 --- a/spaces/Wootang01/grammar_corrector/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Grammar_corrector -emoji: 💻 -colorFrom: blue -colorTo: red -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
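Before the next Space, one note on the entry point that closes the Caption-Anything app above: `queue()` is called before `launch()` so the `yield`-based handlers can stream partial results. A minimal sketch under gradio 3.x; `concurrency_count` and `enable_queue` were later removed in gradio 4.x:

```python
import gradio as gr

def echo(text: str) -> str:
    return text

iface = gr.Interface(echo, gr.Textbox(), gr.Textbox())
# Bound concurrent workers and the pending backlog; keep the queue API private.
iface.queue(concurrency_count=5, api_open=False, max_size=10)
iface.launch(server_name="0.0.0.0")  # bind all interfaces, as Spaces expects
```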
diff --git a/spaces/XAI/VisualCorrespondenceHumanStudy/app.py b/spaces/XAI/VisualCorrespondenceHumanStudy/app.py deleted file mode 100644 index 4b71970509e0177693584d8a0c11929008e5d807..0000000000000000000000000000000000000000 --- a/spaces/XAI/VisualCorrespondenceHumanStudy/app.py +++ /dev/null @@ -1,403 +0,0 @@ -import json -import os -import pickle -import random -import time -from collections import Counter -from datetime import datetime -from glob import glob - -import gdown -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import seaborn as sns -import streamlit as st -from PIL import Image - -import SessionState -from download_utils import * -from image_utils import * - -random.seed(datetime.now()) -np.random.seed(int(time.time())) - -NUMBER_OF_TRIALS = 20 -CLASSIFIER_TAG = "" -explaination_functions = [load_chm_nns, load_knn_nns] -selected_xai_tool = None - -# Config -folder_to_name = {} -class_descriptions = {} -classifier_predictions = {} -selected_dataset = "Final" - -root_visualization_dir = "./visualizations/" -viz_url = "https://drive.google.com/uc?id=1LpmOc_nFBzApYWAokO2J-s9RRXsk3pBN" -viz_archivefile = "Final.zip" - -demonstration_url = "https://drive.google.com/uc?id=1C92llG5VrlABrsIEvxfNlSDc_gIeLlls" -demonst_zipfile = "demonstrations.zip" - -picklefile_url = "https://drive.google.com/uc?id=1Yx4abA4VLZGO5JkzhXVGdy6mbPltMd68" -prediction_root = "./predictions/" -prediction_pickle = f"{prediction_root}predictions.pickle" - - -# Get the Data -download_files( - root_visualization_dir, - viz_url, - viz_archivefile, - demonstration_url, - demonst_zipfile, - picklefile_url, - prediction_root, - prediction_pickle, -) -################################################ -# GLOBAL VARIABLES -app_mode = "" - -## Shared/Global Information -with open("imagenet-labels.json", "rb") as f: - folder_to_name = json.load(f) - -with open("gloss.txt", "r") as f: - description_file = f.readlines() - -class_descriptions = {l.split("\t")[0]: l.split("\t")[1] for l in description_file} -################################################ - -with open(prediction_pickle, "rb") as f: - classifier_predictions = pickle.load(f) - -# SESSION STATE -session_state = SessionState.get( - page=1, - first_run=1, - user_feedback={}, - queries=[], - is_classifier_correct={}, - XAI_tool="Unselected", -) -################################################ - - -def resmaple_queries(): - if session_state.first_run == 1: - both_correct = glob( - root_visualization_dir + selected_dataset + "/Both_correct/*.JPEG" - ) - both_wrong = glob( - root_visualization_dir + selected_dataset + "/Both_wrong/*.JPEG" - ) - - correct_samples = list( - np.random.choice(a=both_correct, size=NUMBER_OF_TRIALS // 2, replace=False) - ) - wrong_samples = list( - np.random.choice(a=both_wrong, size=NUMBER_OF_TRIALS // 2, replace=False) - ) - - all_images = correct_samples + wrong_samples - random.shuffle(all_images) - session_state.queries = all_images - session_state.first_run = -1 - # RESET INTERACTIONS - session_state.user_feedback = {} - session_state.is_classifier_correct = {} - - -def render_experiment(query): - current_query = session_state.queries[query] - query_id = os.path.basename(current_query) - - predicted_wnid = classifier_predictions[query_id][f"{CLASSIFIER_TAG}-predictions"] - prediction_confidence = classifier_predictions[query_id][ - f"{CLASSIFIER_TAG}-confidence" - ] - prediction_label = folder_to_name[predicted_wnid] - class_def = class_descriptions[predicted_wnid] - - 
session_state.is_classifier_correct[query_id] = classifier_predictions[query_id][ - f"{CLASSIFIER_TAG.upper()}-Output" - ] - - ################################### SHOW QUERY and PREDICTION - - col1, col2 = st.columns(2) - with col1: - st.image(load_query(current_query), caption=f"Query ID: {query_id}") - with col2: - ################################### SHOW DESCRIPTION OF CLASS - with st.expander("Show Class Description"): - st.write(f"**Name**: {prediction_label}") - st.write("**Class Definition**:") - st.markdown("`" + class_def + "`") - st.image( - Image.open(f"demonstrations/{predicted_wnid}.jpeg"), - caption=f"Class Explanation", - use_column_width=True, - ) - - default_value = 0 - if query_id in session_state.user_feedback.keys(): - if session_state.user_feedback[query_id] == "Correct": - default_value = 1 - elif session_state.user_feedback[query_id] == "Wrong": - default_value = 2 - - session_state.user_feedback[query_id] = st.radio( - "What do you think about the model's prediction?", - ("-", "Correct", "Wrong"), - key=query_id, - index=default_value, - ) - st.write(f"**Model Prediction**: {prediction_label}") - st.write(f"**Model Confidence**: {prediction_confidence}") - - ################################### SHOW Model Explanation - if selected_xai_tool is not None: - st.image( - selected_xai_tool(current_query), - caption=f"Explanation", - use_column_width=True, - ) - - ################################### SHOW DEBUG INFO - - if st.button("Debug: Show Everything"): - st.image(Image.open(current_query)) - - -def render_results(): - user_correct_guess = 0 - for q in session_state.user_feedback.keys(): - uf = True if session_state.user_feedback[q] == "Correct" else False - if session_state.is_classifier_correct[q] == uf: - user_correct_guess += 1 - - st.write( - f"User performance on {CLASSIFIER_TAG}: {user_correct_guess} out of {len(session_state.user_feedback)} Correct" - ) - st.markdown("## User Performance Breakdown") - - categories = [ - "Correct", - "Wrong", - ] # set(session_state.is_classifier_correct.values()) - breakdown_stats_correct = {c: 0 for c in categories} - breakdown_stats_wrong = {c: 0 for c in categories} - - experiment_summary = [] - - for q in session_state.user_feedback.keys(): - category = "Correct" if session_state.is_classifier_correct[q] else "Wrong" - is_user_correct = category == session_state.user_feedback[q] - - if is_user_correct: - breakdown_stats_correct[category] += 1 - else: - breakdown_stats_wrong[category] += 1 - - experiment_summary.append( - [ - q, - classifier_predictions[q]["real-gts"], - folder_to_name[ - classifier_predictions[q][f"{CLASSIFIER_TAG}-predictions"] - ], - category, - session_state.user_feedback[q], - is_user_correct, - ] - ) - ################################### Summary Table - experiment_summary_df = pd.DataFrame.from_records( - experiment_summary, - columns=[ - "Query", - "GT Labels", - f"{CLASSIFIER_TAG} Prediction", - "Category", - "User Prediction", - "Is User Prediction Correct", - ], - ) - st.write("Summary", experiment_summary_df) - - csv = convert_df(experiment_summary_df) - st.download_button( - "Press to Download", csv, "summary.csv", "text/csv", key="download-records" - ) - ################################### SHOW BREAKDOWN - user_pf_by_model_pred = experiment_summary_df.groupby("Category").agg( - {"Is User Prediction Correct": ["count", "sum", "mean"]} - ) - # rename columns - user_pf_by_model_pred.columns = user_pf_by_model_pred.columns.droplevel(0) - user_pf_by_model_pred.columns = [ - "Count", - "Correct User 
Guess", - "Mean User Performance", - ] - user_pf_by_model_pred.index.name = "Model Prediction" - st.write("User performance break down by Model prediction:", user_pf_by_model_pred) - csv = convert_df(user_pf_by_model_pred) - st.download_button( - "Press to Download", - csv, - "user-performance-by-model-prediction.csv", - "text/csv", - key="download-performance-by-model-prediction", - ) - ################################### CONFUSION MATRIX - - confusion_matrix = pd.crosstab( - experiment_summary_df["Category"], - experiment_summary_df["User Prediction"], - rownames=["Actual"], - colnames=["Predicted"], - ) - st.write("Confusion Matrix", confusion_matrix) - csv = convert_df(confusion_matrix) - st.download_button( - "Press to Download", - csv, - "confusion-matrix.csv", - "text/csv", - key="download-confusiion-matrix", - ) - - -def render_menu(): - # Render the readme as markdown using st.markdown. - readme_text = st.markdown( - """ - # Instructions - ``` - When testing this study, you should first see the class definition, then hide the expander and see the query. - ``` - """ - ) - - app_mode = st.selectbox( - "Choose the page to show:", - ["Experiment Instruction", "Start Experiment", "See the Results"], - ) - - if app_mode == "Experiment Instruction": - st.success("To continue select an option in the dropdown menu.") - elif app_mode == "Start Experiment": - # Clear Canvas - readme_text.empty() - - page_id = session_state.page - col1, col4, col2, col3 = st.columns(4) - prev_page = col1.button("Previous Image") - - if prev_page: - page_id -= 1 - if page_id < 1: - page_id = 1 - - next_page = col2.button("Next Image") - - if next_page: - page_id += 1 - if page_id > NUMBER_OF_TRIALS: - page_id = NUMBER_OF_TRIALS - - if page_id == NUMBER_OF_TRIALS: - st.success( - 'You have reached the last image. Please go to the "Results" page to see your performance.' 
- ) - if st.button("View"): - app_mode = "See the Results" - - if col3.button("Resample"): - st.write("Restarting ...") - page_id = 1 - session_state.first_run = 1 - resmaple_queries() - - session_state.page = page_id - st.write(f"Render Experiment: {session_state.page}") - render_experiment(session_state.page - 1) - elif app_mode == "See the Results": - readme_text.empty() - st.write("Results Summary") - render_results() - - -def main(): - global app_mode - global session_state - global selected_xai_tool - global CLASSIFIER_TAG - - # Set the session state - # State Management and General Setup - st.set_page_config(layout="wide") - st.title("Visual CorrespondenceHuman Study - ImageNet") - - options = [ - "Unselected", - "NOXAI", - "KNN", - "EMD-Corr Nearest Neighbors", - "EMD-Corr Correspondence", - "CHM-Corr Nearest Neighbors", - "CHM-Corr Correspondence", - ] - - st.markdown( - """ <style> - div[role="radiogroup"] > :first-child{ - display: none !important; - } - </style> - """, - unsafe_allow_html=True, - ) - - if session_state.XAI_tool == "Unselected": - default = options.index(session_state.XAI_tool) - session_state.XAI_tool = st.radio( - "What explaination tool do you want to evaluate?", - options, - key="which_xai", - index=default, - ) - # print(session_state.XAI_tool) - - if session_state.XAI_tool != "Unselected": - st.markdown(f"## SELECTED METHOD ``{session_state.XAI_tool}``") - - if session_state.XAI_tool == "NOXAI": - CLASSIFIER_TAG = "knn" - selected_xai_tool = None - elif session_state.XAI_tool == "KNN": - selected_xai_tool = load_knn_nns - CLASSIFIER_TAG = "knn" - elif session_state.XAI_tool == "CHM-Corr Nearest Neighbors": - selected_xai_tool = load_chm_nns - CLASSIFIER_TAG = "CHM" - elif session_state.XAI_tool == "CHM-Corr Correspondence": - selected_xai_tool = load_chm_corrs - CLASSIFIER_TAG = "CHM" - elif session_state.XAI_tool == "EMD-Corr Nearest Neighbors": - selected_xai_tool = load_emd_nns - CLASSIFIER_TAG = "EMD" - elif session_state.XAI_tool == "EMD-Corr Correspondence": - selected_xai_tool = load_emd_corrs - CLASSIFIER_TAG = "EMD" - - resmaple_queries() - render_menu() - - -if __name__ == "__main__": - main() diff --git a/spaces/XlalalaX/VITS-Umamusume-voice-synthesizer/text/shanghainese.py b/spaces/XlalalaX/VITS-Umamusume-voice-synthesizer/text/shanghainese.py deleted file mode 100644 index cb29c24a08d2e406e8399cf7bc9fe5cb43cb9c61..0000000000000000000000000000000000000000 --- a/spaces/XlalalaX/VITS-Umamusume-voice-synthesizer/text/shanghainese.py +++ /dev/null @@ -1,64 +0,0 @@ -import re -import cn2an -import opencc - - -converter = opencc.OpenCC('zaonhe') - -# List of (Latin alphabet, ipa) pairs: -_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('A', 'ᴇ'), - ('B', 'bi'), - ('C', 'si'), - ('D', 'di'), - ('E', 'i'), - ('F', 'ᴇf'), - ('G', 'dʑi'), - ('H', 'ᴇtɕʰ'), - ('I', 'ᴀi'), - ('J', 'dʑᴇ'), - ('K', 'kʰᴇ'), - ('L', 'ᴇl'), - ('M', 'ᴇm'), - ('N', 'ᴇn'), - ('O', 'o'), - ('P', 'pʰi'), - ('Q', 'kʰiu'), - ('R', 'ᴀl'), - ('S', 'ᴇs'), - ('T', 'tʰi'), - ('U', 'ɦiu'), - ('V', 'vi'), - ('W', 'dᴀbɤliu'), - ('X', 'ᴇks'), - ('Y', 'uᴀi'), - ('Z', 'zᴇ') -]] - - -def _number_to_shanghainese(num): - num = cn2an.an2cn(num).replace('一十','十').replace('二十', '廿').replace('二', '两') - return re.sub(r'((?:^|[^三四五六七八九])十|廿)两', r'\1二', num) - - -def number_to_shanghainese(text): - return re.sub(r'\d+(?:\.?\d+)?', lambda x: _number_to_shanghainese(x.group()), text) - - -def latin_to_ipa(text): - for regex, replacement in _latin_to_ipa: - text = re.sub(regex, replacement, 
text) - return text - - -def shanghainese_to_ipa(text): - text = number_to_shanghainese(text.upper()) - text = converter.convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! ', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/versatile_diffusion/__init__.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/versatile_diffusion/__init__.py deleted file mode 100644 index 1d2caa7e2399001632b61504aa7bc59a1ad2bcfe..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/versatile_diffusion/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -from ...utils import is_torch_available, is_transformers_available, is_transformers_version - - -if is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0.dev0"): - from .modeling_text_unet import UNetFlatConditionModel - from .pipeline_versatile_diffusion import VersatileDiffusionPipeline - from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline - from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline - from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline -else: - from ...utils.dummy_torch_and_transformers_objects import ( - VersatileDiffusionDualGuidedPipeline, - VersatileDiffusionImageVariationPipeline, - VersatileDiffusionPipeline, - VersatileDiffusionTextToImagePipeline, - ) diff --git a/spaces/YlcldKlns/bing/src/components/ui/input.tsx b/spaces/YlcldKlns/bing/src/components/ui/input.tsx deleted file mode 100644 index 684a857f3d769b78818fb13de1abaebfb09ca79c..0000000000000000000000000000000000000000 --- a/spaces/YlcldKlns/bing/src/components/ui/input.tsx +++ /dev/null @@ -1,25 +0,0 @@ -import * as React from 'react' - -import { cn } from '@/lib/utils' - -export interface InputProps - extends React.InputHTMLAttributes<HTMLInputElement> {} - -const Input = React.forwardRef<HTMLInputElement, InputProps>( - ({ className, type, ...props }, ref) => { - return ( - <input - type={type} - className={cn( - 'flex h-9 w-full rounded-md border border-input bg-transparent px-3 py-2 text-sm shadow-sm ring-offset-background file:border-0 file:bg-transparent file:text-sm file:font-medium placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50', - className - )} - ref={ref} - {...props} - /> - ) - } -) -Input.displayName = 'Input' - -export { Input } diff --git a/spaces/YlcldKlns/bing/tailwind.config.js b/spaces/YlcldKlns/bing/tailwind.config.js deleted file mode 100644 index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000 --- a/spaces/YlcldKlns/bing/tailwind.config.js +++ /dev/null @@ -1,48 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: [ - './src/pages/**/*.{js,ts,jsx,tsx,mdx}', - './src/components/**/*.{js,ts,jsx,tsx,mdx}', - './src/app/**/*.{js,ts,jsx,tsx,mdx}', - './src/ui/**/*.{js,ts,jsx,tsx,mdx}', - ], - "darkMode": "class", - theme: { - extend: { - colors: { - 'primary-blue': 
'rgb(var(--color-primary-blue) / <alpha-value>)', - secondary: 'rgb(var(--color-secondary) / <alpha-value>)', - 'primary-background': 'rgb(var(--primary-background) / <alpha-value>)', - 'primary-text': 'rgb(var(--primary-text) / <alpha-value>)', - 'secondary-text': 'rgb(var(--secondary-text) / <alpha-value>)', - 'light-text': 'rgb(var(--light-text) / <alpha-value>)', - 'primary-border': 'rgb(var(--primary-border) / <alpha-value>)', - }, - keyframes: { - slideDownAndFade: { - from: { opacity: 0, transform: 'translateY(-2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideLeftAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - slideUpAndFade: { - from: { opacity: 0, transform: 'translateY(2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideRightAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - }, - animation: { - slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - }, - }, - }, - plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')], -} diff --git a/spaces/YuAnthony/Voice-Recognition/infer_contrast.py b/spaces/YuAnthony/Voice-Recognition/infer_contrast.py deleted file mode 100644 index d76b9b5e5284cd04a51ca21c34bf47688f311282..0000000000000000000000000000000000000000 --- a/spaces/YuAnthony/Voice-Recognition/infer_contrast.py +++ /dev/null @@ -1,47 +0,0 @@ -import argparse -import functools - -import numpy as np -import torch - -from utils.reader import load_audio -from utils.utility import add_arguments, print_arguments - -parser = argparse.ArgumentParser(description=__doc__) -add_arg = functools.partial(add_arguments, argparser=parser) -add_arg('threshold', float, 0.71, 'threshold for deciding whether two recordings are the same person') -add_arg('input_shape', str, '(1, 257, 257)', 'shape of the input data') -add_arg('model_path', str, 'models_large/resnet34.pth', 'path to the inference model') -# args = parser.parse_args() -args = parser.parse_known_args()[0] - print_arguments(args) -print(torch.cuda.is_available()) -device = torch.device("cpu") - -# Load the model -# model = torch.jit.load(args.model_path) -model = torch.jit.load(args.model_path, map_location="cpu") -# model.to(device) -model.eval() - - -# Run inference on an audio clip -def infer(audio_path): - input_shape = eval(args.input_shape) - data = load_audio(audio_path, mode='infer', spec_len=input_shape[2]) - data = data[np.newaxis, :] - data = torch.tensor(data, dtype=torch.float32) - # Run the prediction - feature = model(data) - return feature.data.cpu().numpy() - - -def run(audio1,audio2): - # Audio files of the two speakers to compare - feature1 = infer(audio1)[0] - feature2 = infer(audio2)[0] - # Cosine similarity - dist = np.dot(feature1, feature2) / (np.linalg.norm(feature1) * np.linalg.norm(feature2)) - - return dist \ No newline at end of file diff --git a/spaces/ZX9966/LOGO-Approximate-Computing-Technology/style.css b/spaces/ZX9966/LOGO-Approximate-Computing-Technology/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/ZX9966/LOGO-Approximate-Computing-Technology/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: 
rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/runner/hooks/ema.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/runner/hooks/ema.py deleted file mode 100644 index 15c7e68088f019802a59e7ae41cc1fe0c7f28f96..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/runner/hooks/ema.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ...parallel import is_module_wrapper -from ..hooks.hook import HOOKS, Hook - - -@HOOKS.register_module() -class EMAHook(Hook): - r"""Exponential Moving Average Hook. - - Use Exponential Moving Average on all parameters of model in training - process. All parameters have a ema backup, which update by the formula - as below. EMAHook takes priority over EvalHook and CheckpointSaverHook. - - .. math:: - - \text{Xema\_{t+1}} = (1 - \text{momentum}) \times - \text{Xema\_{t}} + \text{momentum} \times X_t - - Args: - momentum (float): The momentum used for updating ema parameter. - Defaults to 0.0002. - interval (int): Update ema parameter every interval iteration. - Defaults to 1. - warm_up (int): During first warm_up steps, we may use smaller momentum - to update ema parameters more slowly. Defaults to 100. - resume_from (str): The checkpoint path. Defaults to None. - """ - - def __init__(self, - momentum=0.0002, - interval=1, - warm_up=100, - resume_from=None): - assert isinstance(interval, int) and interval > 0 - self.warm_up = warm_up - self.interval = interval - assert momentum > 0 and momentum < 1 - self.momentum = momentum**interval - self.checkpoint = resume_from - - def before_run(self, runner): - """To resume model with it's ema parameters more friendly. - - Register ema parameter as ``named_buffer`` to model - """ - model = runner.model - if is_module_wrapper(model): - model = model.module - self.param_ema_buffer = {} - self.model_parameters = dict(model.named_parameters(recurse=True)) - for name, value in self.model_parameters.items(): - # "." 
is not allowed in module's buffer name - buffer_name = f"ema_{name.replace('.', '_')}" - self.param_ema_buffer[name] = buffer_name - model.register_buffer(buffer_name, value.data.clone()) - self.model_buffers = dict(model.named_buffers(recurse=True)) - if self.checkpoint is not None: - runner.resume(self.checkpoint) - - def after_train_iter(self, runner): - """Update ema parameter every self.interval iterations.""" - curr_step = runner.iter - # We warm up the momentum considering the instability at beginning - momentum = min(self.momentum, - (1 + curr_step) / (self.warm_up + curr_step)) - if curr_step % self.interval != 0: - return - for name, parameter in self.model_parameters.items(): - buffer_name = self.param_ema_buffer[name] - buffer_parameter = self.model_buffers[buffer_name] - buffer_parameter.mul_(1 - momentum).add_(momentum, parameter.data) - - def after_train_epoch(self, runner): - """We load parameter values from ema backup to model before the - EvalHook.""" - self._swap_ema_parameters() - - def before_train_epoch(self, runner): - """We recover model's parameter from ema backup after last epoch's - EvalHook.""" - self._swap_ema_parameters() - - def _swap_ema_parameters(self): - """Swap the parameter of model with parameter in ema_buffer.""" - for name, value in self.model_parameters.items(): - temp = value.data.clone() - ema_buffer = self.model_buffers[self.param_ema_buffer[name]] - value.data.copy_(ema_buffer.data) - ema_buffer.data.copy_(temp) diff --git a/spaces/aijack/jojo/e4e/scripts/train.py b/spaces/aijack/jojo/e4e/scripts/train.py deleted file mode 100644 index d885cfde49a0b21140e663e475918698d5e51ee3..0000000000000000000000000000000000000000 --- a/spaces/aijack/jojo/e4e/scripts/train.py +++ /dev/null @@ -1,88 +0,0 @@ -""" -This file runs the main training/val loop -""" -import os -import json -import math -import sys -import pprint -import torch -from argparse import Namespace - -sys.path.append(".") -sys.path.append("..") - -from options.train_options import TrainOptions -from training.coach import Coach - - -def main(): - opts = TrainOptions().parse() - previous_train_ckpt = None - if opts.resume_training_from_ckpt: - opts, previous_train_ckpt = load_train_checkpoint(opts) - else: - setup_progressive_steps(opts) - create_initial_experiment_dir(opts) - - coach = Coach(opts, previous_train_ckpt) - coach.train() - - -def load_train_checkpoint(opts): - train_ckpt_path = opts.resume_training_from_ckpt - previous_train_ckpt = torch.load(opts.resume_training_from_ckpt, map_location='cpu') - new_opts_dict = vars(opts) - opts = previous_train_ckpt['opts'] - opts['resume_training_from_ckpt'] = train_ckpt_path - update_new_configs(opts, new_opts_dict) - pprint.pprint(opts) - opts = Namespace(**opts) - if opts.sub_exp_dir is not None: - sub_exp_dir = opts.sub_exp_dir - opts.exp_dir = os.path.join(opts.exp_dir, sub_exp_dir) - create_initial_experiment_dir(opts) - return opts, previous_train_ckpt - - -def setup_progressive_steps(opts): - log_size = int(math.log(opts.stylegan_size, 2)) - num_style_layers = 2*log_size - 2 - num_deltas = num_style_layers - 1 - if opts.progressive_start is not None: # If progressive delta training - opts.progressive_steps = [0] - next_progressive_step = opts.progressive_start - for i in range(num_deltas): - opts.progressive_steps.append(next_progressive_step) - next_progressive_step += opts.progressive_step_every - - assert opts.progressive_steps is None or is_valid_progressive_steps(opts, num_style_layers), \ - "Invalid progressive training input" - - 
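Looking back at the `EMAHook` above for a moment: its whole job reduces to one in-place update per parameter, `x_ema <- (1 - m) * x_ema + m * x`, where a warm-up clamps the effective momentum early in training. A standalone sketch of that rule (`ema_update` is a hypothetical helper for illustration, not part of the hook's API):

```python
import torch

def ema_update(shadow, params, momentum=0.0002, step=0, warm_up=100):
    """Apply x_ema <- (1 - m) * x_ema + m * x with warm-up-clamped momentum."""
    m = min(momentum, (1 + step) / (warm_up + step))
    with torch.no_grad():
        for s, p in zip(shadow, params):
            s.mul_(1 - m).add_(p, alpha=m)

# Usage: keep a detached copy of the weights and update it every few steps.
weights = [torch.randn(4, requires_grad=True)]
shadow = [w.detach().clone() for w in weights]
ema_update(shadow, [w.detach() for w in weights], step=10)
```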
-def is_valid_progressive_steps(opts, num_style_layers): - return len(opts.progressive_steps) == num_style_layers and opts.progressive_steps[0] == 0 - - -def create_initial_experiment_dir(opts): - if os.path.exists(opts.exp_dir): - raise Exception('Oops... {} already exists'.format(opts.exp_dir)) - os.makedirs(opts.exp_dir) - - opts_dict = vars(opts) - pprint.pprint(opts_dict) - with open(os.path.join(opts.exp_dir, 'opt.json'), 'w') as f: - json.dump(opts_dict, f, indent=4, sort_keys=True) - - -def update_new_configs(ckpt_opts, new_opts): - for k, v in new_opts.items(): - if k not in ckpt_opts: - ckpt_opts[k] = v - if new_opts['update_param_list']: - for param in new_opts['update_param_list']: - ckpt_opts[param] = new_opts[param] - - -if __name__ == '__main__': - main() diff --git a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/parallel_wavegan/losses/mel_loss.py b/spaces/akhaliq/VQMIVC/ParallelWaveGAN/parallel_wavegan/losses/mel_loss.py deleted file mode 100644 index 58b12bb76a4e9755d749ae83ba520ca2a3dbea2b..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/parallel_wavegan/losses/mel_loss.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright 2021 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -"""Mel-spectrogram loss modules.""" - -from distutils.version import LooseVersion - -import librosa -import torch -import torch.nn.functional as F - - -is_pytorch_17plus = LooseVersion(torch.__version__) >= LooseVersion("1.7") - - -class MelSpectrogram(torch.nn.Module): - """Calculate Mel-spectrogram.""" - - def __init__( - self, - fs=22050, - fft_size=1024, - hop_size=256, - win_length=None, - window="hann", - num_mels=80, - fmin=80, - fmax=7600, - center=True, - normalized=False, - onesided=True, - eps=1e-10, - log_base=10.0, - ): - """Initialize MelSpectrogram module.""" - super().__init__() - self.fft_size = fft_size - if win_length is None: - self.win_length = fft_size - else: - self.win_length = win_length - self.hop_size = hop_size - self.center = center - self.normalized = normalized - self.onesided = onesided - if window is not None and not hasattr(torch, f"{window}_window"): - raise ValueError(f"{window} window is not implemented") - self.window = window - self.eps = eps - - fmin = 0 if fmin is None else fmin - fmax = fs / 2 if fmax is None else fmax - melmat = librosa.filters.mel( - sr=fs, - n_fft=fft_size, - n_mels=num_mels, - fmin=fmin, - fmax=fmax, - ) - self.register_buffer("melmat", torch.from_numpy(melmat.T).float()) - self.stft_params = { - "n_fft": self.fft_size, - "win_length": self.win_length, - "hop_length": self.hop_size, - "center": self.center, - "normalized": self.normalized, - "onesided": self.onesided, - } - if is_pytorch_17plus: - self.stft_params["return_complex"] = False - - self.log_base = log_base - if self.log_base is None: - self.log = torch.log - elif self.log_base == 2.0: - self.log = torch.log2 - elif self.log_base == 10.0: - self.log = torch.log10 - else: - raise ValueError(f"log_base: {log_base} is not supported.") - - def forward(self, x): - """Calculate Mel-spectrogram. - - Args: - x (Tensor): Input waveform tensor (B, T) or (B, 1, T). - - Returns: - Tensor: Mel-spectrogram (B, #mels, #frames). 
- - """ - if x.dim() == 3: - # (B, C, T) -> (B*C, T) - x = x.reshape(-1, x.size(2)) - - if self.window is not None: - window_func = getattr(torch, f"{self.window}_window") - window = window_func(self.win_length, dtype=x.dtype, device=x.device) - else: - window = None - - x_stft = torch.stft(x, window=window, **self.stft_params) - # (B, #freqs, #frames, 2) -> (B, $frames, #freqs, 2) - x_stft = x_stft.transpose(1, 2) - x_power = x_stft[..., 0] ** 2 + x_stft[..., 1] ** 2 - x_amp = torch.sqrt(torch.clamp(x_power, min=self.eps)) - - x_mel = torch.matmul(x_amp, self.melmat) - x_mel = torch.clamp(x_mel, min=self.eps) - - return self.log(x_mel).transpose(1, 2) - - -class MelSpectrogramLoss(torch.nn.Module): - """Mel-spectrogram loss.""" - - def __init__( - self, - fs=22050, - fft_size=1024, - hop_size=256, - win_length=None, - window="hann", - num_mels=80, - fmin=80, - fmax=7600, - center=True, - normalized=False, - onesided=True, - eps=1e-10, - log_base=10.0, - ): - """Initialize Mel-spectrogram loss.""" - super().__init__() - self.mel_spectrogram = MelSpectrogram( - fs=fs, - fft_size=fft_size, - hop_size=hop_size, - win_length=win_length, - window=window, - num_mels=num_mels, - fmin=fmin, - fmax=fmax, - center=center, - normalized=normalized, - onesided=onesided, - eps=eps, - log_base=log_base, - ) - - def forward(self, y_hat, y): - """Calculate Mel-spectrogram loss. - - Args: - y_hat (Tensor): Generated single tensor (B, 1, T). - y (Tensor): Groundtruth single tensor (B, 1, T). - - Returns: - Tensor: Mel-spectrogram loss value. - - """ - mel_hat = self.mel_spectrogram(y_hat) - mel = self.mel_spectrogram(y) - mel_loss = F.l1_loss(mel_hat, mel) - - return mel_loss diff --git a/spaces/akhaliq/lama/bin/side_by_side.py b/spaces/akhaliq/lama/bin/side_by_side.py deleted file mode 100644 index 8ba7a42a3b8597552b8002d1eb245d5776aff7f7..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/lama/bin/side_by_side.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python3 -import os -import random - -import cv2 -import numpy as np - -from saicinpainting.evaluation.data import PrecomputedInpaintingResultsDataset -from saicinpainting.evaluation.utils import load_yaml -from saicinpainting.training.visualizers.base import visualize_mask_and_images - - -def main(args): - config = load_yaml(args.config) - - datasets = [PrecomputedInpaintingResultsDataset(args.datadir, cur_predictdir, **config.dataset_kwargs) - for cur_predictdir in args.predictdirs] - assert len({len(ds) for ds in datasets}) == 1 - len_first = len(datasets[0]) - - indices = list(range(len_first)) - if len_first > args.max_n: - indices = sorted(random.sample(indices, args.max_n)) - - os.makedirs(args.outpath, exist_ok=True) - - filename2i = {} - - keys = ['image'] + [i for i in range(len(datasets))] - for img_i in indices: - try: - mask_fname = os.path.basename(datasets[0].mask_filenames[img_i]) - if mask_fname in filename2i: - filename2i[mask_fname] += 1 - idx = filename2i[mask_fname] - mask_fname_only, ext = os.path.split(mask_fname) - mask_fname = f'{mask_fname_only}_{idx}{ext}' - else: - filename2i[mask_fname] = 1 - - cur_vis_dict = datasets[0][img_i] - for ds_i, ds in enumerate(datasets): - cur_vis_dict[ds_i] = ds[img_i]['inpainted'] - - vis_img = visualize_mask_and_images(cur_vis_dict, keys, - last_without_mask=False, - mask_only_first=True, - black_mask=args.black) - vis_img = np.clip(vis_img * 255, 0, 255).astype('uint8') - - out_fname = os.path.join(args.outpath, mask_fname) - - - - vis_img = cv2.cvtColor(vis_img, 
cv2.COLOR_RGB2BGR) - cv2.imwrite(out_fname, vis_img) - except Exception as ex: - print(f'Could not process {img_i} due to {ex}') - - -if __name__ == '__main__': - import argparse - - aparser = argparse.ArgumentParser() - aparser.add_argument('--max-n', type=int, default=100, help='Maximum number of images to print') - aparser.add_argument('--black', action='store_true', help='Whether to fill mask on GT with black') - aparser.add_argument('config', type=str, help='Path to evaluation config (e.g. configs/eval1.yaml)') - aparser.add_argument('outpath', type=str, help='Where to put results') - aparser.add_argument('datadir', type=str, - help='Path to folder with images and masks') - aparser.add_argument('predictdirs', type=str, - nargs='+', - help='Path to folders with predicts') - - - main(aparser.parse_args()) diff --git a/spaces/akhaliq/openpose/app.py b/spaces/akhaliq/openpose/app.py deleted file mode 100644 index df4bde0079b4253ce321c6146792d6942684c1ec..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/openpose/app.py +++ /dev/null @@ -1,19 +0,0 @@ -import os -os.system("hub install openpose_body_estimation==1.0.0") -import gradio as gr -import paddlehub as hub -import numpy as np -from PIL import Image - -model = hub.Module(name='openpose_body_estimation') -def inference(image): - result = model.predict(image.name) - return Image.fromarray(np.uint8(result['data'])[:,:,::-1]).convert('RGB') - -title = "OpenPose" -description = "Gradio demo for OpenPose: Real-time multi-person keypoint detection library for body, face, hands, and foot estimation. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below." -article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1812.08008' target='_blank'>OpenPose: Realtime Multi-Person 2D Pose Estimation using Part Affinity Fields</a> | <a href='https://github.com/CMU-Perceptual-Computing-Lab/openpose' target='_blank'>Github Repo</a></p>" - -examples=[['people.jpeg']] -iface = gr.Interface(inference, inputs=gr.inputs.Image(type="file"), outputs=gr.outputs.Image(type="pil"),enable_queue=True,title=title,article=article,description=description,examples=examples) -iface.launch() \ No newline at end of file diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/requests/sessions.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/requests/sessions.py deleted file mode 100644 index 3f59cab9225b5a0faac4b91fa1a79c74795428e3..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/requests/sessions.py +++ /dev/null @@ -1,771 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.sessions -~~~~~~~~~~~~~~~~~ - -This module provides a Session object to manage and persist settings across -requests (cookies, auth, proxies). 
-""" -import os -import sys -import time -from datetime import timedelta -from collections import OrderedDict - -from .auth import _basic_auth_str -from .compat import cookielib, is_py3, urljoin, urlparse, Mapping -from .cookies import ( - cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies) -from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT -from .hooks import default_hooks, dispatch_hook -from ._internal_utils import to_native_string -from .utils import to_key_val_list, default_headers, DEFAULT_PORTS -from .exceptions import ( - TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError) - -from .structures import CaseInsensitiveDict -from .adapters import HTTPAdapter - -from .utils import ( - requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies, - get_auth_from_url, rewind_body, resolve_proxies -) - -from .status_codes import codes - -# formerly defined here, reexposed here for backward compatibility -from .models import REDIRECT_STATI - -# Preferred clock, based on which one is more accurate on a given system. -if sys.platform == 'win32': - try: # Python 3.4+ - preferred_clock = time.perf_counter - except AttributeError: # Earlier than Python 3. - preferred_clock = time.clock -else: - preferred_clock = time.time - - -def merge_setting(request_setting, session_setting, dict_class=OrderedDict): - """Determines appropriate setting for a given request, taking into account - the explicit setting on that request, and the setting in the session. If a - setting is a dictionary, they will be merged together using `dict_class` - """ - - if session_setting is None: - return request_setting - - if request_setting is None: - return session_setting - - # Bypass if not a dictionary (e.g. verify) - if not ( - isinstance(session_setting, Mapping) and - isinstance(request_setting, Mapping) - ): - return request_setting - - merged_setting = dict_class(to_key_val_list(session_setting)) - merged_setting.update(to_key_val_list(request_setting)) - - # Remove keys that are set to None. Extract keys first to avoid altering - # the dictionary during iteration. - none_keys = [k for (k, v) in merged_setting.items() if v is None] - for key in none_keys: - del merged_setting[key] - - return merged_setting - - -def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): - """Properly merges both requests and session hooks. - - This is necessary because when request_hooks == {'response': []}, the - merge breaks Session hooks entirely. - """ - if session_hooks is None or session_hooks.get('response') == []: - return request_hooks - - if request_hooks is None or request_hooks.get('response') == []: - return session_hooks - - return merge_setting(request_hooks, session_hooks, dict_class) - - -class SessionRedirectMixin(object): - - def get_redirect_target(self, resp): - """Receives a Response. Returns a redirect URI or ``None``""" - # Due to the nature of how requests processes redirects this method will - # be called at least once upon the original response and at least twice - # on each subsequent redirect response (if any). - # If a custom mixin is used to handle this logic, it may be advantageous - # to cache the redirect location onto the response object as a private - # attribute. 
- if resp.is_redirect: - location = resp.headers['location'] - # Currently the underlying http module on py3 decode headers - # in latin1, but empirical evidence suggests that latin1 is very - # rarely used with non-ASCII characters in HTTP headers. - # It is more likely to get UTF8 header rather than latin1. - # This causes incorrect handling of UTF8 encoded location headers. - # To solve this, we re-encode the location in latin1. - if is_py3: - location = location.encode('latin1') - return to_native_string(location, 'utf8') - return None - - def should_strip_auth(self, old_url, new_url): - """Decide whether Authorization header should be removed when redirecting""" - old_parsed = urlparse(old_url) - new_parsed = urlparse(new_url) - if old_parsed.hostname != new_parsed.hostname: - return True - # Special case: allow http -> https redirect when using the standard - # ports. This isn't specified by RFC 7235, but is kept to avoid - # breaking backwards compatibility with older versions of requests - # that allowed any redirects on the same host. - if (old_parsed.scheme == 'http' and old_parsed.port in (80, None) - and new_parsed.scheme == 'https' and new_parsed.port in (443, None)): - return False - - # Handle default port usage corresponding to scheme. - changed_port = old_parsed.port != new_parsed.port - changed_scheme = old_parsed.scheme != new_parsed.scheme - default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) - if (not changed_scheme and old_parsed.port in default_port - and new_parsed.port in default_port): - return False - - # Standard case: root URI must match - return changed_port or changed_scheme - - def resolve_redirects(self, resp, req, stream=False, timeout=None, - verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs): - """Receives a Response. Returns a generator of Responses or Requests.""" - - hist = [] # keep track of history - - url = self.get_redirect_target(resp) - previous_fragment = urlparse(req.url).fragment - while url: - prepared_request = req.copy() - - # Update history and keep track of redirects. - # resp.history must ignore the original request in this loop - hist.append(resp) - resp.history = hist[1:] - - try: - resp.content # Consume socket so it can be released - except (ChunkedEncodingError, ContentDecodingError, RuntimeError): - resp.raw.read(decode_content=False) - - if len(resp.history) >= self.max_redirects: - raise TooManyRedirects('Exceeded {} redirects.'.format(self.max_redirects), response=resp) - - # Release the connection back into the pool. - resp.close() - - # Handle redirection without scheme (see: RFC 1808 Section 4) - if url.startswith('//'): - parsed_rurl = urlparse(resp.url) - url = ':'.join([to_native_string(parsed_rurl.scheme), url]) - - # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) - parsed = urlparse(url) - if parsed.fragment == '' and previous_fragment: - parsed = parsed._replace(fragment=previous_fragment) - elif parsed.fragment: - previous_fragment = parsed.fragment - url = parsed.geturl() - - # Facilitate relative 'location' headers, as allowed by RFC 7231. - # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') - # Compliant with RFC3986, we percent encode the url. 
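A few illustrative cases for should_strip_auth above (all URLs hypothetical); True means the Authorization header is dropped before the redirect is followed.

mixin = SessionRedirectMixin()

mixin.should_strip_auth('https://api.example.com/a', 'https://evil.example.net/a')
# True: the hostname changed, so credentials must not leak.

mixin.should_strip_auth('http://api.example.com/a', 'https://api.example.com/a')
# False: the http -> https upgrade on default ports keeps auth.

mixin.should_strip_auth('https://api.example.com/a', 'https://api.example.com:8443/a')
# True: same host, but the port moved away from the scheme default.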
- if not parsed.netloc: - url = urljoin(resp.url, requote_uri(url)) - else: - url = requote_uri(url) - - prepared_request.url = to_native_string(url) - - self.rebuild_method(prepared_request, resp) - - # https://github.com/psf/requests/issues/1084 - if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): - # https://github.com/psf/requests/issues/3490 - purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding') - for header in purged_headers: - prepared_request.headers.pop(header, None) - prepared_request.body = None - - headers = prepared_request.headers - headers.pop('Cookie', None) - - # Extract any cookies sent on the response to the cookiejar - # in the new request. Because we've mutated our copied prepared - # request, use the old one that we haven't yet touched. - extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) - merge_cookies(prepared_request._cookies, self.cookies) - prepared_request.prepare_cookies(prepared_request._cookies) - - # Rebuild auth and proxy information. - proxies = self.rebuild_proxies(prepared_request, proxies) - self.rebuild_auth(prepared_request, resp) - - # A failed tell() sets `_body_position` to `object()`. This non-None - # value ensures `rewindable` will be True, allowing us to raise an - # UnrewindableBodyError, instead of hanging the connection. - rewindable = ( - prepared_request._body_position is not None and - ('Content-Length' in headers or 'Transfer-Encoding' in headers) - ) - - # Attempt to rewind consumed file-like object. - if rewindable: - rewind_body(prepared_request) - - # Override the original request. - req = prepared_request - - if yield_requests: - yield req - else: - - resp = self.send( - req, - stream=stream, - timeout=timeout, - verify=verify, - cert=cert, - proxies=proxies, - allow_redirects=False, - **adapter_kwargs - ) - - extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) - - # extract redirect url, if any, for the next loop - url = self.get_redirect_target(resp) - yield resp - - def rebuild_auth(self, prepared_request, response): - """When being redirected we may want to strip authentication from the - request to avoid leaking credentials. This method intelligently removes - and reapplies authentication where possible to avoid credential loss. - """ - headers = prepared_request.headers - url = prepared_request.url - - if 'Authorization' in headers and self.should_strip_auth(response.request.url, url): - # If we get redirected to a new host, we should strip out any - # authentication headers. - del headers['Authorization'] - - # .netrc might have more auth for us on our new host. - new_auth = get_netrc_auth(url) if self.trust_env else None - if new_auth is not None: - prepared_request.prepare_auth(new_auth) - - def rebuild_proxies(self, prepared_request, proxies): - """This method re-evaluates the proxy configuration by considering the - environment variables. If we are redirected to a URL covered by - NO_PROXY, we strip the proxy configuration. Otherwise, we set missing - proxy keys for this URL (in case they were stripped by a previous - redirect). - - This method also replaces the Proxy-Authorization header where - necessary. 
- - :rtype: dict - """ - headers = prepared_request.headers - scheme = urlparse(prepared_request.url).scheme - new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env) - - if 'Proxy-Authorization' in headers: - del headers['Proxy-Authorization'] - - try: - username, password = get_auth_from_url(new_proxies[scheme]) - except KeyError: - username, password = None, None - - if username and password: - headers['Proxy-Authorization'] = _basic_auth_str(username, password) - - return new_proxies - - def rebuild_method(self, prepared_request, response): - """When being redirected we may want to change the method of the request - based on certain specs or browser behavior. - """ - method = prepared_request.method - - # https://tools.ietf.org/html/rfc7231#section-6.4.4 - if response.status_code == codes.see_other and method != 'HEAD': - method = 'GET' - - # Do what the browsers do, despite standards... - # First, turn 302s into GETs. - if response.status_code == codes.found and method != 'HEAD': - method = 'GET' - - # Second, if a POST is responded to with a 301, turn it into a GET. - # This bizarre behaviour is explained in Issue 1704. - if response.status_code == codes.moved and method == 'POST': - method = 'GET' - - prepared_request.method = method - - -class Session(SessionRedirectMixin): - """A Requests session. - - Provides cookie persistence, connection-pooling, and configuration. - - Basic Usage:: - - >>> import requests - >>> s = requests.Session() - >>> s.get('https://httpbin.org/get') - <Response [200]> - - Or as a context manager:: - - >>> with requests.Session() as s: - ... s.get('https://httpbin.org/get') - <Response [200]> - """ - - __attrs__ = [ - 'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify', - 'cert', 'adapters', 'stream', 'trust_env', - 'max_redirects', - ] - - def __init__(self): - - #: A case-insensitive dictionary of headers to be sent on each - #: :class:`Request <Request>` sent from this - #: :class:`Session <Session>`. - self.headers = default_headers() - - #: Default Authentication tuple or object to attach to - #: :class:`Request <Request>`. - self.auth = None - - #: Dictionary mapping protocol or protocol and host to the URL of the proxy - #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to - #: be used on each :class:`Request <Request>`. - self.proxies = {} - - #: Event-handling hooks. - self.hooks = default_hooks() - - #: Dictionary of querystring data to attach to each - #: :class:`Request <Request>`. The dictionary values may be lists for - #: representing multivalued query parameters. - self.params = {} - - #: Stream response content default. - self.stream = False - - #: SSL Verification default. - #: Defaults to `True`, requiring requests to verify the TLS certificate at the - #: remote end. - #: If verify is set to `False`, requests will accept any TLS certificate - #: presented by the server, and will ignore hostname mismatches and/or - #: expired certificates, which will make your application vulnerable to - #: man-in-the-middle (MitM) attacks. - #: Only set this to `False` for testing. - self.verify = True - - #: SSL client certificate default, if String, path to ssl client - #: cert file (.pem). If Tuple, ('cert', 'key') pair. - self.cert = None - - #: Maximum number of redirects allowed. If the request exceeds this - #: limit, a :class:`TooManyRedirects` exception is raised. - #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is - #: 30. 
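Because __init__ above stores plain instance attributes, a session can be tuned directly after construction; a small sketch with illustrative values:

s = Session()
s.headers.update({'User-Agent': 'my-app/1.0'})   # sent with every request
s.params = {'api_key': 'XYZ'}                    # merged into each query string
s.max_redirects = 5                              # fail faster on redirect loops
s.verify = '/etc/ssl/certs/internal-ca.pem'      # path to a custom CA bundle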
- self.max_redirects = DEFAULT_REDIRECT_LIMIT - - #: Trust environment settings for proxy configuration, default - #: authentication and similar. - self.trust_env = True - - #: A CookieJar containing all currently outstanding cookies set on this - #: session. By default it is a - #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but - #: may be any other ``cookielib.CookieJar`` compatible object. - self.cookies = cookiejar_from_dict({}) - - # Default connection adapters. - self.adapters = OrderedDict() - self.mount('https://', HTTPAdapter()) - self.mount('http://', HTTPAdapter()) - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - - def prepare_request(self, request): - """Constructs a :class:`PreparedRequest <PreparedRequest>` for - transmission and returns it. The :class:`PreparedRequest` has settings - merged from the :class:`Request <Request>` instance and those of the - :class:`Session`. - - :param request: :class:`Request` instance to prepare with this - session's settings. - :rtype: requests.PreparedRequest - """ - cookies = request.cookies or {} - - # Bootstrap CookieJar. - if not isinstance(cookies, cookielib.CookieJar): - cookies = cookiejar_from_dict(cookies) - - # Merge with session cookies - merged_cookies = merge_cookies( - merge_cookies(RequestsCookieJar(), self.cookies), cookies) - - # Set environment's basic authentication if not explicitly set. - auth = request.auth - if self.trust_env and not auth and not self.auth: - auth = get_netrc_auth(request.url) - - p = PreparedRequest() - p.prepare( - method=request.method.upper(), - url=request.url, - files=request.files, - data=request.data, - json=request.json, - headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict), - params=merge_setting(request.params, self.params), - auth=merge_setting(auth, self.auth), - cookies=merged_cookies, - hooks=merge_hooks(request.hooks, self.hooks), - ) - return p - - def request(self, method, url, - params=None, data=None, headers=None, cookies=None, files=None, - auth=None, timeout=None, allow_redirects=True, proxies=None, - hooks=None, stream=None, verify=None, cert=None, json=None): - """Constructs a :class:`Request <Request>`, prepares it and sends it. - Returns :class:`Response <Response>` object. - - :param method: method for the new :class:`Request` object. - :param url: URL for the new :class:`Request` object. - :param params: (optional) Dictionary or bytes to be sent in the query - string for the :class:`Request`. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param json: (optional) json to send in the body of the - :class:`Request`. - :param headers: (optional) Dictionary of HTTP Headers to send with the - :class:`Request`. - :param cookies: (optional) Dict or CookieJar object to send with the - :class:`Request`. - :param files: (optional) Dictionary of ``'filename': file-like-objects`` - for multipart encoding upload. - :param auth: (optional) Auth tuple or callable to enable - Basic/Digest/Custom HTTP Auth. - :param timeout: (optional) How long to wait for the server to send - data before giving up, as a float, or a :ref:`(connect timeout, - read timeout) <timeouts>` tuple. - :type timeout: float or tuple - :param allow_redirects: (optional) Set to True by default. - :type allow_redirects: bool - :param proxies: (optional) Dictionary mapping protocol or protocol and - hostname to the URL of the proxy. 
- :param stream: (optional) whether to immediately download the response - content. Defaults to ``False``. - :param verify: (optional) Either a boolean, in which case it controls whether we verify - the server's TLS certificate, or a string, in which case it must be a path - to a CA bundle to use. Defaults to ``True``. When set to - ``False``, requests will accept any TLS certificate presented by - the server, and will ignore hostname mismatches and/or expired - certificates, which will make your application vulnerable to - man-in-the-middle (MitM) attacks. Setting verify to ``False`` - may be useful during local development or testing. - :param cert: (optional) if String, path to ssl client cert file (.pem). - If Tuple, ('cert', 'key') pair. - :rtype: requests.Response - """ - # Create the Request. - req = Request( - method=method.upper(), - url=url, - headers=headers, - files=files, - data=data or {}, - json=json, - params=params or {}, - auth=auth, - cookies=cookies, - hooks=hooks, - ) - prep = self.prepare_request(req) - - proxies = proxies or {} - - settings = self.merge_environment_settings( - prep.url, proxies, stream, verify, cert - ) - - # Send the request. - send_kwargs = { - 'timeout': timeout, - 'allow_redirects': allow_redirects, - } - send_kwargs.update(settings) - resp = self.send(prep, **send_kwargs) - - return resp - - def get(self, url, **kwargs): - r"""Sends a GET request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - kwargs.setdefault('allow_redirects', True) - return self.request('GET', url, **kwargs) - - def options(self, url, **kwargs): - r"""Sends a OPTIONS request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - kwargs.setdefault('allow_redirects', True) - return self.request('OPTIONS', url, **kwargs) - - def head(self, url, **kwargs): - r"""Sends a HEAD request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - kwargs.setdefault('allow_redirects', False) - return self.request('HEAD', url, **kwargs) - - def post(self, url, data=None, json=None, **kwargs): - r"""Sends a POST request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param json: (optional) json to send in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - return self.request('POST', url, data=data, json=json, **kwargs) - - def put(self, url, data=None, **kwargs): - r"""Sends a PUT request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - return self.request('PUT', url, data=data, **kwargs) - - def patch(self, url, data=None, **kwargs): - r"""Sends a PATCH request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. 
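Tying the parameter docs above together, a typical call through one of the verb helpers (endpoint and payload invented):

s = Session()
resp = s.post(
    'https://httpbin.org/post',
    json={'name': 'demo'},     # serialised into the request body
    params={'verbose': '1'},   # appended to the query string
    timeout=(3.05, 10),        # (connect, read) timeouts in seconds
    allow_redirects=False,     # keep a 3xx response instead of following it
)
print(resp.status_code, resp.elapsed)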
- :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - return self.request('PATCH', url, data=data, **kwargs) - - def delete(self, url, **kwargs): - r"""Sends a DELETE request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - return self.request('DELETE', url, **kwargs) - - def send(self, request, **kwargs): - """Send a given PreparedRequest. - - :rtype: requests.Response - """ - # Set defaults that the hooks can utilize to ensure they always have - # the correct parameters to reproduce the previous request. - kwargs.setdefault('stream', self.stream) - kwargs.setdefault('verify', self.verify) - kwargs.setdefault('cert', self.cert) - if 'proxies' not in kwargs: - kwargs['proxies'] = resolve_proxies( - request, self.proxies, self.trust_env - ) - - # It's possible that users might accidentally send a Request object. - # Guard against that specific failure case. - if isinstance(request, Request): - raise ValueError('You can only send PreparedRequests.') - - # Set up variables needed for resolve_redirects and dispatching of hooks - allow_redirects = kwargs.pop('allow_redirects', True) - stream = kwargs.get('stream') - hooks = request.hooks - - # Get the appropriate adapter to use - adapter = self.get_adapter(url=request.url) - - # Start time (approximately) of the request - start = preferred_clock() - - # Send the request - r = adapter.send(request, **kwargs) - - # Total elapsed time of the request (approximately) - elapsed = preferred_clock() - start - r.elapsed = timedelta(seconds=elapsed) - - # Response manipulation hooks - r = dispatch_hook('response', hooks, r, **kwargs) - - # Persist cookies - if r.history: - - # If the hooks create history then we want those cookies too - for resp in r.history: - extract_cookies_to_jar(self.cookies, resp.request, resp.raw) - - extract_cookies_to_jar(self.cookies, request, r.raw) - - # Resolve redirects if allowed. - if allow_redirects: - # Redirect resolving generator. - gen = self.resolve_redirects(r, request, **kwargs) - history = [resp for resp in gen] - else: - history = [] - - # Shuffle things around if there's history. - if history: - # Insert the first (original) request at the start - history.insert(0, r) - # Get the last request made - r = history.pop() - r.history = history - - # If redirects aren't being followed, store the response on the Request for Response.next(). - if not allow_redirects: - try: - r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs)) - except StopIteration: - pass - - if not stream: - r.content - - return r - - def merge_environment_settings(self, url, proxies, stream, verify, cert): - """ - Check the environment and merge it with some settings. - - :rtype: dict - """ - # Gather clues from the surrounding environment. - if self.trust_env: - # Set environment's proxies. - no_proxy = proxies.get('no_proxy') if proxies is not None else None - env_proxies = get_environ_proxies(url, no_proxy=no_proxy) - for (k, v) in env_proxies.items(): - proxies.setdefault(k, v) - - # Look for requests environment configuration and be compatible - # with cURL. 
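send() below dispatches the 'response' hooks before redirect handling, so a session-level hook observes every hop; a sketch (the hook function itself is invented):

def log_response(resp, *args, **kwargs):
    # Hooks receive the Response plus the keyword arguments passed to send().
    print(resp.request.method, resp.url, '->', resp.status_code)
    return resp

s = Session()
s.hooks['response'].append(log_response)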
- if verify is True or verify is None: - verify = (os.environ.get('REQUESTS_CA_BUNDLE') or - os.environ.get('CURL_CA_BUNDLE')) - - # Merge all the kwargs. - proxies = merge_setting(proxies, self.proxies) - stream = merge_setting(stream, self.stream) - verify = merge_setting(verify, self.verify) - cert = merge_setting(cert, self.cert) - - return {'verify': verify, 'proxies': proxies, 'stream': stream, - 'cert': cert} - - def get_adapter(self, url): - """ - Returns the appropriate connection adapter for the given URL. - - :rtype: requests.adapters.BaseAdapter - """ - for (prefix, adapter) in self.adapters.items(): - - if url.lower().startswith(prefix.lower()): - return adapter - - # Nothing matches :-/ - raise InvalidSchema("No connection adapters were found for {!r}".format(url)) - - def close(self): - """Closes all adapters and as such the session""" - for v in self.adapters.values(): - v.close() - - def mount(self, prefix, adapter): - """Registers a connection adapter to a prefix. - - Adapters are sorted in descending order by prefix length. - """ - self.adapters[prefix] = adapter - keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] - - for key in keys_to_move: - self.adapters[key] = self.adapters.pop(key) - - def __getstate__(self): - state = {attr: getattr(self, attr, None) for attr in self.__attrs__} - return state - - def __setstate__(self, state): - for attr, value in state.items(): - setattr(self, attr, value) - - -def session(): - """ - Returns a :class:`Session` for context-management. - - .. deprecated:: 1.0.0 - - This method has been deprecated since version 1.0.0 and is only kept for - backwards compatibility. New code should use :class:`~requests.sessions.Session` - to create a session. This may be removed at a future date. 
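mount() above keeps the adapter registry sorted by descending prefix length, so the most specific prefix wins in get_adapter(); a sketch using the HTTPAdapter already imported at the top of this module (host and retry count invented):

s = Session()
s.mount('https://flaky.example.com/', HTTPAdapter(max_retries=5))

s.get_adapter('https://flaky.example.com/api')   # the 5-retry adapter
s.get_adapter('https://other.example.com/')      # the default 'https://' adapter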
- - :rtype: Session - """ - return Session() diff --git a/spaces/allknowingroger/Image-Models-Test176/README.md b/spaces/allknowingroger/Image-Models-Test176/README.md deleted file mode 100644 index f91e4b31ab345f987b425de029c057bfb69d9e1b..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test176/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: More Image Models -emoji: 😻 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test ---- - -<!--Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference--> \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test199/app.py b/spaces/allknowingroger/Image-Models-Test199/app.py deleted file mode 100644 index bfa47c0c7f38e581571eceb9c33184c4cb1098ab..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test199/app.py +++ /dev/null @@ -1,144 +0,0 @@ -import gradio as gr -# import os -# import sys -# from pathlib import Path -import time - -models =[ - "Sekharreddy/zaq", - "Yntec/Dreamful3", - "Lathavya/my-pet-dog-on-beach-iwe", - "01fe20bcs251/Vishal_img", - "Joviex/agm", - "JainSanyam/my-beautiful-cat", - "anilrolex/my-pet-dog", - "afraah/my-xzg-cat", - "Yntec/DucHaitenDarkside4", -] - - -model_functions = {} -model_idx = 1 -for model_path in models: - try: - model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False) - except Exception as error: - def the_fn(txt): - return None - model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"]) - model_idx+=1 - - -def send_it_idx(idx): - def send_it_fn(prompt): - output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt) - return output - return send_it_fn - -def get_prompts(prompt_text): - return prompt_text - -def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - -def all_task_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d = gr.update(value=0) - tog = gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d = gr.update(value=et) - else: - d = gr.update(value=0) - tog = gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d, tog - -def all_task_start(): - print("\n\n\n\n\n\n\n") - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0) - -def clear_fn(): - nn = len(models) - return tuple([None, *[None for _ in range(nn)]]) - - - -with gr.Blocks(title="SD Models") as my_interface: - with gr.Column(scale=12): - # with gr.Row(): - # gr.Markdown("""- Primary prompt: 你想画的内容(英文单词,如 a cat, 加英文逗号效果更好;点 Improve 按钮进行完善)\n- Real prompt: 完善后的提示词,出现后再点右边的 Run 按钮开始运行""") - with gr.Row(): - with gr.Row(scale=6): - primary_prompt=gr.Textbox(label="Prompt", value="") - # real_prompt=gr.Textbox(label="Real prompt") - with gr.Row(scale=6): - # improve_prompts_btn=gr.Button("Improve") - with gr.Row(): - run=gr.Button("Run",variant="primary") - clear_btn=gr.Button("Clear") - with gr.Row(): - sd_outputs = {} - model_idx = 1 - for model_path in models: - with gr.Column(scale=3, min_width=320): - with gr.Box(): - sd_outputs[model_idx] = gr.Image(label=model_path) - pass - model_idx += 1 - pass - pass - - with gr.Row(visible=False): - 
start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - start_box.change( - all_task_end, - [start_box, end_box], - [start_box, tog_box], - every=1, - show_progress=False) - - primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box]) - run.click(all_task_start, None, [start_box, end_box, tog_box]) - runs_dict = {} - model_idx = 1 - for model_path in models: - runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]]) - model_idx += 1 - pass - pass - - # improve_prompts_btn_clicked=improve_prompts_btn.click( - # get_prompts, - # inputs=[primary_prompt], - # outputs=[primary_prompt], - # cancels=list(runs_dict.values())) - clear_btn.click( - clear_fn, - None, - [primary_prompt, *list(sd_outputs.values())], - cancels=[*list(runs_dict.values())]) - tog_box.change( - clear_it, - tog_box, - tog_box, - cancels=[*list(runs_dict.values())]) - -my_interface.queue(concurrency_count=600, status_update_rate=1) -my_interface.launch(inline=True, show_api=False) - \ No newline at end of file diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_underflow.c b/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_underflow.c deleted file mode 100644 index 96216a691712e24b97e63ca4c227a315f189a9aa..0000000000000000000000000000000000000000 --- a/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_underflow.c +++ /dev/null @@ -1,162 +0,0 @@ -/** @file patest_underflow.c - @ingroup test_src - @brief Simulate an output buffer underflow condition. - Tests whether the stream can be stopped when underflowing buffers. - @author Ross Bencina <rossb@audiomulch.com> - @author Phil Burk <philburk@softsynth.com> -*/ -/* - * $Id$ - * - * This program uses the PortAudio Portable Audio Library. - * For more information see: http://www.portaudio.com - * Copyright (c) 1999-2000 Ross Bencina and Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. 
- */ - -#include <stdio.h> -#include <math.h> -#include "portaudio.h" - -#define NUM_SECONDS (20) -#define SAMPLE_RATE (44100) -#define FRAMES_PER_BUFFER (2048) -#define MSEC_PER_BUFFER ( (FRAMES_PER_BUFFER * 1000) / SAMPLE_RATE ) - -#ifndef M_PI -#define M_PI (3.14159265) -#endif - -#define TABLE_SIZE (200) -typedef struct -{ - float sine[TABLE_SIZE]; - int left_phase; - int right_phase; - int sleepTime; -} -paTestData; - -/* This routine will be called by the PortAudio engine when audio is needed. -** It may called at interrupt level on some machines so don't do anything -** that could mess up the system like calling malloc() or free(). -*/ -static int patestCallback( const void *inputBuffer, void *outputBuffer, - unsigned long framesPerBuffer, - const PaStreamCallbackTimeInfo* timeInfo, - PaStreamCallbackFlags statusFlags, - void *userData ) -{ - paTestData *data = (paTestData*)userData; - float *out = (float*)outputBuffer; - unsigned long i; - int finished = 0; - (void) inputBuffer; /* Prevent unused variable warnings. */ - for( i=0; i<framesPerBuffer; i++ ) - { - *out++ = data->sine[data->left_phase]; /* left */ - *out++ = data->sine[data->right_phase]; /* right */ - data->left_phase += 1; - if( data->left_phase >= TABLE_SIZE ) data->left_phase -= TABLE_SIZE; - data->right_phase += 3; /* higher pitch so we can distinguish left and right. */ - if( data->right_phase >= TABLE_SIZE ) data->right_phase -= TABLE_SIZE; - } - - /* Cause underflow to occur. */ - if( data->sleepTime > 0 ) Pa_Sleep( data->sleepTime ); - data->sleepTime += 1; - - return finished; -} - -/*******************************************************************/ -int main(void); -int main(void) -{ - PaStreamParameters outputParameters; - PaStream *stream; - PaError err; - paTestData data; - int i; - printf("PortAudio Test: output sine wave. SR = %d, BufSize = %d\n", SAMPLE_RATE, FRAMES_PER_BUFFER); - /* initialise sinusoidal wavetable */ - for( i=0; i<TABLE_SIZE; i++ ) - { - data.sine[i] = (float) sin( ((double)i/(double)TABLE_SIZE) * M_PI * 2. 
); - } - data.left_phase = data.right_phase = data.sleepTime = 0; - err = Pa_Initialize(); - if( err != paNoError ) goto error; - - outputParameters.device = Pa_GetDefaultOutputDevice(); /* default output device */ - if (outputParameters.device == paNoDevice) { - fprintf(stderr,"Error: No default output device.\n"); - goto error; - } - outputParameters.channelCount = 2; /* stereo output */ - outputParameters.sampleFormat = paFloat32; /* 32 bit floating point output */ - outputParameters.suggestedLatency = Pa_GetDeviceInfo( outputParameters.device )->defaultLowOutputLatency; - outputParameters.hostApiSpecificStreamInfo = NULL; - err = Pa_OpenStream( - &stream, - NULL, /* no input */ - &outputParameters, - SAMPLE_RATE, - FRAMES_PER_BUFFER, - paClipOff, /* we won't output out of range samples so don't bother clipping them */ - patestCallback, - &data ); - if( err != paNoError ) goto error; - err = Pa_StartStream( stream ); - if( err != paNoError ) goto error; - - while( data.sleepTime < (2 * MSEC_PER_BUFFER) ) - { - printf("SleepTime = %d\n", data.sleepTime ); - Pa_Sleep( data.sleepTime ); - } - - printf("Try to stop stream.\n"); - err = Pa_StopStream( stream ); - if( err != paNoError ) goto error; - err = Pa_CloseStream( stream ); - if( err != paNoError ) goto error; - Pa_Terminate(); - printf("Test finished.\n"); - return err; -error: - Pa_Terminate(); - fprintf( stderr, "An error occurred while using the portaudio stream\n" ); - fprintf( stderr, "Error number: %d\n", err ); - fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); - return err; -} diff --git a/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/docs/README.md b/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/docs/README.md deleted file mode 100644 index f43efa6801d5a921c0e8c9d2fbe3278d72b48dc8..0000000000000000000000000000000000000000 --- a/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/docs/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# text-generation-webui manual - -## Table of contents - -* [Custom-chat-characters](Custom-chat-characters.md) -* [Docker Compose](Docker.md) -* [DeepSpeed](DeepSpeed.md) -* [Extensions](Extensions.md) -* [FlexGen](FlexGen.md) -* [GPTQ-models-(4-bit-mode)](GPTQ-models-(4-bit-mode).md) -* [llama.cpp-models](llama.cpp-models.md) -* [LLaMA-model](LLaMA-model.md) -* [Low-VRAM-guide](Low-VRAM-guide.md) -* [RWKV-model](RWKV-model.md) -* [Spell-book](Spell-book.md) -* [System-requirements](System-requirements.md) -* [Using-LoRAs](Using-LoRAs.md) -* [Windows-installation-guide](Windows-installation-guide.md) -* [WSL-installation-guide](WSL-installation-guide.md) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Debugger/Cygdb.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Debugger/Cygdb.py deleted file mode 100644 index 45f31ce6f77dde136f912ad1b127e51bda64627d..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Debugger/Cygdb.py +++ /dev/null @@ -1,158 +0,0 @@ -#!/usr/bin/env python - -""" -The Cython debugger - -The current directory should contain a directory named 'cython_debug', or a -path to the cython project directory should be given (the parent directory of -cython_debug). - -Additional gdb args can be provided only if a path to the project directory is -given. 
-""" - -import os -import sys -import glob -import tempfile -import textwrap -import subprocess -import optparse -import logging - -logger = logging.getLogger(__name__) - -def make_command_file(path_to_debug_info, prefix_code='', no_import=False): - if not no_import: - pattern = os.path.join(path_to_debug_info, - 'cython_debug', - 'cython_debug_info_*') - debug_files = glob.glob(pattern) - - if not debug_files: - sys.exit('%s.\nNo debug files were found in %s. Aborting.' % ( - usage, os.path.abspath(path_to_debug_info))) - - fd, tempfilename = tempfile.mkstemp() - f = os.fdopen(fd, 'w') - try: - f.write(prefix_code) - f.write(textwrap.dedent('''\ - # This is a gdb command file - # See https://sourceware.org/gdb/onlinedocs/gdb/Command-Files.html - - set breakpoint pending on - set print pretty on - - python - # Activate virtualenv, if we were launched from one - import os - virtualenv = os.getenv('VIRTUAL_ENV') - if virtualenv: - path_to_activate_this_py = os.path.join(virtualenv, 'bin', 'activate_this.py') - print("gdb command file: Activating virtualenv: %s; path_to_activate_this_py: %s" % ( - virtualenv, path_to_activate_this_py)) - with open(path_to_activate_this_py) as f: - exec(f.read(), dict(__file__=path_to_activate_this_py)) - - from Cython.Debugger import libcython, libpython - end - ''')) - - if no_import: - # don't do this, this overrides file command in .gdbinit - # f.write("file %s\n" % sys.executable) - pass - else: - path = os.path.join(path_to_debug_info, "cython_debug", "interpreter") - interpreter_file = open(path) - try: - interpreter = interpreter_file.read() - finally: - interpreter_file.close() - f.write("file %s\n" % interpreter) - f.write('\n'.join('cy import %s\n' % fn for fn in debug_files)) - f.write(textwrap.dedent('''\ - python - import sys - try: - gdb.lookup_type('PyModuleObject') - except RuntimeError: - sys.stderr.write( - 'Python was not compiled with debug symbols (or it was ' - 'stripped). Some functionality may not work (properly).\\n') - end - - source .cygdbinit - ''')) - finally: - f.close() - - return tempfilename - -usage = "Usage: cygdb [options] [PATH [-- GDB_ARGUMENTS]]" - -def main(path_to_debug_info=None, gdb_argv=None, no_import=False): - """ - Start the Cython debugger. This tells gdb to import the Cython and Python - extensions (libcython.py and libpython.py) and it enables gdb's pending - breakpoints. - - path_to_debug_info is the path to the Cython build directory - gdb_argv is the list of options to gdb - no_import tells cygdb whether it should import debug information - """ - parser = optparse.OptionParser(usage=usage) - parser.add_option("--gdb-executable", - dest="gdb", default='gdb', - help="gdb executable to use [default: gdb]") - parser.add_option("--verbose", "-v", - dest="verbosity", action="count", default=0, - help="Verbose mode. Multiple -v options increase the verbosity") - - (options, args) = parser.parse_args() - if path_to_debug_info is None: - if len(args) > 1: - path_to_debug_info = args[0] - else: - path_to_debug_info = os.curdir - - if gdb_argv is None: - gdb_argv = args[1:] - - if path_to_debug_info == '--': - no_import = True - - logging_level = logging.WARN - if options.verbosity == 1: - logging_level = logging.INFO - if options.verbosity >= 2: - logging_level = logging.DEBUG - logging.basicConfig(level=logging_level) - - logger.info("verbosity = %r", options.verbosity) - logger.debug("options = %r; args = %r", options, args) - logger.debug("Done parsing command-line options. 
path_to_debug_info = %r, gdb_argv = %r", - path_to_debug_info, gdb_argv) - - tempfilename = make_command_file(path_to_debug_info, no_import=no_import) - logger.info("Launching %s with command file: %s and gdb_argv: %s", - options.gdb, tempfilename, gdb_argv) - with open(tempfilename) as tempfile: - logger.debug('Command file (%s) contains: """\n%s"""', tempfilename, tempfile.read()) - logger.info("Spawning %s...", options.gdb) - p = subprocess.Popen([options.gdb, '-command', tempfilename] + gdb_argv) - logger.info("Spawned %s (pid %d)", options.gdb, p.pid) - while True: - try: - logger.debug("Waiting for gdb (pid %d) to exit...", p.pid) - ret = p.wait() - logger.debug("Wait for gdb (pid %d) to exit is done. Returned: %r", p.pid, ret) - except KeyboardInterrupt: - pass - else: - break - logger.debug("Closing temp command file with fd: %s", tempfile.fileno()) - logger.debug("Removing temp command file: %s", tempfilename) - os.remove(tempfilename) - logger.debug("Removed temp command file: %s", tempfilename) diff --git a/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/image_degradation/bsrgan.py b/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/image_degradation/bsrgan.py deleted file mode 100644 index 32ef56169978e550090261cddbcf5eb611a6173b..0000000000000000000000000000000000000000 --- a/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/image_degradation/bsrgan.py +++ /dev/null @@ -1,730 +0,0 @@ -# -*- coding: utf-8 -*- -""" -# -------------------------------------------- -# Super-Resolution -# -------------------------------------------- -# -# Kai Zhang (cskaizhang@gmail.com) -# https://github.com/cszn -# From 2019/03--2021/08 -# -------------------------------------------- -""" - -import numpy as np -import cv2 -import torch - -from functools import partial -import random -from scipy import ndimage -import scipy -import scipy.stats as ss -from scipy.interpolate import interp2d -from scipy.linalg import orth -import albumentations - -import ldm.modules.image_degradation.utils_image as util - - -def modcrop_np(img, sf): - ''' - Args: - img: numpy image, WxH or WxHxC - sf: scale factor - Return: - cropped image - ''' - w, h = img.shape[:2] - im = np.copy(img) - return im[:w - w % sf, :h - h % sf, ...] - - -""" -# -------------------------------------------- -# anisotropic Gaussian kernels -# -------------------------------------------- -""" - - -def analytic_kernel(k): - """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" - k_size = k.shape[0] - # Calculate the big kernels size - big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) - # Loop over the small kernel to fill the big one - for r in range(k_size): - for c in range(k_size): - big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k - # Crop the edges of the big kernel to ignore very small values and increase run time of SR - crop = k_size // 2 - cropped_big_k = big_k[crop:-crop, crop:-crop] - # Normalize to 1 - return cropped_big_k / cropped_big_k.sum() - - -def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): - """ generate an anisotropic Gaussian kernel - Args: - ksize : e.g., 15, kernel size - theta : [0, pi], rotation angle range - l1 : [0.1,50], scaling of eigenvalues - l2 : [0.1,l1], scaling of eigenvalues - If l1 = l2, will get an isotropic Gaussian kernel. 
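In practice the Cygdb module above is reached through the cygdb console script that Cython installs; a hypothetical invocation (paths invented) matching its usage string:

import subprocess

# cygdb [options] [PATH [-- GDB_ARGUMENTS]]
subprocess.check_call(
    ['cygdb', 'path/to/build', '--', '--args', 'python', 'demo.py'])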
- Returns: - k : kernel - """ - - v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) - V = np.array([[v[0], v[1]], [v[1], -v[0]]]) - D = np.array([[l1, 0], [0, l2]]) - Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) - k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) - - return k - - -def gm_blur_kernel(mean, cov, size=15): - center = size / 2.0 + 0.5 - k = np.zeros([size, size]) - for y in range(size): - for x in range(size): - cy = y - center + 1 - cx = x - center + 1 - k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) - - k = k / np.sum(k) - return k - - -def shift_pixel(x, sf, upper_left=True): - """shift pixel for super-resolution with different scale factors - Args: - x: WxHxC or WxH - sf: scale factor - upper_left: shift direction - """ - h, w = x.shape[:2] - shift = (sf - 1) * 0.5 - xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) - if upper_left: - x1 = xv + shift - y1 = yv + shift - else: - x1 = xv - shift - y1 = yv - shift - - x1 = np.clip(x1, 0, w - 1) - y1 = np.clip(y1, 0, h - 1) - - if x.ndim == 2: - x = interp2d(xv, yv, x)(x1, y1) - if x.ndim == 3: - for i in range(x.shape[-1]): - x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) - - return x - - -def blur(x, k): - ''' - x: image, NxcxHxW - k: kernel, Nx1xhxw - ''' - n, c = x.shape[:2] - p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 - x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') - k = k.repeat(1, c, 1, 1) - k = k.view(-1, 1, k.shape[2], k.shape[3]) - x = x.view(1, -1, x.shape[2], x.shape[3]) - x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) - x = x.view(n, c, x.shape[2], x.shape[3]) - - return x - - -def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): - """" - # modified version of https://github.com/assafshocher/BlindSR_dataset_generator - # Kai Zhang - # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var - # max_var = 2.5 * sf - """ - # Set random eigen-vals (lambdas) and angle (theta) for COV matrix - lambda_1 = min_var + np.random.rand() * (max_var - min_var) - lambda_2 = min_var + np.random.rand() * (max_var - min_var) - theta = np.random.rand() * np.pi # random theta - noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 - - # Set COV matrix using Lambdas and Theta - LAMBDA = np.diag([lambda_1, lambda_2]) - Q = np.array([[np.cos(theta), -np.sin(theta)], - [np.sin(theta), np.cos(theta)]]) - SIGMA = Q @ LAMBDA @ Q.T - INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] - - # Set expectation position (shifting kernel for aligned image) - MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) - MU = MU[None, None, :, None] - - # Create meshgrid for Gaussian - [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) - Z = np.stack([X, Y], 2)[:, :, :, None] - - # Calcualte Gaussian for every pixel of the kernel - ZZ = Z - MU - ZZ_t = ZZ.transpose(0, 1, 3, 2) - raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) - - # shift the kernel so it will be centered - # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) - - # Normalize the kernel and return - # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) - kernel = raw_kernel / np.sum(raw_kernel) - return kernel - - -def fspecial_gaussian(hsize, sigma): - hsize = [hsize, hsize] - siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] - std = 
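A sketch of generating one of these kernels and applying it the same way the degradation code below does (kernel size, angle, and image shape illustrative):

import numpy as np
from scipy import ndimage

# 15x15 anisotropic Gaussian rotated 45 degrees, eigen-scales 6 and 2.
k = anisotropic_Gaussian(ksize=15, theta=np.pi / 4, l1=6, l2=2)
assert abs(k.sum() - 1.0) < 1e-8   # kernels come back normalised

img = np.random.rand(128, 128, 3).astype(np.float32)   # dummy HxWxC image
blurred = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror')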
sigma - [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) - arg = -(x * x + y * y) / (2 * std * std) - h = np.exp(arg) - h[h < scipy.finfo(float).eps * h.max()] = 0 - sumh = h.sum() - if sumh != 0: - h = h / sumh - return h - - -def fspecial_laplacian(alpha): - alpha = max([0, min([alpha, 1])]) - h1 = alpha / (alpha + 1) - h2 = (1 - alpha) / (alpha + 1) - h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] - h = np.array(h) - return h - - -def fspecial(filter_type, *args, **kwargs): - ''' - python code from: - https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py - ''' - if filter_type == 'gaussian': - return fspecial_gaussian(*args, **kwargs) - if filter_type == 'laplacian': - return fspecial_laplacian(*args, **kwargs) - - -""" -# -------------------------------------------- -# degradation models -# -------------------------------------------- -""" - - -def bicubic_degradation(x, sf=3): - ''' - Args: - x: HxWxC image, [0, 1] - sf: down-scale factor - Return: - bicubicly downsampled LR image - ''' - x = util.imresize_np(x, scale=1 / sf) - return x - - -def srmd_degradation(x, k, sf=3): - ''' blur + bicubic downsampling - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2018learning, - title={Learning a single convolutional super-resolution network for multiple degradations}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={3262--3271}, - year={2018} - } - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' - x = bicubic_degradation(x, sf=sf) - return x - - -def dpsr_degradation(x, k, sf=3): - ''' bicubic downsampling + blur - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2019deep, - title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={1671--1681}, - year={2019} - } - ''' - x = bicubic_degradation(x, sf=sf) - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - return x - - -def classical_degradation(x, k, sf=3): - ''' blur + downsampling - Args: - x: HxWxC image, [0, 1]/[0, 255] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) - st = 0 - return x[st::sf, st::sf, ...] - - -def add_sharpening(img, weight=0.5, radius=50, threshold=10): - """USM sharpening. borrowed from real-ESRGAN - Input image: I; Blurry image: B. - 1. K = I + weight * (I - B) - 2. Mask = 1 if abs(I - B) > threshold, else: 0 - 3. Blur mask: - 4. Out = Mask * K + (1 - Mask) * I - Args: - img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. - weight (float): Sharp weight. Default: 1. - radius (float): Kernel size of Gaussian blur. Default: 50. 
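The kernel-based degradations above differ only in operator order; a minimal sketch with a synthetic image (sizes illustrative). Note that fspecial_gaussian relies on scipy.finfo, which only older SciPy releases expose (np.finfo is the modern equivalent).

import numpy as np

hr = np.random.rand(96, 96, 3)       # HxWxC in [0, 1]
k = fspecial('gaussian', 15, 2.0)    # 15x15 isotropic Gaussian, sigma 2

lr_srmd = srmd_degradation(hr, k, sf=3)          # blur, then bicubic 1/3 -> (32, 32, 3)
lr_dpsr = dpsr_degradation(hr, k, sf=3)          # bicubic 1/3, then blur -> (32, 32, 3)
lr_classic = classical_degradation(hr, k, sf=3)  # blur, then stride-3 pick -> (32, 32, 3)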
- threshold (int): - """ - if radius % 2 == 0: - radius += 1 - blur = cv2.GaussianBlur(img, (radius, radius), 0) - residual = img - blur - mask = np.abs(residual) * 255 > threshold - mask = mask.astype('float32') - soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) - - K = img + weight * residual - K = np.clip(K, 0, 1) - return soft_mask * K + (1 - soft_mask) * img - - -def add_blur(img, sf=4): - wd2 = 4.0 + sf - wd = 2.0 + 0.2 * sf - if random.random() < 0.5: - l1 = wd2 * random.random() - l2 = wd2 * random.random() - k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) - else: - k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random()) - img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror') - - return img - - -def add_resize(img, sf=4): - rnum = np.random.rand() - if rnum > 0.8: # up - sf1 = random.uniform(1, 2) - elif rnum < 0.7: # down - sf1 = random.uniform(0.5 / sf, 1) - else: - sf1 = 1.0 - img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - return img - - -# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): -# noise_level = random.randint(noise_level1, noise_level2) -# rnum = np.random.rand() -# if rnum > 0.6: # add color Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) -# elif rnum < 0.4: # add grayscale Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) -# else: # add noise -# L = noise_level2 / 255. -# D = np.diag(np.random.rand(3)) -# U = orth(np.random.rand(3, 3)) -# conv = np.dot(np.dot(np.transpose(U), D), U) -# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) -# img = np.clip(img, 0.0, 1.0) -# return img - -def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - rnum = np.random.rand() - if rnum > 0.6: # add color Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: # add grayscale Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: # add noise - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_speckle_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - img = np.clip(img, 0.0, 1.0) - rnum = random.random() - if rnum > 0.6: - img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: - img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_Poisson_noise(img): - img = np.clip((img * 255.0).round(), 0, 255) / 255. 
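The noise injectors above all take and return float images in [0, 1] and clip on the way out; a quick sketch:

import numpy as np

img = np.random.rand(64, 64, 3).astype(np.float32)

noisy = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
noisy = add_speckle_noise(noisy)   # multiplicative, signal-dependent noise
assert noisy.shape == img.shape and 0.0 <= noisy.min() and noisy.max() <= 1.0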
- vals = 10 ** (2 * random.random() + 2.0) # [2, 4] - if random.random() < 0.5: - img = np.random.poisson(img * vals).astype(np.float32) / vals - else: - img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) - img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. - noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray - img += noise_gray[:, :, np.newaxis] - img = np.clip(img, 0.0, 1.0) - return img - - -def add_JPEG_noise(img): - quality_factor = random.randint(30, 95) - img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) - result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) - img = cv2.imdecode(encimg, 1) - img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) - return img - - -def random_crop(lq, hq, sf=4, lq_patchsize=64): - h, w = lq.shape[:2] - rnd_h = random.randint(0, h - lq_patchsize) - rnd_w = random.randint(0, w - lq_patchsize) - lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] - - rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) - hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] - return lq, hq - - -def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - hq = img.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - img = util.imresize_np(img, 1 / 2, True) - img = np.clip(img, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - img = add_blur(img, sf=sf) - - elif i == 1: - img = add_blur(img, sf=sf) - - elif i == 2: - a, b = img.shape[1], img.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') - img = img[0::sf, 0::sf, ...] 
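random_crop above keeps the LR and HR patches aligned by scaling the same crop offsets by sf; a sketch:

import numpy as np

sf, patch = 4, 64
hq = np.random.rand(256, 256, 3)
lq = hq[0::sf, 0::sf, :]   # any sf-times-smaller counterpart will do here

lq_p, hq_p = random_crop(lq, hq, sf=sf, lq_patchsize=patch)
assert lq_p.shape[:2] == (patch, patch)
assert hq_p.shape[:2] == (patch * sf, patch * sf)   # same region at 4x resolution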
# nearest downsampling - img = np.clip(img, 0.0, 1.0) - - elif i == 3: - # downsample3 - img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - img = add_JPEG_noise(img) - - elif i == 6: - # add processed camera sensor noise - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - img = add_JPEG_noise(img) - - # random crop - img, hq = random_crop(img, hq, sf_ori, lq_patchsize) - - return img, hq - - -# todo no isp_model? -def degradation_bsrgan_variant(image, sf=4, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - image = util.uint2single(image) - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = image.shape[:2] - image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = image.shape[:2] - - hq = image.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - image = util.imresize_np(image, 1 / 2, True) - image = np.clip(image, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - image = add_blur(image, sf=sf) - - elif i == 1: - image = add_blur(image, sf=sf) - - elif i == 2: - a, b = image.shape[1], image.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') - image = image[0::sf, 0::sf, ...] 
# nearest downsampling - image = np.clip(image, 0.0, 1.0) - - elif i == 3: - # downsample3 - image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - image = np.clip(image, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - image = add_JPEG_noise(image) - - # elif i == 6: - # # add processed camera sensor noise - # if random.random() < isp_prob and isp_model is not None: - # with torch.no_grad(): - # img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - image = add_JPEG_noise(image) - image = util.single2uint(image) - example = {"image":image} - return example - - -# TODO incase there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc... -def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None): - """ - This is an extended degradation model by combining - the degradation models of BSRGAN and Real-ESRGAN - ---------- - img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - use_shuffle: the degradation shuffle - use_sharp: sharpening the img - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - if use_sharp: - img = add_sharpening(img) - hq = img.copy() - - if random.random() < shuffle_prob: - shuffle_order = random.sample(range(13), 13) - else: - shuffle_order = list(range(13)) - # local shuffle for noise, JPEG is always the last one - shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6))) - shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13))) - - poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1 - - for i in shuffle_order: - if i == 0: - img = add_blur(img, sf=sf) - elif i == 1: - img = add_resize(img, sf=sf) - elif i == 2: - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) - elif i == 3: - if random.random() < poisson_prob: - img = add_Poisson_noise(img) - elif i == 4: - if random.random() < speckle_prob: - img = add_speckle_noise(img) - elif i == 5: - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - elif i == 6: - img = add_JPEG_noise(img) - elif i == 7: - img = add_blur(img, sf=sf) - elif i == 8: - img = add_resize(img, sf=sf) - elif i == 9: - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) - elif i == 10: - if random.random() < poisson_prob: - img = add_Poisson_noise(img) - elif i == 11: - if random.random() < speckle_prob: - img = add_speckle_noise(img) - elif i == 12: - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - else: - print('check the shuffle!') - - # resize to desired size - img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])), - interpolation=random.choice([1, 2, 3])) - - # add final JPEG compression noise - img = add_JPEG_noise(img) - - # random crop - img, hq = random_crop(img, hq, sf, lq_patchsize) - 
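- # at this point img is the fully degraded LQ patch and hq the matching clean patch at sf times its resolution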
- return img, hq - - -if __name__ == '__main__': - print("hey") - img = util.imread_uint('utils/test.png', 3) - print(img) - img = util.uint2single(img) - print(img) - img = img[:448, :448] - h = img.shape[0] // 4 - print("resizing to", h) - sf = 4 - img_hq = img # keep the clean image as the high-quality reference for the comparison strip - deg_fn = partial(degradation_bsrgan_variant, sf=sf) - for i in range(20): - print(i) - # degradation_bsrgan_variant expects a uint8 image and returns {"image": uint8} - img_lq = util.uint2single(deg_fn(util.single2uint(img))["image"]) - print(img_lq) - img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img)["image"] - print(img_lq.shape) - print("bicubic", img_lq_bicubic.shape) - print(img_hq.shape) - lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1) - util.imsave(img_concat, str(i) + '.png') - - diff --git a/spaces/awacke1/Text-generation/README.md b/spaces/awacke1/Text-generation/README.md deleted file mode 100644 index 2dc4176d3b00ae772c80361c517d5b2b4f26ae58..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Text-generation/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Text Generation -emoji: 🏃 -colorFrom: gray -colorTo: gray -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/badayvedat/AudioSep/models/CLAP/training/main.py b/spaces/badayvedat/AudioSep/models/CLAP/training/main.py deleted file mode 100644 index 3b563a5d001be7adfbe779dee7ad8ac49aadc50d..0000000000000000000000000000000000000000 --- a/spaces/badayvedat/AudioSep/models/CLAP/training/main.py +++ /dev/null @@ -1,596 +0,0 @@ -from inspect import getargs -import logging -import os -import random -from datetime import datetime -import bisect -import copy -import numpy as np -import torch -import torch.backends.cudnn as cudnn -from torch import optim -from torch.cuda.amp import GradScaler -import faulthandler -import pathlib - -try: - import wandb -except ImportError: - wandb = None - -try: - import torch.utils.tensorboard as tensorboard -except ImportError: - tensorboard = None - -try: - import horovod.torch as hvd -except ImportError: - hvd = None - -from open_clip import create_model_and_transforms, trace_model, create_model -from training.data import get_data -from training.distributed import is_master, init_distributed_device, world_info_from_env -from training.logger import setup_logging -from training.params import parse_args -from training.scheduler import cosine_lr -from training.train import train_one_epoch, evaluate -from open_clip.utils import dataset_split, get_optimizer - - -def maintain_ckpts(args, startidx, all_idx_len): - for i in reversed(range(startidx, all_idx_len)): - if os.path.exists(os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt")): - os.rename( - os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt"), - os.path.join(args.checkpoint_path, f"epoch_top_{i+1}.pt"), - ) - if os.path.exists( - os.path.join(args.checkpoint_path, f"epoch_top_{all_idx_len}.pt") - ): - os.remove(os.path.join(args.checkpoint_path, f"epoch_top_{all_idx_len}.pt")) - return - - -def update_top_k_performance( - new_metrics_inputs, current_top_k_ckpt_metrics, args, ckpt, bignumbetter=True -): - """ - Record the top-k performance of the current epoch.
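- Checkpoints are saved as epoch_top_{i}.pt; when a new score enters the top k, maintain_ckpts shifts the lower-ranked files down and drops the last one.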
- current_top_k_metrics is a dictionary of the form: {1: top_1_ckpt_measure, 2: top_2_ckpt_measure, ...} - """ - if isinstance(new_metrics_inputs, (list, tuple)): - new_metrics_inputs = np.mean(new_metrics_inputs) - return update_top_k_performance( - new_metrics_inputs, - current_top_k_ckpt_metrics, - args=args, - ckpt=ckpt, - bignumbetter=bignumbetter, - ) - elif isinstance(new_metrics_inputs, dict): - new_metrics_inputs = np.mean(list(new_metrics_inputs.values())) - return update_top_k_performance( - new_metrics_inputs, - current_top_k_ckpt_metrics, - args=args, - ckpt=ckpt, - bignumbetter=bignumbetter, - ) - elif isinstance(new_metrics_inputs, (float, int)): - update_flag = {k: False for k in current_top_k_ckpt_metrics.keys()} - sorted_keys = sorted(current_top_k_ckpt_metrics.keys()) - sorted_values = sorted( - current_top_k_ckpt_metrics.values(), reverse=bignumbetter - ) - sorted_values_ = copy.deepcopy(sorted_values) - sorted_values.append(new_metrics_inputs) - sorted_values = sorted(sorted_values, reverse=bignumbetter) - sorted_values = sorted_values[:-1] - - if sorted_values == sorted_values_: - return current_top_k_ckpt_metrics, new_metrics_inputs - else: - for i in range(len(sorted_keys)): - if current_top_k_ckpt_metrics[sorted_keys[i]] != sorted_values[i]: - current_top_k_ckpt_metrics[sorted_keys[i]] = sorted_values[i] - update_flag[sorted_keys[i]] = True - for i in range(len(update_flag)): - if update_flag[i]: - maintain_ckpts(args, i, len(sorted_keys)) - torch.save( - ckpt, - os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt"), - ) - break - return current_top_k_ckpt_metrics, new_metrics_inputs - - -# def updateifNone(a, b): -# a = b if None else a -# return a - - -def is_pretrained_params(n): - return ( - n.startswith("transformer") - or n in ["positional_embedding", "text_projection"] - or n.startswith("token_embedding") - or n.startswith("ln_final") - or n.startswith("logit_scale_t") - ) - - -def random_seed(seed=42, rank=0): - torch.manual_seed(seed + rank) - np.random.seed(seed + rank) - random.seed(seed + rank) - - -def main(): - args = parse_args() - # sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule? - args.amodel = args.amodel.replace("/", "-") - # download sizes.json file - - # (yusong): the below two lines are for debug - # print("setting up faulthandler") - # faulthandler.register(10) - - random.seed(args.seed) - torch.manual_seed(args.seed) - torch.cuda.manual_seed(args.seed) - torch.cuda.manual_seed_all(args.seed) - np.random.seed(args.seed) - if args.tmodel == "bert" or args.tmodel == "roberta" or args.tmodel == "bart": - assert ( - args.pretrained == "" or args.pretrained is None - ), "bert/roberta/bart text encoder does not support pretrained models." 
- - # get the name of the experiments - if args.name is None: - args.name = "-".join( - [ - datetime.now().strftime("%Y_%m_%d-%H_%M_%S"), - f"model_{args.amodel}", - f"lr_{args.lr}", - f"b_{args.batch_size}", - f"j_{args.workers}", - f"p_{args.precision}", - ] - ) - - # discover initial world args early so we can log properly - args.distributed = False - args.local_rank, args.rank, args.world_size = world_info_from_env() - - if args.remotedata and is_master(args): - for dataset_name in args.datasetnames: - for split in dataset_split[dataset_name]: - if not os.path.exists(f"./json_files/{dataset_name}/{split}"): - os.makedirs(f"./json_files/{dataset_name}/{split}") - os.system( - f"aws s3 cp s3://s-laion-audio/webdataset_tar/{dataset_name}/{split}/sizes.json ./json_files/{dataset_name}/{split}/sizes.json" - ) - - args.log_path = None - if is_master(args, local=args.log_local): - log_base_path = os.path.join(args.logs, args.name) - os.makedirs(log_base_path, exist_ok=True) - log_filename = f"out-{args.rank}" if args.log_local else "out.log" - args.log_path = os.path.join(log_base_path, log_filename) - if os.path.exists(args.log_path): - print( - f"Error. Experiment {args.name} already exists. Use --name to specify a new experiment." - ) - return -1 - - # Set logger - args.log_level = logging.DEBUG if args.debug else logging.INFO - setup_logging(args.log_path, args.log_level) - - # fully initialize distributed device environment - device = init_distributed_device(args) - - args.wandb = "wandb" in args.report_to or "all" in args.report_to - args.tensorboard = "tensorboard" in args.report_to or "all" in args.report_to - if is_master(args): - args.tensorboard_path = ( - os.path.join(args.logs, args.name, "tensorboard") - if args.tensorboard - else "" - ) - args.checkpoint_path = os.path.join(args.logs, args.name, "checkpoints") - for dirname in [args.tensorboard_path, args.checkpoint_path]: - if dirname: - os.makedirs(dirname, exist_ok=True) - else: - args.tensorboard_path = "" - args.checkpoint_path = "" - - if args.copy_codebase: - copy_codebase(args) - - assert args.precision in ["amp", "fp16", "fp32"] - if args.precision == "fp16": - logging.warning( - "It is recommended to use AMP mixed-precision instead of FP16. " - "FP16 support needs further verification and tuning, especially for train." - ) - - if args.horovod: - logging.info( - f"Running in horovod mode with multiple processes / nodes. Device: {args.device}. " - f"Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}." - ) - elif args.distributed: - logging.info( - f"Running in distributed mode with multiple processes. Device: {args.device}. " - f"Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}." - ) - else: - logging.info(f"Running with a single process.
Device {args.device}.") - - logging.info(f"openai cache dir: {os.path.expanduser(args.openai_model_cache_dir)}") - - model, model_cfg = create_model( - args.amodel, - args.tmodel, - args.pretrained, - precision=args.precision, - device=device, - jit=args.torchscript, - force_quick_gelu=args.force_quick_gelu, - openai_model_cache_dir=os.path.expanduser(args.openai_model_cache_dir), - skip_params=True, - pretrained_audio=args.pretrained_audio, - pretrained_text=args.pretrained_text, - enable_fusion=args.enable_fusion, - fusion_type=args.fusion_type, - ) - - if args.horovod: - with torch.no_grad(): - for param in model.parameters(): - param.set_(param.contiguous()) - - if args.trace: - model = trace_model(model, batch_size=args.batch_size, device=device) - - if is_master(args): - logging.info("Model:") - logging.info(f"{str(model)}") - logging.info("Params:") - params_file = os.path.join(args.logs, args.name, "params.txt") - with open(params_file, "w") as f: - for name in sorted(vars(args)): - val = getattr(args, name) - logging.info(f" {name}: {val}") - f.write(f"{name}: {val}\n") - - if args.distributed and not args.horovod: - if args.use_bn_sync: - model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) - ddp_args = {} - if args.ddp_static_graph: - # this doesn't exist in older PyTorch, arg only added if enabled - ddp_args["static_graph"] = True - model = torch.nn.parallel.DistributedDataParallel( - model, device_ids=[device], find_unused_parameters=True, **ddp_args - ) - - data = get_data(args, model_cfg) - assert len(data), "At least one train or eval dataset must be specified." - if args.trace: - assert "train" not in data, "Cannot train with traced model" - - exclude = ( - lambda n, p: p.ndim < 2 - or "bn" in n - or "ln" in n - or "bias" in n - or "logit_scale" in n - ) - include = lambda n, p: not exclude(n, p) - - named_parameters = list(model.named_parameters()) - - # freeze text encoder - text_freeze_parameters = [p for n, p in named_parameters if "text_branch" in n] - - if args.freeze_text: - print("Freeze Text!!!!") - for k in text_freeze_parameters: - k.requires_grad = False - - gain_or_bias_params = [ - p for n, p in named_parameters if exclude(n, p) and p.requires_grad - ] - rest_params = [p for n, p in named_parameters if include(n, p) and p.requires_grad] - - # set wd-related params to 0 if use adam optimizer - if args.optimizer == "adam": - args.wd = 0 - args.wd_pretrained = 0 - args.wd_new = 0 - - if args.train_data is None: - optimizer = None - scheduler = None - else: - total_steps = data["train"].dataloader.num_batches * args.epochs - - if args.split_opt: - for x in ["lr", "beta1", "beta2", "eps", "wd"]: - for y in ["_new", "_pretrained"]: - if getattr(args, x + y) is None: - setattr(args, x + y, getattr(args, x)) - - gain_or_bias_pretrained_params = [ - p - for n, p in named_parameters - if (exclude(n, p) and p.requires_grad) and is_pretrained_params(n) - ] - rest_pretrained_params = [ - p - for n, p in named_parameters - if (include(n, p) and p.requires_grad) and is_pretrained_params(n) - ] - gain_or_bias_new_params = [ - p - for n, p in named_parameters - if (exclude(n, p) and p.requires_grad) and (not is_pretrained_params(n)) - ] - rest_new_params = [ - p - for n, p in named_parameters - if (include(n, p) and p.requires_grad) and (not is_pretrained_params(n)) - ] - pretrained_params_optimizer = get_optimizer( - [ - {"params": gain_or_bias_pretrained_params, "weight_decay": 0.0}, - { - "params": rest_pretrained_params, - "weight_decay": args.wd_pretrained, - }, 
- ], - lr=args.lr_pretrained, - betas=(args.beta1_pretrained, args.beta2_pretrained), - eps=args.eps_pretrained, - momentum=args.momentum_pretrained, - optimizer_name=args.optimizer, - ) - pretrained_params_scheduler = cosine_lr( - pretrained_params_optimizer, - args.lr_pretrained, - args.warmup, - total_steps, - ) - new_params_optimizer = get_optimizer( - [ - {"params": gain_or_bias_new_params, "weight_decay": 0.0}, - {"params": rest_new_params, "weight_decay": args.wd_new}, - ], - lr=args.lr_new, - betas=(args.beta1_new, args.beta2_new), - eps=args.eps_new, - momentum=args.momentum_new, - optimizer_name=args.optimizer, - ) - - new_params_scheduler = cosine_lr( - new_params_optimizer, args.lr_new, args.warmup, total_steps - ) - - optimizer = { - "pretrained": pretrained_params_optimizer, - "new": new_params_optimizer, - } - scheduler = { - "pretrained": pretrained_params_scheduler, - "new": new_params_scheduler, - } - - if args.horovod: - pretrained_params_optimizer = hvd.DistributedOptimizer( - pretrained_params_optimizer, - named_parameters=model.named_parameters(), - ) - new_params_optimizer = hvd.DistributedOptimizer( - new_params_optimizer, named_parameters=model.named_parameters() - ) - hvd.broadcast_parameters(model.state_dict(), root_rank=0) - hvd.broadcast_optimizer_state(pretrained_params_optimizer, root_rank=0) - hvd.broadcast_optimizer_state(new_params_optimizer, root_rank=0) - else: - optimizer = get_optimizer( - [ - {"params": gain_or_bias_params, "weight_decay": 0.0}, - {"params": rest_params, "weight_decay": args.wd}, - ], - lr=args.lr, - betas=(args.beta1, args.beta2), - eps=args.eps, - momentum=args.momentum, - optimizer_name=args.optimizer, - ) - - scheduler = cosine_lr(optimizer, args.lr, args.warmup, total_steps) - - if args.horovod: - optimizer = hvd.DistributedOptimizer( - optimizer, named_parameters=model.named_parameters() - ) - hvd.broadcast_parameters(model.state_dict(), root_rank=0) - hvd.broadcast_optimizer_state(optimizer, root_rank=0) - - scaler = GradScaler() if args.precision == "amp" else None - - # optionally resume from a checkpoint - start_epoch = 0 - if args.resume is not None: - if os.path.isfile(args.resume): - checkpoint = torch.load(args.resume, map_location=device) - if "epoch" in checkpoint: - # resuming a train checkpoint w/ epoch and optimizer state - start_epoch = checkpoint["epoch"] - sd = checkpoint["state_dict"] - if not args.distributed and next(iter(sd.items()))[0].startswith( - "module" - ): - sd = {k[len("module.") :]: v for k, v in sd.items()} - model.load_state_dict(sd) - if args.split_opt: - if optimizer is not None: - for k, o_ in optimizer.items(): - o_.load_state_dict(checkpoint[k + "_" + "optimizer"]) - if optimizer is not None: - optimizer.load_state_dict(checkpoint["optimizer"]) - if scaler is not None and "scaler" in checkpoint: - scaler.load_state_dict(checkpoint["scaler"]) - logging.info( - f"=> resuming checkpoint '{args.resume}' (epoch {start_epoch})" - ) - else: - # loading a bare (model only) checkpoint for fine-tune or evaluation - model.load_state_dict(checkpoint) - logging.info( - f"=> loaded checkpoint '{args.resume}' (epoch {start_epoch})" - ) - if args.freeze_text: - print("Freeze Text!!!!") - for k in text_freeze_parameters: - k.requires_grad = False - else: - logging.info("=> no checkpoint found at '{}'".format(args.resume)) - - cudnn.benchmark = True - cudnn.deterministic = False - - # determine if this worker should save logs and checkpoints. 
only do so if it is rank == 0 - args.save_logs = args.logs and args.logs.lower() != "none" and is_master(args) - writer = None - if args.save_logs and args.tensorboard: - assert tensorboard is not None, "Please install tensorboard." - writer = tensorboard.SummaryWriter(args.tensorboard_path) - - if args.wandb and is_master(args): - assert wandb is not None, "Please install wandb." - logging.debug("Starting wandb.") - args.train_sz = data["train"].dataloader.num_samples - if args.val_data is not None: - args.val_sz = data["val"].dataloader.num_samples - # you will have to configure this for your project! - wandb.init( - project="clap", - notes=args.wandb_notes, - name=args.wandb_notes, - tags=[], - config=vars(args), - ) - if args.debug: - wandb.watch(model, log="all") - wandb.save(params_file) - logging.debug("Finished loading wandb.") - - if "train" not in data: - evaluate(model, data, start_epoch, args, writer) - return - elif start_epoch == 0 and "val" in data and not args.no_eval: - evaluate(model, data, 0, args, writer) - # print(f'rank {args.rank}, Start First Evaluation')# (yusong): for debug - if args.save_top_performance: - current_top_k_ckpt_metrics = { - i: 0 for i in range(args.save_top_performance) - } # initialize the top-k metric for ckpts to 0 - - # print(f'rank {args.rank}, Start Training') # (yusong): for debug - for epoch in range(start_epoch, args.epochs): - # freeze the text param after (include) args.freeze_text_after, this is -1 by default - if epoch == args.freeze_text_after: - print("Text pretrained parameters are freezed since this epoch.") - for k in text_freeze_parameters: - k.requires_grad = False - if is_master(args): - logging.info(f"Start epoch {epoch}") - - train_one_epoch(model, data, epoch, optimizer, scaler, scheduler, args, writer) - completed_epoch = epoch + 1 - - if ( - any(v in data for v in ("val", "imagenet-val", "imagenet-v2")) - and not args.no_eval - ): - metrics = evaluate(model, data, completed_epoch, args, writer) - if args.save_top_performance: - top_k_dataset = args.top_k_checkpoint_select_dataset - top_k_metric = args.top_k_checkpoint_select_metric - filtered_metrics = [ - v - for k, v in metrics.items() - if top_k_metric in k and top_k_dataset in k - ] # check all R@10 metrics (all dataset) and use it to update the ckpt - # Saving checkpoints. - if args.save_logs: - if args.split_opt: - opt_dict = { - k + "_" + "optimizer": v.state_dict() for k, v in optimizer.items() - } - else: - opt_dict = {"optimizer": optimizer.state_dict()} - checkpoint_dict = { - "epoch": completed_epoch, - "name": args.name, - "state_dict": model.state_dict(), - } - checkpoint_dict.update(opt_dict) - if scaler is not None: - checkpoint_dict["scaler"] = scaler.state_dict() - - if completed_epoch == args.epochs or ( - args.save_frequency > 0 and (completed_epoch % args.save_frequency) == 0 - ): - torch.save( - checkpoint_dict, - os.path.join(args.checkpoint_path, f"epoch_{completed_epoch}.pt"), - ) - if args.save_most_recent: - torch.save( - checkpoint_dict, - os.path.join(args.checkpoint_path, f"epoch_latest.pt"), - ) - if args.save_top_performance and not args.no_eval: - update_top_k_performance( - filtered_metrics, - current_top_k_ckpt_metrics, - args, - checkpoint_dict, - bignumbetter=True, - ) - - if args.wandb and is_master(args): - wandb.finish() - - -def copy_codebase(args): - from shutil import copytree, ignore_patterns - - new_code_path = os.path.join(args.logs, args.name, "code") - if os.path.exists(new_code_path): - print( - f"Error. 
Experiment already exists at {new_code_path}. Use --name to specify a new experiment." - ) - return -1 - print(f"Copying codebase to {new_code_path}") - current_code_path = os.path.realpath(__file__) - for _ in range(3): - current_code_path = os.path.dirname(current_code_path) - copytree( - current_code_path, new_code_path, ignore=ignore_patterns("log", "logs", "wandb") - ) - print("Done copying code.") - return 1 - - -if __name__ == "__main__": - main() diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/accessors/LightNode.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/accessors/LightNode.js deleted file mode 100644 index f510329f105017c7911d628d6334b4ed94659ee8..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/accessors/LightNode.js +++ /dev/null @@ -1,61 +0,0 @@ -/** - * @author sunag / http://www.sunag.com.br/ - */ - -import { TempNode } from '../core/TempNode.js'; - -function LightNode( scope ) { - - TempNode.call( this, 'v3', { shared: false } ); - - this.scope = scope || LightNode.TOTAL; - -} - -LightNode.TOTAL = 'total'; - -LightNode.prototype = Object.create( TempNode.prototype ); -LightNode.prototype.constructor = LightNode; -LightNode.prototype.nodeType = "Light"; - -LightNode.prototype.generate = function ( builder, output ) { - - if ( builder.isCache( 'light' ) ) { - - return builder.format( 'reflectedLight.directDiffuse', this.type, output ); - - } else { - - console.warn( "THREE.LightNode is only compatible in \"light\" channel." ); - - return builder.format( 'vec3( 0.0 )', this.type, output ); - - } - -}; - -LightNode.prototype.copy = function ( source ) { - - TempNode.prototype.copy.call( this, source ); - - this.scope = source.scope; - -}; - -LightNode.prototype.toJSON = function ( meta ) { - - var data = this.getJSONNode( meta ); - - if ( ! data ) { - - data = this.createJSONNode( meta ); - - data.scope = this.scope; - - } - - return data; - -}; - -export { LightNode }; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/lights/PointLight.js b/spaces/banana-projects/web3d/node_modules/three/src/lights/PointLight.js deleted file mode 100644 index 162078776d17a9e973fa31740fd81abd4eb819eb..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/lights/PointLight.js +++ /dev/null @@ -1,62 +0,0 @@ -import { Light } from './Light.js'; -import { PerspectiveCamera } from '../cameras/PerspectiveCamera.js'; -import { LightShadow } from './LightShadow.js'; - -/** - * @author mrdoob / http://mrdoob.com/ - */ - - -function PointLight( color, intensity, distance, decay ) { - - Light.call( this, color, intensity ); - - this.type = 'PointLight'; - - Object.defineProperty( this, 'power', { - get: function () { - - // intensity = power per solid angle. - // ref: equation (15) from https://seblagarde.files.wordpress.com/2015/07/course_notes_moving_frostbite_to_pbr_v32.pdf - return this.intensity * 4 * Math.PI; - - }, - set: function ( power ) { - - // intensity = power per solid angle. - // ref: equation (15) from https://seblagarde.files.wordpress.com/2015/07/course_notes_moving_frostbite_to_pbr_v32.pdf - this.intensity = power / ( 4 * Math.PI ); - - } - } ); - - this.distance = ( distance !== undefined ) ? distance : 0; - this.decay = ( decay !== undefined ) ? decay : 1; // for physically correct lights, should be 2. 
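- // example (assuming renderer.physicallyCorrectLights = true): setting light.power = 800 lumens - // gives intensity = 800 / ( 4 * Math.PI ) ≈ 63.7, via the 'power' property defined above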
- - this.shadow = new LightShadow( new PerspectiveCamera( 90, 1, 0.5, 500 ) ); - -} - -PointLight.prototype = Object.assign( Object.create( Light.prototype ), { - - constructor: PointLight, - - isPointLight: true, - - copy: function ( source ) { - - Light.prototype.copy.call( this, source ); - - this.distance = source.distance; - this.decay = source.decay; - - this.shadow = source.shadow.clone(); - - return this; - - } - -} ); - - -export { PointLight }; diff --git a/spaces/baulab/Erasing-Concepts-In-Diffusion/__init__.py b/spaces/baulab/Erasing-Concepts-In-Diffusion/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/bfh-nlp-circle/nlp-cirlce-demo/app.py b/spaces/bfh-nlp-circle/nlp-cirlce-demo/app.py deleted file mode 100644 index bf33aa805e7a45346b012597e46fcda1b5c97ff3..0000000000000000000000000000000000000000 --- a/spaces/bfh-nlp-circle/nlp-cirlce-demo/app.py +++ /dev/null @@ -1,5 +0,0 @@ -from src.nlp_circle_demo.wrapped_gradio_objects import GradioTabWrapper - -if __name__ == "__main__": - demo = GradioTabWrapper.from_yaml("resources/tabs/app_tab.yml") - demo.launch() diff --git a/spaces/bgk/sipariseng/README.md b/spaces/bgk/sipariseng/README.md deleted file mode 100644 index 84044f4efbba2c0b53b37b7090019e08a849f7ee..0000000000000000000000000000000000000000 --- a/spaces/bgk/sipariseng/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Sipariseng -emoji: ⚡ -colorFrom: gray -colorTo: pink -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/bguberfain/Detic/app.py b/spaces/bguberfain/Detic/app.py deleted file mode 100644 index ed6418dbe35f913492cff38f095641d5088c368f..0000000000000000000000000000000000000000 --- a/spaces/bguberfain/Detic/app.py +++ /dev/null @@ -1,126 +0,0 @@ -# Based on https://huggingface.co/spaces/akhaliq/Detic/tree/main Thanks! 
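-# Note: dependencies are pinned at runtime below (gradio 2.4.6, detectron2 built for torch 1.9 / cu102); -# the comma-separated custom vocabulary is turned into a zero-shot classifier via get_clip_embeddings() further down.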
-import os -os.system("pip install gradio==2.4.6") -import sys -import gradio as gr - -os.system('pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.9/index.html') - -# clone and install Detic -os.system("git clone https://github.com/facebookresearch/Detic.git --recurse-submodules") -os.chdir("Detic") - -# Install detectron2 -import torch - -# Some basic setup: -# Setup detectron2 logger -import detectron2 -from detectron2.utils.logger import setup_logger -setup_logger() - -# import some common libraries -import sys -import numpy as np -import os, json, cv2, random - -# import some common detectron2 utilities -from detectron2 import model_zoo -from detectron2.engine import DefaultPredictor -from detectron2.config import get_cfg -from detectron2.utils.visualizer import Visualizer -from detectron2.data import MetadataCatalog, DatasetCatalog - -# Detic libraries -sys.path.insert(0, 'third_party/CenterNet2/projects/CenterNet2/') -from centernet.config import add_centernet_config -from detic.config import add_detic_config -from detic.modeling.utils import reset_cls_test -from detic.modeling.text.text_encoder import build_text_encoder - -from PIL import Image - -# Build the detector and download our pretrained weights -cfg = get_cfg() -add_centernet_config(cfg) -add_detic_config(cfg) -cfg.MODEL.DEVICE='cpu' -cfg.merge_from_file("configs/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml") -cfg.MODEL.WEIGHTS = 'https://dl.fbaipublicfiles.com/detic/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.pth' -cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model -cfg.MODEL.ROI_BOX_HEAD.ZEROSHOT_WEIGHT_PATH = 'rand' -cfg.MODEL.ROI_HEADS.ONE_CLASS_PER_PROPOSAL = True # For better visualization purpose. Set to False for all classes. -predictor = DefaultPredictor(cfg) - -# Setup the model's vocabulary using build-in datasets - -BUILDIN_CLASSIFIER = { - 'lvis': 'datasets/metadata/lvis_v1_clip_a+cname.npy', - 'objects365': 'datasets/metadata/o365_clip_a+cnamefix.npy', - 'openimages': 'datasets/metadata/oid_clip_a+cname.npy', - 'coco': 'datasets/metadata/coco_clip_a+cname.npy', -} - -BUILDIN_METADATA_PATH = { - 'lvis': 'lvis_v1_val', - 'objects365': 'objects365_v2_val', - 'openimages': 'oid_val_expanded', - 'coco': 'coco_2017_val', -} - -text_encoder = build_text_encoder(pretrain=True) -text_encoder.eval() - -def get_clip_embeddings(vocabulary, prompt='a '): - texts = [prompt + x for x in vocabulary] - emb = text_encoder(texts).detach().permute(1, 0).contiguous().cpu() - return emb - -def update_test_score_thresh(predictor, test_score_thresh): - for box_predictor in predictor.model.roi_heads.box_predictor: - box_predictor.test_score_thresh = test_score_thresh - -def inference(custom_vocabulary, thresh, img): - update_test_score_thresh(predictor, test_score_thresh=thresh) - - metadata = MetadataCatalog.get("__unused") - metadata.thing_classes = custom_vocabulary.split(',') - classifier = get_clip_embeddings(metadata.thing_classes) - num_classes = len(metadata.thing_classes) - - reset_cls_test(predictor.model, classifier, num_classes) - - im = cv2.imread(img) - - outputs = predictor(im) - v = Visualizer(im[:, :, ::-1], metadata) - out = v.draw_instance_predictions(outputs["instances"].to("cpu")) - - MetadataCatalog.remove("__unused") - - return Image.fromarray(np.uint8(out.get_image())).convert('RGB') - -title = "Detic" - -description = "Gradio demo for Detic: Detecting Twenty-thousand Classes using Image-level Supervision. 
To use it, simply upload your image, or click one of the examples to load them. Read more at the links below." - -article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2201.02605' target='_blank'>Detecting Twenty-thousand Classes using Image-level Supervision</a> | <a href='https://github.com/facebookresearch/Detic' target='_blank'>Github Repo</a></p>" - -examples = [ - ['dog,cat' , 0.500, 'examples/dogs-and-cats.jpeg'], - ['a boy jumping in the air', 0.037, 'examples/jump.jpeg'], -] - -gr.Interface(inference, - inputs=[ - gr.inputs.Textbox(placeholder="Type a class or text to find", default="dog,cat"), - gr.inputs.Slider(minimum=0.001, maximum=0.999, step=0.001, default=0.5), - gr.inputs.Image(type="filepath") - ], - outputs=gr.outputs.Image(type="pil"), - title=title, - description=description, - article=article, - examples=examples, - enable_queue=True - ).launch() diff --git a/spaces/bigPear/digitalWDF/src/utils/__init__.py b/spaces/bigPear/digitalWDF/src/utils/__init__.py deleted file mode 100644 index 33e85048b4b13231b87f82b79a2b29690e0fb423..0000000000000000000000000000000000000000 --- a/spaces/bigPear/digitalWDF/src/utils/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -from .common import ( - load_pretrained, - prepare_args, - prepare_data, - preprocess_data -) - -from .seq2seq import ( - Seq2SeqDataCollatorForChatGLM, - ComputeMetrics, - Seq2SeqTrainerForChatGLM -) - -from .pairwise import ( - PairwiseDataCollatorForChatGLM, - PairwiseTrainerForChatGLM -) - -from .ppo import ( - PPODataCollatorForChatGLM, - PPOTrainerForChatGLM -) - -from .config import ModelArguments - -from .other import plot_loss diff --git a/spaces/bioriAsaeru/text-to-voice/Anvsoft Flash Slideshow Maker Professional 4.75 A Comprehensive Guide.md b/spaces/bioriAsaeru/text-to-voice/Anvsoft Flash Slideshow Maker Professional 4.75 A Comprehensive Guide.md deleted file mode 100644 index 30f3c1c423fa12d8635fd492413acc10ab6eb205..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Anvsoft Flash Slideshow Maker Professional 4.75 A Comprehensive Guide.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Anvsoft Flash Slideshow Maker Professional 4.75</h2><br /><p><b><b>Download</b> … <a href="https://urloso.com/2uyQd7">https://urloso.com/2uyQd7</a></b></p><br /><br /> - - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/bioriAsaeru/text-to-voice/Avast! Internet Security Premier Antivirus 19.3.2334 Serial Key Keygen Download and Install Guide.md b/spaces/bioriAsaeru/text-to-voice/Avast! Internet Security Premier Antivirus 19.3.2334 Serial Key Keygen Download and Install Guide.md deleted file mode 100644 index 8bd7857bda65f1d03a29724c08d2a3e452d90f67..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Avast! Internet Security Premier Antivirus 19.3.2334 Serial Key Keygen Download and Install Guide.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>avast! 
Internet Security Premier Antivirus 19.3.2334 Serial Key keygen</h2><br /><p><b><b>Download</b> ★★★ <a href="https://urloso.com/2uyQCo">https://urloso.com/2uyQCo</a></b></p><br /><br /> - - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/bioriAsaeru/text-to-voice/De Dana Dan Movie Download 720p In Hindi.md b/spaces/bioriAsaeru/text-to-voice/De Dana Dan Movie Download 720p In Hindi.md deleted file mode 100644 index 89839f01cdb55eaa47f7918af854ab779b3094f2..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/De Dana Dan Movie Download 720p In Hindi.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>De Dana Dan Movie Download 720p In Hindi</h2><br /><p><b><b>Download</b> ··· <a href="https://urloso.com/2uyRgx">https://urloso.com/2uyRgx</a></b></p><br /><br /> -<br /> - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/bioriAsaeru/text-to-voice/Free Download NI LabWindows CVI 2012 Crack And Keygen Added.md b/spaces/bioriAsaeru/text-to-voice/Free Download NI LabWindows CVI 2012 Crack And Keygen Added.md deleted file mode 100644 index 5f8e1143d7748f62827394fa99edbe82087a5098..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Free Download NI LabWindows CVI 2012 Crack And Keygen Added.md +++ /dev/null @@ -1,125 +0,0 @@ - -<h1>How to Download NI LabWindows CVI 2012 Crack And Keygen for Free and Enjoy Its Features</h1> - -<p>If you are looking for a software tool that can help you link any instrument to LabVIEW/CVI, you might want to download NI LabWindows CVI 2012 crack and keygen for free. This is a software tool that allows you to create user interfaces, test and measurement applications, and data acquisition systems using C and C++. The software tool is developed by National Instruments, a leading provider of hardware and software solutions for engineering and science.</p> -<h2>Free Download NI LabWindows CVI 2012 Crack And Keygen Added</h2><br /><p><b><b>Download Zip</b> 🌟 <a href="https://urloso.com/2uyPws">https://urloso.com/2uyPws</a></b></p><br /><br /> - -<h2>What is NI LabWindows CVI 2012 Crack And Keygen</h2> - -<p>NI LabWindows CVI 2012 crack and keygen is a software tool that can activate the full version of NI LabWindows CVI 2012 without paying any license fee. 
The full version of NI LabWindows CVI 2012 has many features and benefits, such as:</p> - -<ul> -<li>It supports the latest standards and technologies, such as Windows 7, .NET Framework 4.0, Visual Studio 2010, and multicore processors.</li> -<li>It provides a rich set of libraries and functions for data analysis, signal processing, instrument control, communication, database access, and more.</li> -<li>It integrates with LabVIEW/CVI, allowing you to share code, data, and resources between the two environments.</li> -<li>It offers a graphical user interface editor that lets you create and customize user interfaces with drag-and-drop components.</li> -<li>It includes a code analyzer that helps you detect and fix errors, optimize performance, and improve code quality.</li> -<li>It enables you to create executable files, dynamic link libraries, and installers for your applications.</li> -<li>It supports various debugging and testing tools, such as breakpoints, watch windows, code coverage, memory leak detection, and unit testing.</li> -</ul> - -<p>By downloading NI LabWindows CVI 2012 crack and keygen for free, you can enjoy all these features and benefits without spending any money.</p> - -<h2>How to Download NI LabWindows CVI 2012 Crack And Keygen for Free</h2> - -<p>To download NI LabWindows CVI 2012 crack and keygen for free, you can use one of the following sources:</p> - -<ol> -<li>Settwolfmarfull: This is a website that provides free download links for various software tools, including NI LabWindows CVI 2012 crack and keygen. You can find the download link by searching for the software name on the website. You can also view the instructions on how to install and use the software tool.</li> -<li>Capturingwow: This is a website that provides free exclusive download links for various software tools, including NI LabWindows CVI 2012 crack and keygen. You can find the download link by searching for the software name on the website. You can also view the screenshots and reviews of the software tool.</li> -</ol> - -<p>To download NI LabWindows CVI 2012 crack and keygen for free from these sources, you need to follow some steps, such as:</p> - -<ul> -<li>Click on the download link provided by the source.</li> -<li>Wait for the download to complete.</li> -<li>Extract the downloaded file using a file compression tool.</li> -<li>Run the setup file to install NI LabWindows CVI 2012 on your computer.</li> -<li>Run the crack file or enter the keygen code to activate the full version of NI LabWindows CVI 2012.</li> -<li>Enjoy using NI LabWindows CVI 2012 with all its features and benefits.</li> -</ul> - -<p>Downloading NI LabWindows CVI 2012 crack and keygen for free can help you link any instrument to LabVIEW/CVI with ease. The software tool can also help you create user interfaces, test and measurement applications, and data acquisition systems using C and C++. Download it today and start using it!</p> -<p></p> -<h2>What You Can Do with NI LabWindows CVI 2012</h2> - -<p>NI LabWindows CVI 2012 is a versatile and powerful software tool that can help you link any instrument to LabVIEW/CVI. 
With this software tool, you can do many things, such as:</p> - -<ul> -<li>You can create user interfaces for your applications using various components, such as buttons, menus, graphs, gauges, and tables.</li> -<li>You can test and measure various signals and data using various instruments, such as oscilloscopes, multimeters, function generators, and power supplies.</li> -<li>You can acquire and analyze data from various sources, such as sensors, cameras, microphones, and files.</li> -<li>You can control and automate various devices and systems, such as motors, valves, pumps, and robots.</li> -<li>You can communicate and exchange data with other applications and platforms, such as LabVIEW/CVI, MATLAB, Excel, and .NET.</li> -</ul> - -<p>With NI LabWindows CVI 2012, you can create applications that can perform various tasks and functions in engineering and science.</p> - -<h2>Why You Should Download NI LabWindows CVI 2012 Crack And Keygen for Free</h2> - -<p>Downloading NI LabWindows CVI 2012 crack and keygen for free has many advantages, such as:</p> - -<ul> -<li>You can save money by not paying any license fee for the full version of NI LabWindows CVI 2012.</li> -<li>You can access all the features and benefits of NI LabWindows CVI 2012 without any limitations or restrictions.</li> -<li>You can use NI LabWindows CVI 2012 on any computer without any activation or registration process.</li> -<li>You can update NI LabWindows CVI 2012 to the latest version without any hassle or problem.</li> -<li>You can share NI LabWindows CVI 2012 with your friends or colleagues without any legal or ethical issues.</li> -</ul> - -<p>Downloading NI LabWindows CVI 2012 crack and keygen for free can help you enjoy using NI LabWindows CVI 2012 without any cost or trouble.</p> -<h2>How to Use NI LabWindows CVI 2012 Effectively</h2> - -<p>NI LabWindows CVI 2012 is a software tool that can help you link any instrument to LabVIEW/CVI. However, to use it effectively, you need to follow some tips and suggestions, such as:</p> - -<ul> -<li>Read the documentation and tutorials that are included with NI LabWindows CVI 2012. They provide useful information and guidance on how to use the software tool and its features.</li> -<li>Use the code templates and examples that are provided with NI LabWindows CVI 2012. They can help you create and customize your applications faster and easier.</li> -<li>Use the debugging and testing tools that are provided with NI LabWindows CVI 2012. They can help you detect and fix errors, optimize performance, and improve code quality.</li> -<li>Use the graphical user interface editor that is provided with NI LabWindows CVI 2012. It can help you create and customize user interfaces with drag-and-drop components.</li> -<li>Use the libraries and functions that are provided with NI LabWindows CVI 2012. They can help you perform various tasks and functions in engineering and science.</li> -</ul> - -<p>Using NI LabWindows CVI 2012 effectively can help you link any instrument to LabVIEW/CVI with ease. It can also help you create user interfaces, test and measurement applications, and data acquisition systems using C and C++. It can also help you achieve your engineering and science goals.</p> -<h2>What are the Alternatives to NI LabWindows CVI 2012</h2> - -<p>NI LabWindows CVI 2012 is a software tool that can help you link any instrument to LabVIEW/CVI. However, it is not the only software tool that can do that. 
There are some alternatives to NI LabWindows CVI 2012 that you can also use, such as:</p> - -<ul> -<li>Visual Studio: This is a software development environment that allows you to create applications using various programming languages, such as C#, C++, and Visual Basic. It also supports various technologies and platforms, such as Windows, .NET, and Azure. It also integrates with LabVIEW/CVI, allowing you to share code, data, and resources between the two environments.</li> -<li>Qt: This is a software development framework that allows you to create cross-platform applications using C++. It also provides a graphical user interface toolkit that lets you create and customize user interfaces with widgets and layouts. It also supports various instruments and devices, such as cameras, sensors, and serial ports.</li> -<li>Python: This is a high-level programming language that allows you to create applications using simple and readable syntax. It also supports multiple paradigms, such as object-oriented, functional, and procedural. It also has a large and diverse set of libraries and modules that can help you perform various tasks and functions in engineering and science.</li> -</ul> - -<p>These are some of the alternatives to NI LabWindows CVI 2012 that you can also use to link any instrument to LabVIEW/CVI. However, they may have some disadvantages or limitations compared to NI LabWindows CVI 2012, such as:</p> - -<ul> -<li>They may require more coding or programming skills than NI LabWindows CVI 2012.</li> -<li>They may not have all the features or benefits that NI LabWindows CVI 2012 has.</li> -<li>They may not be compatible or integrated with all the instruments or devices that NI LabWindows CVI 2012 supports.</li> -</ul> - -<p>Therefore, you should choose the software tool that best suits your needs and preferences.</p> -<h2>How to Learn More About NI LabWindows CVI 2012</h2> - -<p>If you want to learn more about NI LabWindows CVI 2012 and how to use it effectively, you can use some of the following resources:</p> - -<ul> -<li>National Instruments Website: This is the official website of National Instruments, the developer of NI LabWindows CVI 2012. You can find more information and details about the software tool and its features on the website. You can also download the software tool and its documentation from the website.</li> -<li>National Instruments Community: This is an online platform that connects you with other users and experts of NI LabWindows CVI 2012. You can ask questions and seek answers about the software tool and its usage on the platform. You can also share your ideas and opinions about the software tool with others on the platform.</li> -<li>National Instruments Training: This is a service that provides various courses and workshops on NI LabWindows CVI 2012 and other National Instruments products. You can enroll in these courses and workshops to learn how to use the software tool and its features from experienced instructors. You can also get certified on the software tool after completing these courses and workshops.</li> -</ul> - -<p>These are some of the resources that can help you learn more about NI LabWindows CVI 2012 and how to use it effectively. You can also find other resources online or offline that can help you with the software tool.</p> -<h2>Conclusion</h2> - -<p>NI LabWindows CVI 2012 is a software tool that can help you link any instrument to LabVIEW/CVI. 
It can also help you create user interfaces, test and measurement applications, and data acquisition systems using C and C++. It has many features and benefits that make it a useful and reliable software tool for engineering and science.</p> - -<p>You can download NI LabWindows CVI 2012 crack and keygen for free from Settwolfmarfull or Capturingwow websites. They provide free download links and instructions for NI LabWindows CVI 2012 crack and keygen. By downloading NI LabWindows CVI 2012 crack and keygen for free, you can activate the full version of NI LabWindows CVI 2012 without paying any license fee.</p> - -<p>Downloading NI LabWindows CVI 2012 crack and keygen for free can help you enjoy using NI LabWindows CVI 2012 with all its features and benefits without any cost or trouble. You can also use some of the resources that can help you learn more about NI LabWindows CVI 2012 and how to use it effectively.</p> - -<p>If you are looking for a software tool that can help you link any instrument to LabVIEW/CVI, you should download NI LabWindows CVI 2012 crack and keygen for free today. The software tool will help you achieve your engineering and science goals.</p> 3cee63e6c2<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/blmdsydm/faster-whisper-webui/src/whisper/abstractWhisperContainer.py b/spaces/blmdsydm/faster-whisper-webui/src/whisper/abstractWhisperContainer.py deleted file mode 100644 index 3df2f19ad8c5665b1f09bc3e59943049769b54b7..0000000000000000000000000000000000000000 --- a/spaces/blmdsydm/faster-whisper-webui/src/whisper/abstractWhisperContainer.py +++ /dev/null @@ -1,107 +0,0 @@ -import abc -from typing import List - -from src.config import ModelConfig, VadInitialPromptMode - -from src.hooks.progressListener import ProgressListener -from src.modelCache import GLOBAL_MODEL_CACHE, ModelCache -from src.prompts.abstractPromptStrategy import AbstractPromptStrategy - -class AbstractWhisperCallback: - def __init__(self): - self.__prompt_mode_gpt = None - - @abc.abstractmethod - def invoke(self, audio, segment_index: int, prompt: str, detected_language: str, progress_listener: ProgressListener = None): - """ - Peform the transcription of the given audio file or data. - - Parameters - ---------- - audio: Union[str, np.ndarray, torch.Tensor] - The audio file to transcribe, or the audio data as a numpy array or torch tensor. - segment_index: int - The target language of the transcription. If not specified, the language will be inferred from the audio content. - task: str - The task - either translate or transcribe. - progress_listener: ProgressListener - A callback to receive progress updates. - """ - raise NotImplementedError() - -class AbstractWhisperContainer: - def __init__(self, model_name: str, device: str = None, compute_type: str = "float16", - download_root: str = None, - cache: ModelCache = None, models: List[ModelConfig] = []): - self.model_name = model_name - self.device = device - self.compute_type = compute_type - self.download_root = download_root - self.cache = cache - - # Will be created on demand - self.model = None - - # List of known models - self.models = models - - def get_model(self): - if self.model is None: - - if (self.cache is None): - self.model = self._create_model() - else: - model_key = "WhisperContainer." 
+ self.model_name + ":" + (self.device if self.device else '') - self.model = self.cache.get(model_key, self._create_model) - return self.model - - @abc.abstractmethod - def _create_model(self): - raise NotImplementedError() - - def ensure_downloaded(self): - pass - - @abc.abstractmethod - def create_callback(self, language: str = None, task: str = None, - prompt_strategy: AbstractPromptStrategy = None, - **decodeOptions: dict) -> AbstractWhisperCallback: - """ - Create a WhisperCallback object that can be used to transcript audio files. - - Parameters - ---------- - language: str - The target language of the transcription. If not specified, the language will be inferred from the audio content. - task: str - The task - either translate or transcribe. - prompt_strategy: AbstractPromptStrategy - The prompt strategy to use for the transcription. - decodeOptions: dict - Additional options to pass to the decoder. Must be pickleable. - - Returns - ------- - A WhisperCallback object. - """ - raise NotImplementedError() - - # This is required for multiprocessing - def __getstate__(self): - return { - "model_name": self.model_name, - "device": self.device, - "download_root": self.download_root, - "models": self.models, - "compute_type": self.compute_type - } - - def __setstate__(self, state): - self.model_name = state["model_name"] - self.device = state["device"] - self.download_root = state["download_root"] - self.models = state["models"] - self.compute_type = state["compute_type"] - self.model = None - # Depickled objects must use the global cache - self.cache = GLOBAL_MODEL_CACHE \ No newline at end of file diff --git a/spaces/bodah/RVC-Models-bo/lib/infer_pack/models.py b/spaces/bodah/RVC-Models-bo/lib/infer_pack/models.py deleted file mode 100644 index 44c08d361bcb13b84b38dc29beff5cdaddad4ea2..0000000000000000000000000000000000000000 --- a/spaces/bodah/RVC-Models-bo/lib/infer_pack/models.py +++ /dev/null @@ -1,1124 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, 
-1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, 
logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - 
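-            # The block that follows builds a harmonic stack: channel 0 carries f0 and
-            # channel k carries (k + 1) * f0. rad_values is the per-frame phase increment
-            # (f0 / sampling_rate) mod 1; after upsampling by upp, cumsum plus the
-            # cumsum_shift wraparound correction keeps the accumulated phase continuous,
-            # torch.sin turns that phase into the harmonic sine waves, and uv-gated
-            # noise fills the unvoiced regions.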
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - 
sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - 
inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - 
hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - 
self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = 
self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - 
padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/chendl/compositional_test/transformers/src/transformers/models/bart/tokenization_bart.py b/spaces/chendl/compositional_test/transformers/src/transformers/models/bart/tokenization_bart.py deleted file mode 100644 index 22ee1a0db6149d464b297eb44b9c29175c13896f..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/src/transformers/models/bart/tokenization_bart.py +++ /dev/null @@ -1,419 +0,0 @@ -# coding=utf-8 -# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
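As background for the tokenizer below: byte-level BPE starts from the raw bytes of the text (remapped to printable unicode by bytes_to_unicode) and repeatedly merges the adjacent symbol pair with the lowest merge rank until no ranked pair remains. A minimal sketch of that merge loop follows; the toy_bpe helper and its tiny ranks table are illustrative only and not part of this file, the real logic being BartTokenizer.bpe further down:

    def toy_bpe(token, ranks):
        # Merge the lowest-ranked adjacent pair until none of the remaining
        # pairs appears in the merge table -- a simplified BartTokenizer.bpe.
        word = list(token)
        while len(word) > 1:
            pairs = [(word[i], word[i + 1]) for i in range(len(word) - 1)]
            best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
            if best not in ranks:
                break
            i = pairs.index(best)
            word[i:i + 2] = [word[i] + word[i + 1]]
        return word

    # toy_bpe("low", {("l", "o"): 0, ("lo", "w"): 1}) == ["low"]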
-
-import json
-import os
-from functools import lru_cache
-from typing import List, Optional, Tuple
-
-import regex as re
-
-from ...tokenization_utils import AddedToken, PreTrainedTokenizer
-from ...utils import logging
-
-
-logger = logging.get_logger(__name__)
-
-
-VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
-
-# See all BART models at https://huggingface.co/models?filter=bart
-PRETRAINED_VOCAB_FILES_MAP = {
-    "vocab_file": {
-        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
-        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
-        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
-        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
-        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
-        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
-    },
-    "merges_file": {
-        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
-        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
-        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
-        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
-        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
-        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
-    },
-}
-
-PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
-    "facebook/bart-base": 1024,
-    "facebook/bart-large": 1024,
-    "facebook/bart-large-mnli": 1024,
-    "facebook/bart-large-cnn": 1024,
-    "facebook/bart-large-xsum": 1024,
-    "yjernite/bart_eli5": 1024,
-}
-
-
-@lru_cache()
-def bytes_to_unicode():
-    """
-    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to
-    whitespace/control characters that the bpe code barfs on.
-
-    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
-    if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
-    decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
-    tables between utf-8 bytes and unicode strings.
-    """
-    bs = (
-        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
-    )
-    cs = bs[:]
-    n = 0
-    for b in range(2**8):
-        if b not in bs:
-            bs.append(b)
-            cs.append(2**8 + n)
-            n += 1
-    cs = [chr(n) for n in cs]
-    return dict(zip(bs, cs))
-
-
-def get_pairs(word):
-    """
-    Return the set of symbol pairs in a word.
-
-    Word is represented as a tuple of symbols (symbols being variable-length strings).
-    """
-    pairs = set()
-    prev_char = word[0]
-    for char in word[1:]:
-        pairs.add((prev_char, char))
-        prev_char = char
-    return pairs
-
-
-class BartTokenizer(PreTrainedTokenizer):
-    """
-    Constructs a BART tokenizer, which is similar to the RoBERTa tokenizer, using byte-level Byte-Pair-Encoding.
-
-    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
-    be encoded differently whether it is at the beginning of the sentence (without space) or not:
-
-    ```python
-    >>> from transformers import BartTokenizer
-
-    >>> tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
-    >>> tokenizer("Hello world")["input_ids"]
-    [0, 31414, 232, 2]
-
-    >>> tokenizer(" Hello world")["input_ids"]
-    [0, 20920, 232, 2]
-    ```
-
-    You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
-    call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
-
-    <Tip>
-
-    When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
-
-    </Tip>
-
-    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
-    this superclass for more information regarding those methods.
-
-    Args:
-        vocab_file (`str`):
-            Path to the vocabulary file.
-        merges_file (`str`):
-            Path to the merges file.
-        errors (`str`, *optional*, defaults to `"replace"`):
-            Paradigm to follow when decoding bytes to UTF-8. See
-            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
-        bos_token (`str`, *optional*, defaults to `"<s>"`):
-            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
-            token.
-
-            <Tip>
-
-            When building a sequence using special tokens, this is not the token that is used for the beginning of
-            sequence. The token used is the `cls_token`.
-
-            </Tip>
-
-        eos_token (`str`, *optional*, defaults to `"</s>"`):
-            The end of sequence token.
-
-            <Tip>
-
-            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
-            The token used is the `sep_token`.
-
-            </Tip>
-
-        sep_token (`str`, *optional*, defaults to `"</s>"`):
-            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
-            sequence classification or for a text and a question for question answering. It is also used as the last
-            token of a sequence built with special tokens.
-        cls_token (`str`, *optional*, defaults to `"<s>"`):
-            The classifier token which is used when doing sequence classification (classification of the whole sequence
-            instead of per-token classification). It is the first token of the sequence when built with special tokens.
-        unk_token (`str`, *optional*, defaults to `"<unk>"`):
-            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
-            token instead.
-        pad_token (`str`, *optional*, defaults to `"<pad>"`):
-            The token used for padding, for example when batching sequences of different lengths.
-        mask_token (`str`, *optional*, defaults to `"<mask>"`):
-            The token used for masking values. This is the token used when training this model with masked language
-            modeling. This is the token which the model will try to predict.
-        add_prefix_space (`bool`, *optional*, defaults to `False`):
-            Whether or not to add an initial space to the input. This allows the leading word to be treated just like
-            any other word. (The BART tokenizer detects the beginning of a word by the preceding space.)
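-
-    A quick illustration of `add_prefix_space` at call time (this matches the `" Hello world"` example above; it
-    assumes the keyword is forwarded to `prepare_for_tokenization`, which this slow tokenizer does at the bottom of
-    this file):
-
-    ```python
-    >>> tokenizer("Hello world", add_prefix_space=True)["input_ids"]
-    [0, 20920, 232, 2]
-    ```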
- """ - - vocab_files_names = VOCAB_FILES_NAMES - pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES - model_input_names = ["input_ids", "attention_mask"] - - def __init__( - self, - vocab_file, - merges_file, - errors="replace", - bos_token="<s>", - eos_token="</s>", - sep_token="</s>", - cls_token="<s>", - unk_token="<unk>", - pad_token="<pad>", - mask_token="<mask>", - add_prefix_space=False, - **kwargs, - ): - bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token - eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token - sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token - cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token - unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token - pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token - - # Mask token behave like a normal word, i.e. include the space before it - mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token - - super().__init__( - errors=errors, - bos_token=bos_token, - eos_token=eos_token, - unk_token=unk_token, - sep_token=sep_token, - cls_token=cls_token, - pad_token=pad_token, - mask_token=mask_token, - add_prefix_space=add_prefix_space, - **kwargs, - ) - - with open(vocab_file, encoding="utf-8") as vocab_handle: - self.encoder = json.load(vocab_handle) - self.decoder = {v: k for k, v in self.encoder.items()} - self.errors = errors # how to handle errors in decoding - self.byte_encoder = bytes_to_unicode() - self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} - with open(merges_file, encoding="utf-8") as merges_handle: - bpe_merges = merges_handle.read().split("\n")[1:-1] - bpe_merges = [tuple(merge.split()) for merge in bpe_merges] - self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) - self.cache = {} - self.add_prefix_space = add_prefix_space - - # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions - self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") - - @property - def vocab_size(self): - return len(self.encoder) - - def get_vocab(self): - return dict(self.encoder, **self.added_tokens_encoder) - - def bpe(self, token): - if token in self.cache: - return self.cache[token] - word = tuple(token) - pairs = get_pairs(word) - - if not pairs: - return token - - while True: - bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) - if bigram not in self.bpe_ranks: - break - first, second = bigram - new_word = [] - i = 0 - while i < len(word): - try: - j = word.index(first, i) - except ValueError: - new_word.extend(word[i:]) - break - else: - new_word.extend(word[i:j]) - i = j - - if word[i] == first and i < len(word) - 1 and word[i + 1] == second: - new_word.append(first + second) - i += 2 - else: - new_word.append(word[i]) - i += 1 - new_word = tuple(new_word) - word = new_word - if len(word) == 1: - break - else: - pairs = get_pairs(word) - word = " ".join(word) - self.cache[token] = word - return word - - def _tokenize(self, text): - """Tokenize a string.""" - bpe_tokens = [] - for token in re.findall(self.pat, text): - token = "".join( - 
self.byte_encoder[b] for b in token.encode("utf-8") - ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) - bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) - return bpe_tokens - - def _convert_token_to_id(self, token): - """Converts a token (str) in an id using the vocab.""" - return self.encoder.get(token, self.encoder.get(self.unk_token)) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.decoder.get(index) - - def convert_tokens_to_string(self, tokens): - """Converts a sequence of tokens (string) in a single string.""" - text = "".join(tokens) - text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) - return text - - def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: - if not os.path.isdir(save_directory): - logger.error(f"Vocabulary path ({save_directory}) should be a directory") - return - vocab_file = os.path.join( - save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] - ) - merge_file = os.path.join( - save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] - ) - - with open(vocab_file, "w", encoding="utf-8") as f: - f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") - - index = 0 - with open(merge_file, "w", encoding="utf-8") as writer: - writer.write("#version: 0.2\n") - for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): - if index != token_index: - logger.warning( - f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." - " Please check that the tokenizer is not corrupted!" - ) - index = token_index - writer.write(" ".join(bpe_tokens) + "\n") - index += 1 - - return vocab_file, merge_file - - def build_inputs_with_special_tokens( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. A BART sequence has the following format: - - - single sequence: `<s> X </s>` - - pair of sequences: `<s> A </s></s> B </s>` - - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. - """ - if token_ids_1 is None: - return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] - cls = [self.cls_token_id] - sep = [self.sep_token_id] - return cls + token_ids_0 + sep + sep + token_ids_1 + sep - - def get_special_tokens_mask( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False - ) -> List[int]: - """ - Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding - special tokens using the tokenizer `prepare_for_model` method. - - Args: - token_ids_0 (`List[int]`): - List of IDs. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - already_has_special_tokens (`bool`, *optional*, defaults to `False`): - Whether or not the token list is already formatted with special tokens for the model. 
- - Returns: - `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. - """ - if already_has_special_tokens: - return super().get_special_tokens_mask( - token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True - ) - - if token_ids_1 is None: - return [1] + ([0] * len(token_ids_0)) + [1] - return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] - - def create_token_type_ids_from_sequences( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. BART does not - make use of token type ids, therefore a list of zeros is returned. - - Args: - token_ids_0 (`List[int]`): - List of IDs. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of zeros. - """ - sep = [self.sep_token_id] - cls = [self.cls_token_id] - - if token_ids_1 is None: - return len(cls + token_ids_0 + sep) * [0] - return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] - - def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): - add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) - if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): - text = " " + text - return (text, kwargs) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImageFilter.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImageFilter.py deleted file mode 100644 index 33bc7cc2e30ea9a0f95cc884de151643915848fa..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImageFilter.py +++ /dev/null @@ -1,550 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# standard filters -# -# History: -# 1995-11-27 fl Created -# 2002-06-08 fl Added rank and mode filters -# 2003-09-15 fl Fixed rank calculation in rank filter; added expand call -# -# Copyright (c) 1997-2003 by Secret Labs AB. -# Copyright (c) 1995-2002 by Fredrik Lundh. -# -# See the README file for information on usage and redistribution. -# -import functools - - -class Filter: - pass - - -class MultibandFilter(Filter): - pass - - -class BuiltinFilter(MultibandFilter): - def filter(self, image): - if image.mode == "P": - msg = "cannot filter palette images" - raise ValueError(msg) - return image.filter(*self.filterargs) - - -class Kernel(BuiltinFilter): - """ - Create a convolution kernel. The current version only - supports 3x3 and 5x5 integer and floating point kernels. - - In the current version, kernels can only be applied to - "L" and "RGB" images. - - :param size: Kernel size, given as (width, height). In the current - version, this must be (3,3) or (5,5). - :param kernel: A sequence containing kernel weights. The kernel will - be flipped vertically before being applied to the image. - :param scale: Scale factor. If given, the result for each pixel is - divided by this value. The default is the sum of the - kernel weights. - :param offset: Offset. If given, this value is added to the result, - after it has been divided by the scale factor. 
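-
-    A minimal usage sketch (the image path is illustrative; with ``scale`` left at
-    its default, the sum of the weights, this is a 3x3 mean filter)::
-
-        from PIL import Image, ImageFilter
-
-        im = Image.open("hopper.png").convert("L")
-        smoothed = im.filter(ImageFilter.Kernel((3, 3), [1] * 9))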
- """ - - name = "Kernel" - - def __init__(self, size, kernel, scale=None, offset=0): - if scale is None: - # default scale is sum of kernel - scale = functools.reduce(lambda a, b: a + b, kernel) - if size[0] * size[1] != len(kernel): - msg = "not enough coefficients in kernel" - raise ValueError(msg) - self.filterargs = size, scale, offset, kernel - - -class RankFilter(Filter): - """ - Create a rank filter. The rank filter sorts all pixels in - a window of the given size, and returns the ``rank``'th value. - - :param size: The kernel size, in pixels. - :param rank: What pixel value to pick. Use 0 for a min filter, - ``size * size / 2`` for a median filter, ``size * size - 1`` - for a max filter, etc. - """ - - name = "Rank" - - def __init__(self, size, rank): - self.size = size - self.rank = rank - - def filter(self, image): - if image.mode == "P": - msg = "cannot filter palette images" - raise ValueError(msg) - image = image.expand(self.size // 2, self.size // 2) - return image.rankfilter(self.size, self.rank) - - -class MedianFilter(RankFilter): - """ - Create a median filter. Picks the median pixel value in a window with the - given size. - - :param size: The kernel size, in pixels. - """ - - name = "Median" - - def __init__(self, size=3): - self.size = size - self.rank = size * size // 2 - - -class MinFilter(RankFilter): - """ - Create a min filter. Picks the lowest pixel value in a window with the - given size. - - :param size: The kernel size, in pixels. - """ - - name = "Min" - - def __init__(self, size=3): - self.size = size - self.rank = 0 - - -class MaxFilter(RankFilter): - """ - Create a max filter. Picks the largest pixel value in a window with the - given size. - - :param size: The kernel size, in pixels. - """ - - name = "Max" - - def __init__(self, size=3): - self.size = size - self.rank = size * size - 1 - - -class ModeFilter(Filter): - """ - Create a mode filter. Picks the most frequent pixel value in a box with the - given size. Pixel values that occur only once or twice are ignored; if no - pixel value occurs more than twice, the original pixel value is preserved. - - :param size: The kernel size, in pixels. - """ - - name = "Mode" - - def __init__(self, size=3): - self.size = size - - def filter(self, image): - return image.modefilter(self.size) - - -class GaussianBlur(MultibandFilter): - """Blurs the image with a sequence of extended box filters, which - approximates a Gaussian kernel. For details on accuracy see - <https://www.mia.uni-saarland.de/Publications/gwosdek-ssvm11.pdf> - - :param radius: Standard deviation of the Gaussian kernel. - """ - - name = "GaussianBlur" - - def __init__(self, radius=2): - self.radius = radius - - def filter(self, image): - return image.gaussian_blur(self.radius) - - -class BoxBlur(MultibandFilter): - """Blurs the image by setting each pixel to the average value of the pixels - in a square box extending radius pixels in each direction. - Supports float radius of arbitrary size. Uses an optimized implementation - which runs in linear time relative to the size of the image - for any radius value. - - :param radius: Size of the box in one direction. Radius 0 does not blur, - returns an identical image. Radius 1 takes 1 pixel - in each direction, i.e. 9 pixels in total. 
- """ - - name = "BoxBlur" - - def __init__(self, radius): - if radius < 0: - msg = "radius must be >= 0" - raise ValueError(msg) - self.radius = radius - - def filter(self, image): - return image.box_blur(self.radius) - - -class UnsharpMask(MultibandFilter): - """Unsharp mask filter. - - See Wikipedia's entry on `digital unsharp masking`_ for an explanation of - the parameters. - - :param radius: Blur Radius - :param percent: Unsharp strength, in percent - :param threshold: Threshold controls the minimum brightness change that - will be sharpened - - .. _digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking - - """ # noqa: E501 - - name = "UnsharpMask" - - def __init__(self, radius=2, percent=150, threshold=3): - self.radius = radius - self.percent = percent - self.threshold = threshold - - def filter(self, image): - return image.unsharp_mask(self.radius, self.percent, self.threshold) - - -class BLUR(BuiltinFilter): - name = "Blur" - # fmt: off - filterargs = (5, 5), 16, 0, ( - 1, 1, 1, 1, 1, - 1, 0, 0, 0, 1, - 1, 0, 0, 0, 1, - 1, 0, 0, 0, 1, - 1, 1, 1, 1, 1, - ) - # fmt: on - - -class CONTOUR(BuiltinFilter): - name = "Contour" - # fmt: off - filterargs = (3, 3), 1, 255, ( - -1, -1, -1, - -1, 8, -1, - -1, -1, -1, - ) - # fmt: on - - -class DETAIL(BuiltinFilter): - name = "Detail" - # fmt: off - filterargs = (3, 3), 6, 0, ( - 0, -1, 0, - -1, 10, -1, - 0, -1, 0, - ) - # fmt: on - - -class EDGE_ENHANCE(BuiltinFilter): - name = "Edge-enhance" - # fmt: off - filterargs = (3, 3), 2, 0, ( - -1, -1, -1, - -1, 10, -1, - -1, -1, -1, - ) - # fmt: on - - -class EDGE_ENHANCE_MORE(BuiltinFilter): - name = "Edge-enhance More" - # fmt: off - filterargs = (3, 3), 1, 0, ( - -1, -1, -1, - -1, 9, -1, - -1, -1, -1, - ) - # fmt: on - - -class EMBOSS(BuiltinFilter): - name = "Emboss" - # fmt: off - filterargs = (3, 3), 1, 128, ( - -1, 0, 0, - 0, 1, 0, - 0, 0, 0, - ) - # fmt: on - - -class FIND_EDGES(BuiltinFilter): - name = "Find Edges" - # fmt: off - filterargs = (3, 3), 1, 0, ( - -1, -1, -1, - -1, 8, -1, - -1, -1, -1, - ) - # fmt: on - - -class SHARPEN(BuiltinFilter): - name = "Sharpen" - # fmt: off - filterargs = (3, 3), 16, 0, ( - -2, -2, -2, - -2, 32, -2, - -2, -2, -2, - ) - # fmt: on - - -class SMOOTH(BuiltinFilter): - name = "Smooth" - # fmt: off - filterargs = (3, 3), 13, 0, ( - 1, 1, 1, - 1, 5, 1, - 1, 1, 1, - ) - # fmt: on - - -class SMOOTH_MORE(BuiltinFilter): - name = "Smooth More" - # fmt: off - filterargs = (5, 5), 100, 0, ( - 1, 1, 1, 1, 1, - 1, 5, 5, 5, 1, - 1, 5, 44, 5, 1, - 1, 5, 5, 5, 1, - 1, 1, 1, 1, 1, - ) - # fmt: on - - -class Color3DLUT(MultibandFilter): - """Three-dimensional color lookup table. - - Transforms 3-channel pixels using the values of the channels as coordinates - in the 3D lookup table and interpolating the nearest elements. - - This method allows you to apply almost any color transformation - in constant time by using pre-calculated decimated tables. - - .. versionadded:: 5.2.0 - - :param size: Size of the table. One int or tuple of (int, int, int). - Minimal size in any dimension is 2, maximum is 65. - :param table: Flat lookup table. A list of ``channels * size**3`` - float elements or a list of ``size**3`` channels-sized - tuples with floats. Channels are changed first, - then first dimension, then second, then third. - Value 0.0 corresponds lowest value of output, 1.0 highest. - :param channels: Number of channels in the table. Could be 3 or 4. - Default is 3. - :param target_mode: A mode for the result image. 
Should have not less - than ``channels`` channels. Default is ``None``, - which means that mode wouldn't be changed. - """ - - name = "Color 3D LUT" - - def __init__(self, size, table, channels=3, target_mode=None, **kwargs): - if channels not in (3, 4): - msg = "Only 3 or 4 output channels are supported" - raise ValueError(msg) - self.size = size = self._check_size(size) - self.channels = channels - self.mode = target_mode - - # Hidden flag `_copy_table=False` could be used to avoid extra copying - # of the table if the table is specially made for the constructor. - copy_table = kwargs.get("_copy_table", True) - items = size[0] * size[1] * size[2] - wrong_size = False - - numpy = None - if hasattr(table, "shape"): - try: - import numpy - except ImportError: # pragma: no cover - pass - - if numpy and isinstance(table, numpy.ndarray): - if copy_table: - table = table.copy() - - if table.shape in [ - (items * channels,), - (items, channels), - (size[2], size[1], size[0], channels), - ]: - table = table.reshape(items * channels) - else: - wrong_size = True - - else: - if copy_table: - table = list(table) - - # Convert to a flat list - if table and isinstance(table[0], (list, tuple)): - table, raw_table = [], table - for pixel in raw_table: - if len(pixel) != channels: - msg = ( - "The elements of the table should " - f"have a length of {channels}." - ) - raise ValueError(msg) - table.extend(pixel) - - if wrong_size or len(table) != items * channels: - msg = ( - "The table should have either channels * size**3 float items " - "or size**3 items of channels-sized tuples with floats. " - f"Table should be: {channels}x{size[0]}x{size[1]}x{size[2]}. " - f"Actual length: {len(table)}" - ) - raise ValueError(msg) - self.table = table - - @staticmethod - def _check_size(size): - try: - _, _, _ = size - except ValueError as e: - msg = "Size should be either an integer or a tuple of three integers." - raise ValueError(msg) from e - except TypeError: - size = (size, size, size) - size = [int(x) for x in size] - for size_1d in size: - if not 2 <= size_1d <= 65: - msg = "Size should be in [2, 65] range." - raise ValueError(msg) - return size - - @classmethod - def generate(cls, size, callback, channels=3, target_mode=None): - """Generates new LUT using provided callback. - - :param size: Size of the table. Passed to the constructor. - :param callback: Function with three parameters which correspond - three color channels. Will be called ``size**3`` - times with values from 0.0 to 1.0 and should return - a tuple with ``channels`` elements. - :param channels: The number of channels which should return callback. - :param target_mode: Passed to the constructor of the resulting - lookup table. - """ - size_1d, size_2d, size_3d = cls._check_size(size) - if channels not in (3, 4): - msg = "Only 3 or 4 output channels are supported" - raise ValueError(msg) - - table = [0] * (size_1d * size_2d * size_3d * channels) - idx_out = 0 - for b in range(size_3d): - for g in range(size_2d): - for r in range(size_1d): - table[idx_out : idx_out + channels] = callback( - r / (size_1d - 1), g / (size_2d - 1), b / (size_3d - 1) - ) - idx_out += channels - - return cls( - (size_1d, size_2d, size_3d), - table, - channels=channels, - target_mode=target_mode, - _copy_table=False, - ) - - def transform(self, callback, with_normals=False, channels=None, target_mode=None): - """Transforms the table values using provided callback and returns - a new LUT with altered values. 
- - :param callback: A function which takes old lookup table values - and returns a new set of values. The number - of arguments which function should take is - ``self.channels`` or ``3 + self.channels`` - if ``with_normals`` flag is set. - Should return a tuple of ``self.channels`` or - ``channels`` elements if it is set. - :param with_normals: If true, ``callback`` will be called with - coordinates in the color cube as the first - three arguments. Otherwise, ``callback`` - will be called only with actual color values. - :param channels: The number of channels in the resulting lookup table. - :param target_mode: Passed to the constructor of the resulting - lookup table. - """ - if channels not in (None, 3, 4): - msg = "Only 3 or 4 output channels are supported" - raise ValueError(msg) - ch_in = self.channels - ch_out = channels or ch_in - size_1d, size_2d, size_3d = self.size - - table = [0] * (size_1d * size_2d * size_3d * ch_out) - idx_in = 0 - idx_out = 0 - for b in range(size_3d): - for g in range(size_2d): - for r in range(size_1d): - values = self.table[idx_in : idx_in + ch_in] - if with_normals: - values = callback( - r / (size_1d - 1), - g / (size_2d - 1), - b / (size_3d - 1), - *values, - ) - else: - values = callback(*values) - table[idx_out : idx_out + ch_out] = values - idx_in += ch_in - idx_out += ch_out - - return type(self)( - self.size, - table, - channels=ch_out, - target_mode=target_mode or self.mode, - _copy_table=False, - ) - - def __repr__(self): - r = [ - f"{self.__class__.__name__} from {self.table.__class__.__name__}", - "size={:d}x{:d}x{:d}".format(*self.size), - f"channels={self.channels:d}", - ] - if self.mode: - r.append(f"target_mode={self.mode}") - return "<{}>".format(" ".join(r)) - - def filter(self, image): - from . import Image - - return image.color_lut_3d( - self.mode or image.mode, - Image.Resampling.BILINEAR, - self.channels, - self.size[0], - self.size[1], - self.size[2], - self.table, - ) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/anyio/streams/memory.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/anyio/streams/memory.py deleted file mode 100644 index a6499c13ff36f74d2e217ee996825a13edd6d9fb..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/anyio/streams/memory.py +++ /dev/null @@ -1,279 +0,0 @@ -from __future__ import annotations - -from collections import OrderedDict, deque -from dataclasses import dataclass, field -from types import TracebackType -from typing import Generic, NamedTuple, TypeVar - -from .. 
import ( - BrokenResourceError, - ClosedResourceError, - EndOfStream, - WouldBlock, - get_cancelled_exc_class, -) -from .._core._compat import DeprecatedAwaitable -from ..abc import Event, ObjectReceiveStream, ObjectSendStream -from ..lowlevel import checkpoint - -T_Item = TypeVar("T_Item") -T_co = TypeVar("T_co", covariant=True) -T_contra = TypeVar("T_contra", contravariant=True) - - -class MemoryObjectStreamStatistics(NamedTuple): - current_buffer_used: int #: number of items stored in the buffer - #: maximum number of items that can be stored on this stream (or :data:`math.inf`) - max_buffer_size: float - open_send_streams: int #: number of unclosed clones of the send stream - open_receive_streams: int #: number of unclosed clones of the receive stream - tasks_waiting_send: int #: number of tasks blocked on :meth:`MemoryObjectSendStream.send` - #: number of tasks blocked on :meth:`MemoryObjectReceiveStream.receive` - tasks_waiting_receive: int - - -@dataclass(eq=False) -class MemoryObjectStreamState(Generic[T_Item]): - max_buffer_size: float = field() - buffer: deque[T_Item] = field(init=False, default_factory=deque) - open_send_channels: int = field(init=False, default=0) - open_receive_channels: int = field(init=False, default=0) - waiting_receivers: OrderedDict[Event, list[T_Item]] = field( - init=False, default_factory=OrderedDict - ) - waiting_senders: OrderedDict[Event, T_Item] = field( - init=False, default_factory=OrderedDict - ) - - def statistics(self) -> MemoryObjectStreamStatistics: - return MemoryObjectStreamStatistics( - len(self.buffer), - self.max_buffer_size, - self.open_send_channels, - self.open_receive_channels, - len(self.waiting_senders), - len(self.waiting_receivers), - ) - - -@dataclass(eq=False) -class MemoryObjectReceiveStream(Generic[T_co], ObjectReceiveStream[T_co]): - _state: MemoryObjectStreamState[T_co] - _closed: bool = field(init=False, default=False) - - def __post_init__(self) -> None: - self._state.open_receive_channels += 1 - - def receive_nowait(self) -> T_co: - """ - Receive the next item if it can be done without waiting. - - :return: the received item - :raises ~anyio.ClosedResourceError: if this send stream has been closed - :raises ~anyio.EndOfStream: if the buffer is empty and this stream has been - closed from the sending end - :raises ~anyio.WouldBlock: if there are no items in the buffer and no tasks - waiting to send - - """ - if self._closed: - raise ClosedResourceError - - if self._state.waiting_senders: - # Get the item from the next sender - send_event, item = self._state.waiting_senders.popitem(last=False) - self._state.buffer.append(item) - send_event.set() - - if self._state.buffer: - return self._state.buffer.popleft() - elif not self._state.open_send_channels: - raise EndOfStream - - raise WouldBlock - - async def receive(self) -> T_co: - await checkpoint() - try: - return self.receive_nowait() - except WouldBlock: - # Add ourselves in the queue - receive_event = Event() - container: list[T_co] = [] - self._state.waiting_receivers[receive_event] = container - - try: - await receive_event.wait() - except get_cancelled_exc_class(): - # Ignore the immediate cancellation if we already received an item, so as not to - # lose it - if not container: - raise - finally: - self._state.waiting_receivers.pop(receive_event, None) - - if container: - return container[0] - else: - raise EndOfStream - - def clone(self) -> MemoryObjectReceiveStream[T_co]: - """ - Create a clone of this receive stream. - - Each clone can be closed separately. 
Only when all clones have been closed will the - receiving end of the memory stream be considered closed by the sending ends. - - :return: the cloned stream - - """ - if self._closed: - raise ClosedResourceError - - return MemoryObjectReceiveStream(_state=self._state) - - def close(self) -> None: - """ - Close the stream. - - This works the exact same way as :meth:`aclose`, but is provided as a special case for the - benefit of synchronous callbacks. - - """ - if not self._closed: - self._closed = True - self._state.open_receive_channels -= 1 - if self._state.open_receive_channels == 0: - send_events = list(self._state.waiting_senders.keys()) - for event in send_events: - event.set() - - async def aclose(self) -> None: - self.close() - - def statistics(self) -> MemoryObjectStreamStatistics: - """ - Return statistics about the current state of this stream. - - .. versionadded:: 3.0 - """ - return self._state.statistics() - - def __enter__(self) -> MemoryObjectReceiveStream[T_co]: - return self - - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> None: - self.close() - - -@dataclass(eq=False) -class MemoryObjectSendStream(Generic[T_contra], ObjectSendStream[T_contra]): - _state: MemoryObjectStreamState[T_contra] - _closed: bool = field(init=False, default=False) - - def __post_init__(self) -> None: - self._state.open_send_channels += 1 - - def send_nowait(self, item: T_contra) -> DeprecatedAwaitable: - """ - Send an item immediately if it can be done without waiting. - - :param item: the item to send - :raises ~anyio.ClosedResourceError: if this send stream has been closed - :raises ~anyio.BrokenResourceError: if the stream has been closed from the - receiving end - :raises ~anyio.WouldBlock: if the buffer is full and there are no tasks waiting - to receive - - """ - if self._closed: - raise ClosedResourceError - if not self._state.open_receive_channels: - raise BrokenResourceError - - if self._state.waiting_receivers: - receive_event, container = self._state.waiting_receivers.popitem(last=False) - container.append(item) - receive_event.set() - elif len(self._state.buffer) < self._state.max_buffer_size: - self._state.buffer.append(item) - else: - raise WouldBlock - - return DeprecatedAwaitable(self.send_nowait) - - async def send(self, item: T_contra) -> None: - await checkpoint() - try: - self.send_nowait(item) - except WouldBlock: - # Wait until there's someone on the receiving end - send_event = Event() - self._state.waiting_senders[send_event] = item - try: - await send_event.wait() - except BaseException: - self._state.waiting_senders.pop(send_event, None) # type: ignore[arg-type] - raise - - if self._state.waiting_senders.pop(send_event, None): # type: ignore[arg-type] - raise BrokenResourceError - - def clone(self) -> MemoryObjectSendStream[T_contra]: - """ - Create a clone of this send stream. - - Each clone can be closed separately. Only when all clones have been closed will the - sending end of the memory stream be considered closed by the receiving ends. - - :return: the cloned stream - - """ - if self._closed: - raise ClosedResourceError - - return MemoryObjectSendStream(_state=self._state) - - def close(self) -> None: - """ - Close the stream. - - This works the exact same way as :meth:`aclose`, but is provided as a special case for the - benefit of synchronous callbacks. 
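-
-        A small usage sketch (the names are illustrative; both stream ends
-        support the synchronous context manager protocol shown below)::
-
-            send, receive = anyio.create_memory_object_stream(max_buffer_size=1)
-            with send:
-                send.send_nowait("item")  # buffered; no receiver is needed yet
-            # leaving the block calls close(); receivers drain the buffer,
-            # then get EndOfStream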
- - """ - if not self._closed: - self._closed = True - self._state.open_send_channels -= 1 - if self._state.open_send_channels == 0: - receive_events = list(self._state.waiting_receivers.keys()) - self._state.waiting_receivers.clear() - for event in receive_events: - event.set() - - async def aclose(self) -> None: - self.close() - - def statistics(self) -> MemoryObjectStreamStatistics: - """ - Return statistics about the current state of this stream. - - .. versionadded:: 3.0 - """ - return self._state.statistics() - - def __enter__(self) -> MemoryObjectSendStream[T_contra]: - return self - - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> None: - self.close() diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/text/run.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/text/run.py deleted file mode 100644 index 97d6da7db76c52f11d5aec8f8032c51806a37fdc..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/text/run.py +++ /dev/null @@ -1,191 +0,0 @@ -# encoding: utf-8 - -""" -Run-related proxy objects for python-docx, Run in particular. -""" - -from __future__ import absolute_import, print_function, unicode_literals - -from ..enum.style import WD_STYLE_TYPE -from ..enum.text import WD_BREAK -from .font import Font -from ..shape import InlineShape -from ..shared import Parented - - -class Run(Parented): - """ - Proxy object wrapping ``<w:r>`` element. Several of the properties on Run - take a tri-state value, |True|, |False|, or |None|. |True| and |False| - correspond to on and off respectively. |None| indicates the property is - not specified directly on the run and its effective value is taken from - the style hierarchy. - """ - def __init__(self, r, parent): - super(Run, self).__init__(parent) - self._r = self._element = self.element = r - - def add_break(self, break_type=WD_BREAK.LINE): - """ - Add a break element of *break_type* to this run. *break_type* can - take the values `WD_BREAK.LINE`, `WD_BREAK.PAGE`, and - `WD_BREAK.COLUMN` where `WD_BREAK` is imported from `docx.enum.text`. - *break_type* defaults to `WD_BREAK.LINE`. - """ - type_, clear = { - WD_BREAK.LINE: (None, None), - WD_BREAK.PAGE: ('page', None), - WD_BREAK.COLUMN: ('column', None), - WD_BREAK.LINE_CLEAR_LEFT: ('textWrapping', 'left'), - WD_BREAK.LINE_CLEAR_RIGHT: ('textWrapping', 'right'), - WD_BREAK.LINE_CLEAR_ALL: ('textWrapping', 'all'), - }[break_type] - br = self._r.add_br() - if type_ is not None: - br.type = type_ - if clear is not None: - br.clear = clear - - def add_picture(self, image_path_or_stream, width=None, height=None): - """ - Return an |InlineShape| instance containing the image identified by - *image_path_or_stream*, added to the end of this run. - *image_path_or_stream* can be a path (a string) or a file-like object - containing a binary image. If neither width nor height is specified, - the picture appears at its native size. If only one is specified, it - is used to compute a scaling factor that is then applied to the - unspecified dimension, preserving the aspect ratio of the image. The - native size of the picture is calculated using the dots-per-inch - (dpi) value specified in the image file, defaulting to 72 dpi if no - value is specified, as is often the case. 
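-
-        A short sketch (the file name and size here are illustrative)::
-
-            from docx.shared import Inches
-            run.add_picture('chart.png', width=Inches(2.0))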
- """ - inline = self.part.new_pic_inline(image_path_or_stream, width, height) - self._r.add_drawing(inline) - return InlineShape(inline) - - def add_tab(self): - """ - Add a ``<w:tab/>`` element at the end of the run, which Word - interprets as a tab character. - """ - self._r._add_tab() - - def add_text(self, text): - """ - Returns a newly appended |_Text| object (corresponding to a new - ``<w:t>`` child element) to the run, containing *text*. Compare with - the possibly more friendly approach of assigning text to the - :attr:`Run.text` property. - """ - t = self._r.add_t(text) - return _Text(t) - - @property - def bold(self): - """ - Read/write. Causes the text of the run to appear in bold. - """ - return self.font.bold - - @bold.setter - def bold(self, value): - self.font.bold = value - - def clear(self): - """ - Return reference to this run after removing all its content. All run - formatting is preserved. - """ - self._r.clear_content() - return self - - @property - def font(self): - """ - The |Font| object providing access to the character formatting - properties for this run, such as font name and size. - """ - return Font(self._element) - - @property - def italic(self): - """ - Read/write tri-state value. When |True|, causes the text of the run - to appear in italics. - """ - return self.font.italic - - @italic.setter - def italic(self, value): - self.font.italic = value - - @property - def style(self): - """ - Read/write. A |_CharacterStyle| object representing the character - style applied to this run. The default character style for the - document (often `Default Character Font`) is returned if the run has - no directly-applied character style. Setting this property to |None| - removes any directly-applied character style. - """ - style_id = self._r.style - return self.part.get_style(style_id, WD_STYLE_TYPE.CHARACTER) - - @style.setter - def style(self, style_or_name): - style_id = self.part.get_style_id( - style_or_name, WD_STYLE_TYPE.CHARACTER - ) - self._r.style = style_id - - @property - def text(self): - """ - String formed by concatenating the text equivalent of each run - content child element into a Python string. Each ``<w:t>`` element - adds the text characters it contains. A ``<w:tab/>`` element adds - a ``\\t`` character. A ``<w:cr/>`` or ``<w:br>`` element each add - a ``\\n`` character. Note that a ``<w:br>`` element can indicate - a page break or column break as well as a line break. All ``<w:br>`` - elements translate to a single ``\\n`` character regardless of their - type. All other content child elements, such as ``<w:drawing>``, are - ignored. - - Assigning text to this property has the reverse effect, translating - each ``\\t`` character to a ``<w:tab/>`` element and each ``\\n`` or - ``\\r`` character to a ``<w:cr/>`` element. Any existing run content - is replaced. Run formatting is preserved. - """ - return self._r.text - - @text.setter - def text(self, text): - self._r.text = text - - @property - def underline(self): - """ - The underline style for this |Run|, one of |None|, |True|, |False|, - or a value from :ref:`WdUnderline`. A value of |None| indicates the - run has no directly-applied underline value and so will inherit the - underline value of its containing paragraph. Assigning |None| to this - property removes any directly-applied underline value. A value of - |False| indicates a directly-applied setting of no underline, - overriding any inherited value. A value of |True| indicates single - underline. 
The values from :ref:`WdUnderline` are used to specify - other outline styles such as double, wavy, and dotted. - """ - return self.font.underline - - @underline.setter - def underline(self, value): - self.font.underline = value - - -class _Text(object): - """ - Proxy object wrapping ``<w:t>`` element. - """ - def __init__(self, t_elm): - super(_Text, self).__init__() - self._t = t_elm diff --git a/spaces/cihyFjudo/fairness-paper-search/EDM Prime Time Drops WAV MIDI The Ultimate Pack for EDM Producers.md b/spaces/cihyFjudo/fairness-paper-search/EDM Prime Time Drops WAV MIDI The Ultimate Pack for EDM Producers.md deleted file mode 100644 index 643fc0a1655fdb62cb415edc95af53d2bc52fbab..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/EDM Prime Time Drops WAV MIDI The Ultimate Pack for EDM Producers.md +++ /dev/null @@ -1,5 +0,0 @@ -<br /> -<p>Loaded with 450+ Mb of epic prime time sounds and samples this collection features low-end grooves, stabby chords, epic synth sounds, top and percussion loops, pitched and processed vocal hooks, FX and MIDI files for easy and unique sound layering.</p> -<h2>EDM Prime Time Drops WAV MIDI</h2><br /><p><b><b>Download File</b> ○○○ <a href="https://tinurli.com/2uwiPB">https://tinurli.com/2uwiPB</a></b></p><br /><br /> aaccfb2cb3<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Read Adlers Physiology of the Eye 11th Edition PDF Free 24 The Classic Textbook on Ocular Anatomy Physiology and Pathology.md b/spaces/cihyFjudo/fairness-paper-search/Read Adlers Physiology of the Eye 11th Edition PDF Free 24 The Classic Textbook on Ocular Anatomy Physiology and Pathology.md deleted file mode 100644 index 7e5001ee0db80c786ee6d7277f984acecd7667c8..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Read Adlers Physiology of the Eye 11th Edition PDF Free 24 The Classic Textbook on Ocular Anatomy Physiology and Pathology.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>adler's physiology of the eye 11th edition pdf free 24</h2><br /><p><b><b>Download Zip</b> ✦✦✦ <a href="https://tinurli.com/2uwhHY">https://tinurli.com/2uwhHY</a></b></p><br /><br /> - - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/cihyFjudo/fairness-paper-search/Silver Grapple Download Now Save the Survivors of a Mysterious Accident!.md b/spaces/cihyFjudo/fairness-paper-search/Silver Grapple Download Now Save the Survivors of a Mysterious Accident!.md deleted file mode 100644 index 9304d11bc6bb2b20f746fcadbba59a9ce6af1961..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Silver Grapple Download Now Save the Survivors of a Mysterious Accident!.md +++ /dev/null @@ -1,18 +0,0 @@ - -<p>The Meadowsend sign at the entrance to the landing.The tour concluded with a visit to the timber landing and log pile to get a close-up look at the industrial logging machinery parked on the landing: a yard loader and slasher, a Morbark chipper and two John Deere grapple skidders to pull hitches of whole trees to the landing for processing into logs and other forest products. More than a dozen different product sorts were represented by timber piled neatly on the timber landing and awaiting transport to various mills for processing into dimensional lumber or low grade forest products such as fuelwood or wood pulp. Foresters talked about the log quality and volume as well as economic value. 
Incentives to create the highest quality range of products from each tree harvested drive the careful processing of logs entering the landing. This is where - according to Jeremy Turner of Meadowsend Forestry Consultants - "money is either made or lost" while working to obtain the highest value and range of products based on the quality of the logs being cut.</p>
-<p>After acquiring it, use the grapple point on the ceiling of <i>Ore Processing</i> to access <i>Waste Disposal</i> directly across the gap. Just leap out and tap the L-trigger as you get close enough to it. Take the <i>Waste Disposal</i>, a water-filled ball puzzle, out to the <i>Main Quarry</i>. You should consider saving your progress. Right now you want to take the <i>Transport to Tallon Overworld South</i>, which is just outside of the Quarry. When you arrive in Tallon Overworld, set a path for the <i>Great Tree Hall</i>. Begin platforming up around the edge until you reach the Morph Ball spinner. You should have already used it, and the ledge just above it will be free to pass through. Do so and position yourself in the upper part of the <i>Great Tree Hall</i>. Continue jumping on the edge of the hall until you find the corkscrew-shaped Spider Ball track. Use it to arrive at the highest door in the entire area -- an Ice Beam door that leads to the <i>Life Grove Tunnel</i>. Flip on your scanner in this small room and you should locate a rock face that has been rendered weak due to the wet conditions. Power Bomb it to press forward into the <i>Life Grove</i>. (There is a missile expansion in LIFE GROVE TUNNEL at the top of the half-pipe structure. Just boost yourself up there and place a morph ball bomb in the top center and you will fall into a hole and get the expansion.) There you will find the <b>X-Ray Visor </b>, which allows you to see through walls and spot otherwise invisible objects. As you're trapped inside this secret area, you'll need to turn on the X-Ray Visor and have a look around the place. You should see that there is much scenery revealed beyond the walls of this holding -- much of which does not show up on your map.</p>
-<p>Grab it and jump back down to the unopened door, which leads into <i>Transport Tunnel D</i> and eventually the <i>Transport to Chozo Ruins South</i>. As soon as you arrive in the Chozo Ruins, go right around the corner to the <i>Transport to Tallon Overworld East</i> (Morph Ball tunnel behind the save station). In Tallon, roll through <i>Transport Tunnel C</i> and into the <i>Overgrown Cavern</i>. The "overgrowth" is Venom Weed. Fire off a few rounds at it and roll through the Morph Ball-sized opening to receive a <b>Missile Expansion </b>. You'll next arrive in the <i>Frigate Crash Site</i>, where you'll spot two grapple points to help you easily navigate across the water; but that's not all they're for. The one closer to the <i>Frigate Access Tunnel</i> entrance can be used to grab the <b>Missile Expansion </b> that is embedded into the cliff-side underwater. Go underwater first so you can locate the expansion, then go back up and use the grapple. Just try to line yourself up as best you can and swing right into the rock face, hopefully to land in the small cave.</p>
-<p>No doubt boiling with excitement, you'll want to drop straight down into the water below to cool off. And, while you're there, check your Artifact data.
You should see that <b>Lifegiver </b>, the fifth <b>Artifact </b> of twelve may be located very close. Indeed, simply access the Wave Beam door hidden on the other side of the pool of water and you will find it in the <i>Tower Chamber</i>. With all that out of the way, go back to <i>Tallon Canyon</i> via the <i>Transport to Tallon Overworld North</i>. Your next checkpoint is the <i>Root Cave</i>. As soon as you enter you should find a grapple point staring directly at you. Use it to swing to the other side and begin jumping upward. You will hit a dead end shortly after crossing a small bridge, but turn on your X-Ray Visor to uncover otherwise invisible platforms. As you arrive on the last platform, you will see a red door ahead of you. No, you can't go through those yet. However, turn just to the left -- with your X-Ray Visor activated -- and you will see a <b>Missile Expansion </b> hiding behind a wall of foliage. After nabbing the power-up, fall all the way down to the bottom and make a move for the <i>Transport to Magmoor Caverns East</i>. When you arrive in Magmoor, use <i>Transport Tunnel B</i> to make your way over to the <i>Shore Tunnel</i>. The hull is made of bendezium, which has been stressed over time by the intense heat. Set off a Power Bomb in the center of the tunnel to shatter the surrounding glass. This will let you jump beneath the bridgeway to acquire the <b>Ice Spreader </b>. Like the Wavebuster, the Ice Spreader is an optional, yet very powerful upgrade. It can freeze multiple enemies at once.</p> -<p>Before you exit <i>Fungal Hall B</i>, jump down just below the door out and use your Thermal Visor to reveal a <b>Missile Expansion </b> hidden beneath the ground; a ring of mushrooms grow around it. Use a Power Bomb to blast away the rock cover above it. Then proceed through the door into <i>Quarantine Access B</i>. There are a few invisible Bombus lurking around, so use your X-Ray Visor if you hope to locate them. Once in <i>Metroid Quarantine B</i>, use the visible Spider Ball track to access the higher platforms. From there, Grapple Beam your way over to the other side and deactivate the force field by scanning the nearby terminal. Directly through that large opening you should see another computer terminal with new <b>Pirate Data </b>. After downloading it, turn directly around and you should locate a cordite shaft, weakened by stress fractures. Use a Super Missile to blast through it and nab yourself another <b>Missile Expansion </b>. Once you've done that, access the Plasma Beam door on the same floor and save at the <i>Save Station Mines C</i>. You'll need all the help you can get, as a boss battle is about to emerge. Head up to the top floor and into <i>Elite Quarters Access</i>. You will find a gate blocking your way and the controls are frozen in frigidite. Just use the Plasma Beam to melt the shell and continue into <i>Elite Quarters</i> where the boss awaits you.</p> -<p>Turn on your Scan Visor as you enter and download the data from her stasis chamber as you approach it. It will tell you a lot about her tactics; the Omega Pirate will disappear often to repaint herself with Phazon, thus healing her wounds. This is her weakness, and you are going to exploit it to beat her.</p> -<p>Sooner, hopefully rather than later, she will die. In the process, her body will crash down upon you disintegrating into a bubbling soup of Phazon. Samus' Power Suit has survived, but the overwhelming potency of the mutagen will change her suit in the process. 
But, it's for the better. You will now wear the ultra-cool black, silver, and red <b>Phazon Suit</b>.</p>
-<p>In "Weirdmageddon 3: Take Back The Falls," after the Resistance breaks into the Fearamid, Mabel uses it to grapple up to the top of Bill's Throne. She finds Ford, and throws the grappling hook down to Dipper, who joins her. Later, she uses it to help her and Dipper escape from Bill. In the episode's credits, Stan grabs Ford and uses the grappling hook to carry them both up and out of the screen.</p>
-<p></p>
-<p>Rotobec is recognized worldwide for its innovation. All of our grapples are made with only the highest quality steel and components to provide you with the best and most durable products out there. Combined with our exceptional workmanship, we ensure the longevity of your attachment with our top engineered designs and meticulous quality control. We have put our heart and soul into every detail to bring you the best and highest performing attachments in the industry.</p>
-<p>All Rotobec grapples are recognized for their unique shape that allows material to easily roll up the inner surface of the jaw. Our engineering team has invested an incredible amount of time into perfecting the jaw curvature of every grapple. Our jaw design allows for optimal pickup. This feature allows excavator operators to increase their efficiency and outlift the competition every time.</p>
-<p>The grapple rake is designed with Grade 100 steel teeth, making it nearly three times stronger than regular steel.<ul> <li>Greaseable hinge points ensure long life</li> <li>Optimal spacing between teeth allows unwanted debris to fall through</li> <li>Large, curved teeth provide optimal material grasping and raking</li></ul></p>
-<p>This heavy-duty implement moves difficult-to-handle material like scrap, waste, and rocks with ease.<ul> <li>Independent grapples grab uneven loads</li> <li>Gusseted tines ensure strength and longevity</li> <li>Two heavy-duty cylinders</li></ul></p>
-<p>There are several characteristics of the current milieu that facilitate a perfect storm of stressors. These traumas are chronic events with an ambiguous endpoint. We do not know how bad things will get, nor when recovery can truly begin. Individuals must grapple with intense direct exposure to cascading events (for example, personal illness or loss, social isolation, economic loss, violent policing), with varying and sometimes conflicting policies dictating public response. Concurrently, these events have been broadcast in real time, as they unfolded, on traditional and social media, with individuals watching news coverage repeatedly and across multiple mediums, compounding their exposure. News has been almost entirely bad, with escalating intensity.
The overlay of sensationalized media coverage in the context of repeated direct exposure to adversity is likely creating an additional crisis for public mental health.</p> aaccfb2cb3<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/Universal BIOS Backup ToolKit 2.0.zip utorrent A Simple and Effective Solution for BIOS Backup.md b/spaces/cihyFjudo/fairness-paper-search/Universal BIOS Backup ToolKit 2.0.zip utorrent A Simple and Effective Solution for BIOS Backup.md
deleted file mode 100644
index c152d48ff3594f30057acbee5ca29aa3aaccea54..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Universal BIOS Backup ToolKit 2.0.zip utorrent A Simple and Effective Solution for BIOS Backup.md
+++ /dev/null
@@ -1,6 +0,0 @@
-<h2>Universal BIOS Backup ToolKit 2.0.zip utorrent</h2><br /><p><b><b>DOWNLOAD</b> ⇒ <a href="https://tinurli.com/2uwiAw">https://tinurli.com/2uwiAw</a></b></p><br /><br />
-
- aaccfb2cb3<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/cjayic/sovits-overwatch2/data_utils.py b/spaces/cjayic/sovits-overwatch2/data_utils.py
deleted file mode 100644
index b08071a12ce829dade108a355a1da08093b193c5..0000000000000000000000000000000000000000
--- a/spaces/cjayic/sovits-overwatch2/data_utils.py
+++ /dev/null
@@ -1,331 +0,0 @@
-import time
-import os
-import random
-import numpy as np
-import torch
-import torch.utils.data
-
-import commons
-from mel_processing import spectrogram_torch
-from utils import load_wav_to_torch, load_unit_audio_pairs
-
-
-class UnitAudioLoader(torch.utils.data.Dataset):
-    '''
-    1) loads audio and speech units
-    2) computes spectrograms
-    '''
-
-    def __init__(self, unit_audio_pairs, hparams, train=True):
-        self.unit_audio_pairs = load_unit_audio_pairs(unit_audio_pairs)
-        self.max_wav_value = hparams.max_wav_value
-        self.sampling_rate = hparams.sampling_rate
-        self.filter_length = hparams.filter_length
-        self.hop_length = hparams.hop_length
-        self.win_length = hparams.win_length
-        self.sampling_rate = hparams.sampling_rate
-        random.seed(1234)
-        random.shuffle(self.unit_audio_pairs)
-        self._filter()
-
-    def _filter(self):
-        lengths = []
-        for audio_path, _ in self.unit_audio_pairs:
-            lengths.append(os.path.getsize(audio_path) // (2 * self.hop_length))
-        self.lengths = lengths
-
-    def get_unit_audio_pair(self, unit_audio_pairs):
-        audio_path, unit_path = unit_audio_pairs[0], unit_audio_pairs[1]
-        unit = np.load(unit_path)
-        unit = torch.FloatTensor(unit)
-        # unit = torch.LongTensor(unit)
-        spec, wav = self.get_audio(audio_path)
-        return (unit, spec, wav)
-
-    def get_audio(self, filename):
-        audio, sampling_rate = load_wav_to_torch(filename)
-        if sampling_rate != self.sampling_rate:
-            raise ValueError("{} SR doesn't match target {} SR".format(
-                sampling_rate, self.sampling_rate))
-        audio_norm = audio / self.max_wav_value
-        audio_norm = audio_norm.unsqueeze(0)
-        spec_filename = filename.replace(".wav", ".spec.pt")
-        if os.path.exists(spec_filename):
-            spec = torch.load(spec_filename)
-        else:
-            spec = spectrogram_torch(audio_norm, self.filter_length,
-                self.sampling_rate, self.hop_length, self.win_length,
-                center=False)
-            spec = torch.squeeze(spec, 0)
-            torch.save(spec, spec_filename)
-        return spec, audio_norm
-
-    def __getitem__(self, index):
-        return self.get_unit_audio_pair(self.unit_audio_pairs[index])
-
-    def __len__(self):
-        return len(self.unit_audio_pairs)
-
-
-class UnitAudioCollate():
-    def __init__(self, return_ids=False):
-        self.return_ids = return_ids
-
-    def __call__(self, batch):
-        """Collates a training batch of speech units and audio
-        PARAMS
-        ------
-        batch: [unit, spec_normalized, wav_normalized]
-        """
-        # Right zero-pad all unit sequences to the max input length
-        _, ids_sorted_decreasing = torch.sort(
-            torch.LongTensor([x[1].size(1) for x in batch]),
-            dim=0, descending=True)
-
-        max_unit_len = max([len(x[0]) for x in batch])
-        max_spec_len = max([x[1].size(1) for x in batch])
-        max_wav_len = max([x[2].size(1) for x in batch])
-
-        unit_lengths = torch.LongTensor(len(batch))
-        spec_lengths = torch.LongTensor(len(batch))
-        wav_lengths = torch.LongTensor(len(batch))
-
-        unit_padded = torch.FloatTensor(len(batch), max_unit_len, 256)
-        spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
-        wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
-        unit_padded.zero_()
-        spec_padded.zero_()
-        wav_padded.zero_()
-        for i in range(len(ids_sorted_decreasing)):
-            row = batch[ids_sorted_decreasing[i]]
-
-            unit = row[0]
-            unit_padded[i, :unit.size(0)] = unit
-            unit_lengths[i] = unit.size(0)
-
-            spec = row[1]
-            spec_padded[i, :, :spec.size(1)] = spec
-            spec_lengths[i] = spec.size(1)
-
-            wav = row[2]
-            wav_padded[i, :, :wav.size(1)] = wav
-            wav_lengths[i] = wav.size(1)
-
-        if self.return_ids:
-            return unit_padded, unit_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing
-        return unit_padded, unit_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths
-
-"""Multi speaker version"""
-class UnitAudioSpeakerLoader(torch.utils.data.Dataset):
-    """
-    1) loads audio, speaker_id, speech unit pairs
-    2) computes spectrograms from audio files.
-    """
-    def __init__(self, unit_sid_audio_pairs, hparams):
-        self.unit_sid_audio_pairs = load_unit_audio_pairs(unit_sid_audio_pairs)
-        self.max_wav_value = hparams.max_wav_value
-        self.sampling_rate = hparams.sampling_rate
-        self.filter_length = hparams.filter_length
-        self.hop_length = hparams.hop_length
-        self.win_length = hparams.win_length
-        self.sampling_rate = hparams.sampling_rate
-
-        random.seed(1234)
-        random.shuffle(self.unit_sid_audio_pairs)
-        self._filter()
-
-    def _filter(self):
-        lengths = []
-        for audio_path, _, _ in self.unit_sid_audio_pairs:
-            lengths.append(os.path.getsize(audio_path) // (2 * self.hop_length))
-        self.lengths = lengths
-
-    def get_unit_sid_audio_pair(self, unit_sid_audio_pair):
-        # separate audio path, speaker id and unit path
-        audio_path, sid, unit_path = unit_sid_audio_pair[0], unit_sid_audio_pair[1], unit_sid_audio_pair[2]
-        unit = np.load(unit_path)
-        unit = torch.FloatTensor(unit)
-        # unit = torch.LongTensor(unit)
-        spec, wav = self.get_audio(audio_path)
-        sid = self.get_sid(sid)
-        return (unit, spec, wav, sid)
-
-    def get_audio(self, filename):
-        audio, sampling_rate = load_wav_to_torch(filename)
-        if sampling_rate != self.sampling_rate:
-            raise ValueError("{} SR doesn't match target {} SR".format(
-                sampling_rate, self.sampling_rate))
-        audio_norm = audio / self.max_wav_value
-        audio_norm = audio_norm.unsqueeze(0)
-        spec_filename = filename.replace(".wav", ".spec.pt")
-        if os.path.exists(spec_filename):
-            spec = torch.load(spec_filename)
-        else:
-            spec = spectrogram_torch(audio_norm, self.filter_length,
-                self.sampling_rate, self.hop_length, self.win_length,
-                center=False)
-            spec = torch.squeeze(spec, 0)
-            torch.save(spec, spec_filename)
-        return spec, audio_norm
-
-    def get_sid(self, sid):
-        sid = torch.LongTensor([int(sid)])
-        return sid
-
-    def __getitem__(self, index):
-        return self.get_unit_sid_audio_pair(self.unit_sid_audio_pairs[index])
-
-    def __len__(self):
-        return len(self.unit_sid_audio_pairs)
-
-class UnitAudioSpeakerCollate():
-    """ Zero-pads model inputs and targets
-    """
-    def __init__(self, return_ids=False):
-        self.return_ids = return_ids
-
-    def __call__(self, batch):
-        """Collates a training batch of speech units, audio and speaker identities
-        PARAMS
-        ------
-        batch: [unit, spec_normalized, wav_normalized, sid]
-        """
-        # Right zero-pad all unit sequences to the max input length
-        _, ids_sorted_decreasing = torch.sort(
-            torch.LongTensor([x[1].size(1) for x in batch]),
-            dim=0, descending=True)
-
-        max_unit_len = max([len(x[0]) for x in batch])
-        max_spec_len = max([x[1].size(1) for x in batch])
-        max_wav_len = max([x[2].size(1) for x in batch])
-
-        unit_lengths = torch.LongTensor(len(batch))
-        spec_lengths = torch.LongTensor(len(batch))
-        wav_lengths = torch.LongTensor(len(batch))
-        sid = torch.LongTensor(len(batch))
-
-        unit_padded = torch.FloatTensor(len(batch), max_unit_len, 256)
-        spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
-        wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
-        unit_padded.zero_()
-        spec_padded.zero_()
-        wav_padded.zero_()
-        for i in range(len(ids_sorted_decreasing)):
-            row = batch[ids_sorted_decreasing[i]]
-
-            unit = row[0]
-            unit_padded[i, :unit.size(0)] = unit
-            unit_lengths[i] = unit.size(0)
-
-            spec = row[1]
-            spec_padded[i, :, :spec.size(1)] = spec
-            spec_lengths[i] = spec.size(1)
-
-            wav = row[2]
-            wav_padded[i, :, :wav.size(1)] = wav
-            wav_lengths[i] = wav.size(1)
-
-            sid[i] = row[3]
-
-        if self.return_ids:
-            return unit_padded, unit_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing
-        return unit_padded, unit_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid
-
-class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
-    """
-    Maintain similar input lengths in a batch.
-    Length groups are specified by boundaries.
-    Ex) boundaries = [b1, b2, b3] -> each x is included in either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
-
-    It removes samples which are not included in the boundaries.
-    Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 is discarded.
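-
-    A minimal usage sketch (argument values are illustrative; the dataset
-    must expose a ``lengths`` list, as the loaders above do)::
-
-        sampler = DistributedBucketSampler(dataset, batch_size=4,
-            boundaries=[32, 300, 400, 500], num_replicas=1, rank=0)
-        loader = torch.utils.data.DataLoader(dataset, batch_sampler=sampler)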
- """ - - def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): - super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) - self.lengths = dataset.lengths - self.batch_size = batch_size - self.boundaries = boundaries - - self.buckets, self.num_samples_per_bucket = self._create_buckets() - self.total_size = sum(self.num_samples_per_bucket) - self.num_samples = self.total_size // self.num_replicas - - def _create_buckets(self): - buckets = [[] for _ in range(len(self.boundaries) - 1)] - for i in range(len(self.lengths)): - length = self.lengths[i] - idx_bucket = self._bisect(length) - if idx_bucket != -1: - buckets[idx_bucket].append(i) - - for i in range(len(buckets) - 1, 0, -1): - if len(buckets[i]) == 0: - buckets.pop(i) - self.boundaries.pop(i + 1) - - num_samples_per_bucket = [] - for i in range(len(buckets)): - len_bucket = len(buckets[i]) - total_batch_size = self.num_replicas * self.batch_size - rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size - num_samples_per_bucket.append(len_bucket + rem) - return buckets, num_samples_per_bucket - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - - indices = [] - if self.shuffle: - for bucket in self.buckets: - indices.append(torch.randperm(len(bucket), generator=g).tolist()) - else: - for bucket in self.buckets: - indices.append(list(range(len(bucket)))) - - batches = [] - for i in range(len(self.buckets)): - bucket = self.buckets[i] - len_bucket = len(bucket) - ids_bucket = indices[i] - num_samples_bucket = self.num_samples_per_bucket[i] - - # add extra samples to make it evenly divisible - rem = num_samples_bucket - len_bucket - ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] - - # subsample - ids_bucket = ids_bucket[self.rank::self.num_replicas] - - # batching - for j in range(len(ids_bucket) // self.batch_size): - batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]] - batches.append(batch) - - if self.shuffle: - batch_ids = torch.randperm(len(batches), generator=g).tolist() - batches = [batches[i] for i in batch_ids] - self.batches = batches - - assert len(self.batches) * self.batch_size == self.num_samples - return iter(self.batches) - - def _bisect(self, x, lo=0, hi=None): - if hi is None: - hi = len(self.boundaries) - 1 - - if hi > lo: - mid = (hi + lo) // 2 - if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]: - return mid - elif x <= self.boundaries[mid]: - return self._bisect(x, lo, mid) - else: - return self._bisect(x, mid + 1, hi) - else: - return -1 - - def __len__(self): - return self.num_samples // self.batch_size diff --git a/spaces/clement13430/RIOT_GAME/app.py b/spaces/clement13430/RIOT_GAME/app.py deleted file mode 100644 index 6d893e7baf812b503447633dc8c1abd92386f6f1..0000000000000000000000000000000000000000 --- a/spaces/clement13430/RIOT_GAME/app.py +++ /dev/null @@ -1,223 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Sun Jan 15 20:36:24 2023 - -@author: cleme -""" - -import requests -import numpy as np -import time -import tensorflow as tf -import pdb -import sys -import urllib, json -from tensorflow import keras -from tensorflow.keras import layers -import datetime -from datetime import date -from tqdm import tqdm -import matplotlib.pyplot as plt -import random -from keras import layers, initializers -import os -import gradio as gr - - -url_all_queue_Id = 
'https://static.developer.riotgames.com/docs/lol/queues.json' -queue_Id_solo_duo = 420 -url = 'http://ddragon.leagueoflegends.com/cdn/12.23.1/data/en_US/champion.json' -response = urllib.request.urlopen(url) -all_champions = json.loads(response.read()) -number_of_champions = len(all_champions['data']) -champ_Id = {} -for i, champions in enumerate(all_champions['data'].keys()): - champ_Id[str(champions).lower()] = i - - -model_to_use = keras.models.load_model('default_model_single_layer') - - -def normalization_division(division, above_diamond): - if above_diamond == True: - return 0 - if division == 'I': - mult = 3 - if division == 'II': - mult = 2 - if division == 'III': - mult = 1 - if division == 'IV': - mult = 0 - - return mult - -def identity(x): - return x - -def normalization_rank(info, function = identity): - mult_rank = 0 - tier = info['tier'] - division = info['rank'] - LP = info['leaguePoints'] - if tier == 'IRON': - mult_rank = 0 - mult_div = normalization_division(division, False) - if tier == 'BRONZE': - mult_rank = 1 - mult_div = normalization_division(division, False) - if tier == 'SILVER': - mult_rank = 2 - mult_div = normalization_division(division, False) - if tier == 'GOLD': - mult_rank = 3 - mult_div = normalization_division(division, False) - if tier == 'PLATINUM': - mult_rank = 4 - mult_div = normalization_division(division, False) - if tier == 'DIAMOND': - mult_rank = 5 - mult_div = normalization_division(division, False) - if tier == 'MASTER' or tier == 'CHALLENGER' or tier == 'GRANDMASTER' : - mult_rank = 6 - mult_div = normalization_division(division, True) - rank = mult_rank * 400 + mult_div * 100 + LP - rank = function(rank)/ function(4200) - - return rank - - -def define_player(tier, division, Leaguepoints, champion_name): - info = {'tier' : tier, 'rank': division, 'leaguePoints': Leaguepoints} - relative_LP = normalization_rank(info) - if champion_name == 'wukong': - champion_name = 'monkeyking' - champ_id = champ_Id[champion_name] - dic = {'relative_LP' : relative_LP, 'champ_id': champ_id, 'champion_name': champion_name} - return dic - - -def creation_match(blue_team, red_team): - res = [] - for team in ([blue_team, red_team]): - result = [] - result += [team['top']['relative_LP']] - result += [team['jungle']['relative_LP']] - result += [team['mid']['relative_LP']] - result += [team['bottom']['relative_LP']] - result += [team['utility']['relative_LP']] - - tab = number_of_champions * [0] - positions = ['top', 'jungle', 'mid', 'bottom','utility'] - for pos in positions: - tab[team[pos]['champ_id']] = 1 - - result += tab - res.append(result) - return res - - -def predict_winner_interface(tier_top_blue, div_top_blue, lp_top_blue, champ_top_blue, tier_jungle_blue, div_jungle_blue, - lp_jungle_blue, champ_jungle_blue, tier_mid_blue, div_mid_blue, lp_mid_blue, champ_mid_blue, - tier_bottom_blue, div_bottom_blue, lp_bottom_blue, champ_bottom_blue, tier_utility_blue, - div_utility_blue, lp_utility_blue, champ_utility_blue, tier_top_red, div_top_red, lp_top_red, - champ_top_red, tier_jungle_red, div_jungle_red, lp_jungle_red, champ_jungle_red, tier_mid_red, - div_mid_red, lp_mid_red, champ_mid_red, tier_bottom_red, div_bottom_red, lp_bottom_red, - champ_bottom_red, tier_utility_red, div_utility_red, lp_utility_red, champ_utility_red - ): - all_champs = [champ_top_blue, champ_jungle_blue, champ_mid_blue, champ_bottom_blue, champ_utility_blue, champ_top_red, - champ_jungle_red, champ_mid_red, champ_bottom_red, champ_utility_red] - for champ in all_champs: - if 
all_champs.count(champ) > 1:
-            sys.exit('Error: a champion can only be chosen once')
-
-    team_1 = {'top' : define_player(tier_top_blue, div_top_blue, lp_top_blue, champ_top_blue),
-              'jungle' : define_player(tier_jungle_blue, div_jungle_blue, lp_jungle_blue, champ_jungle_blue),
-              'mid' : define_player(tier_mid_blue, div_mid_blue, lp_mid_blue, champ_mid_blue),
-              'bottom' : define_player(tier_bottom_blue, div_bottom_blue, lp_bottom_blue, champ_bottom_blue),
-              'utility' : define_player(tier_utility_blue, div_utility_blue, lp_utility_blue, champ_utility_blue)}
-
-    team_2 = {'top' : define_player(tier_top_red, div_top_red, lp_top_red, champ_top_red),
-              'jungle' : define_player(tier_jungle_red, div_jungle_red, lp_jungle_red, champ_jungle_red),
-              'mid' : define_player(tier_mid_red, div_mid_red, lp_mid_red, champ_mid_red),
-              'bottom' : define_player(tier_bottom_red, div_bottom_red, lp_bottom_red, champ_bottom_red),
-              'utility' : define_player(tier_utility_red, div_utility_red, lp_utility_red, champ_utility_red)}
-
-    game = creation_match(team_1, team_2)
-    result = model_to_use.predict([game])[0][0]
-    winner = round(result)
-    if winner == 1:
-        equipe_winner = 'red'
-    if winner == 0:
-        equipe_winner = 'blue'
-        result = 1 - result
-    text = f'the {equipe_winner} team will win: final percentage = {100 * result}%'
-    return text
-
-
-all_tiers = ['IRON', 'BRONZE', 'SILVER', 'GOLD', 'PLATINUM', 'DIAMOND', 'MASTER', 'GRANDMASTER', 'CHALLENGER']
-all_division = ['IV', 'III', 'II', 'I']
-
-
-demo = gr.Interface(
-    fn=predict_winner_interface,
-    title="prediction of the winner",
-    description="Predict which team (blue or red) will win the game",
-    allow_flagging="never",
-    inputs=[
-        gr.inputs.Dropdown(choices= all_tiers, default="GOLD", label="tier top blue side"),
-        gr.inputs.Dropdown(choices= all_division, default="I", label="division top blue side"),
-        gr.inputs.Slider(minimum=0,maximum=2000,default=50,step=1, label="League points top blue side"),
-        gr.inputs.Dropdown(choices= list(champ_Id.keys()), default="jax", label="champion top blue side"),
-
-        gr.inputs.Dropdown(choices= all_tiers, default="GOLD", label="tier jungle blue side"),
-        gr.inputs.Dropdown(choices= all_division , default="I", label="division jungle blue side"),
-        gr.inputs.Slider(minimum=0,maximum=2000,default=50,step=1, label="League points jungle blue side"),
-        gr.inputs.Dropdown(choices= list(champ_Id.keys()), default="elise", label="champion jungle blue side"),
-
-        gr.inputs.Dropdown(choices= all_tiers, default="GOLD", label="tier mid blue side"),
-        gr.inputs.Dropdown(choices= all_division , default="I", label="division mid blue side"),
-        gr.inputs.Slider(minimum=0,maximum=2000,default=50,step=1, label="League points mid blue side"),
-        gr.inputs.Dropdown(choices= list(champ_Id.keys()), default="fizz", label="champion mid blue side"),
-
-        gr.inputs.Dropdown(choices= all_tiers, default="GOLD", label="tier bottom blue side"),
-        gr.inputs.Dropdown(choices= all_division , default="I", label="division bottom blue side"),
-        gr.inputs.Slider(minimum=0,maximum=2000,default=50,step=1, label="League points bottom blue side"),
-        gr.inputs.Dropdown(choices= list(champ_Id.keys()), default="jhin", label="champion bottom blue side"),
-
-        gr.inputs.Dropdown(choices= all_tiers, default="GOLD", label="tier utility blue side"),
-        gr.inputs.Dropdown(choices= all_division , default="I", label="division utility blue side"),
-        gr.inputs.Slider(minimum=0,maximum=2000,default=50,step=1, label="League points utility blue side"),
-        gr.inputs.Dropdown(choices= list(champ_Id.keys()), default="bard", label="champion utility blue side"),
-
-
-        gr.inputs.Dropdown(choices= all_tiers, default="GOLD", label="tier top red side"),
-        gr.inputs.Dropdown(choices= all_division ,default="I", label="division top red side"),
-        gr.inputs.Slider(minimum=0,maximum=2000,default=50,step=1, label="League points top red side"),
-        gr.inputs.Dropdown(choices= list(champ_Id.keys()), default="darius", label="champion top red side"),
-
-        gr.inputs.Dropdown(choices= all_tiers, default="GOLD", label="tier jungle red side"),
-        gr.inputs.Dropdown(choices= all_division ,default="I", label="division jungle red side"),
-        gr.inputs.Slider(minimum=0,maximum=2000,default=50,step=1, label="League points jungle red side"),
-        gr.inputs.Dropdown(choices= list(champ_Id.keys()), default="hecarim", label="champion jungle red side"),
-
-        gr.inputs.Dropdown(choices= all_tiers, default="GOLD", label="tier mid red side"),
-        gr.inputs.Dropdown(choices= all_division , default="I", label="division mid red side"),
-        gr.inputs.Slider(minimum=0,maximum=2000,default=50,step=1, label="League points mid red side"),
-        gr.inputs.Dropdown(choices= list(champ_Id.keys()), default="yasuo", label="champion mid red side"),
-
-        gr.inputs.Dropdown(choices= all_tiers, default="GOLD", label="tier bottom red side"),
-        gr.inputs.Dropdown(choices= all_division , default="I", label="division bottom red side"),
-        gr.inputs.Slider(minimum=0,maximum=2000,default=50,step=1, label="League points bottom red side"),
-        gr.inputs.Dropdown(choices= list(champ_Id.keys()), default="ashe", label="champion bottom red side"),
-
-        gr.inputs.Dropdown(choices= all_tiers, default="GOLD", label="tier utility red side"),
-        gr.inputs.Dropdown(choices= all_division , default="I", label="division utility red side"),
-        gr.inputs.Slider(minimum=0,maximum=2000,default=50,step=1, label="League points utility red side"),
-        gr.inputs.Dropdown(choices=list(champ_Id.keys()), default="alistar", label="champion utility red side"),
-
-    ],
-    outputs = 'text')
-
-
-demo.launch(share=False, show_error = True)
\ No newline at end of file
diff --git a/spaces/cloversid/rvc-ai/README.md b/spaces/cloversid/rvc-ai/README.md
deleted file mode 100644
index c6b5793f1563e0cf367f9673c4f17ed341b2af06..0000000000000000000000000000000000000000
--- a/spaces/cloversid/rvc-ai/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: RVC AI
-emoji: 🌍
-colorFrom: blue
-colorTo: pink
-sdk: docker
-pinned: true
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/cncn102/bingo1/src/components/ui/separator.tsx b/spaces/cncn102/bingo1/src/components/ui/separator.tsx
deleted file mode 100644
index 6c55e0b2ca8e2436658a06748aadbff7cd700db0..0000000000000000000000000000000000000000
--- a/spaces/cncn102/bingo1/src/components/ui/separator.tsx
+++ /dev/null
@@ -1,31 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import * as SeparatorPrimitive from '@radix-ui/react-separator'
-
-import { cn } from '@/lib/utils'
-
-const Separator = React.forwardRef<
-  React.ElementRef<typeof SeparatorPrimitive.Root>,
-  React.ComponentPropsWithoutRef<typeof SeparatorPrimitive.Root>
->(
-  (
-    { className, orientation = 'horizontal', decorative = true, ...props },
-    ref
-  ) => (
-    <SeparatorPrimitive.Root
-      ref={ref}
-      decorative={decorative}
-      orientation={orientation}
-      className={cn(
-        'shrink-0 bg-border',
-        orientation === 'horizontal' ? 
'h-[1px] w-full' : 'h-full w-[1px]', - className - )} - {...props} - /> - ) -) -Separator.displayName = SeparatorPrimitive.Root.displayName - -export { Separator } diff --git a/spaces/codebox/diffuse-flood/build/index.html b/spaces/codebox/diffuse-flood/build/index.html deleted file mode 100644 index e481edc32e18acf761ee3d7be841a48a96e2ad2c..0000000000000000000000000000000000000000 --- a/spaces/codebox/diffuse-flood/build/index.html +++ /dev/null @@ -1,56 +0,0 @@ -<!DOCTYPE html> -<html lang="en"> - <head> - <meta charset="utf-8" /> - <link rel="icon" href="/staticspaceiframe/codebox/diffuse-flood/build/favicon.png" /> - <meta name="viewport" content="width=device-width" /> - <meta http-equiv="content-security-policy" content=""><link href="https://cdnjs.cloudflare.com/ajax/libs/drawingboard.js/0.4.2/drawingboard.css" rel="stylesheet" data-svelte="svelte-bw39ln"><script src="https://code.jquery.com/jquery-1.12.4.min.js" data-svelte="svelte-bw39ln"></script><script src="https://cdnjs.cloudflare.com/ajax/libs/drawingboard.js/0.4.2/drawingboard.min.js" data-svelte="svelte-bw39ln"></script><script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js" data-svelte="svelte-bw39ln"></script> - <link href="/staticspaceiframe/codebox/diffuse-flood/build/_app/immutable/assets/+layout-2ac25133.css" rel="stylesheet"> - <link href="/staticspaceiframe/codebox/diffuse-flood/build/_app/immutable/assets/+page-376b236d.css" rel="stylesheet"> - <link rel="modulepreload" href="/staticspaceiframe/codebox/diffuse-flood/build/_app/immutable/start-ad08af0f.js"> - <link rel="modulepreload" href="/staticspaceiframe/codebox/diffuse-flood/build/_app/immutable/chunks/index-a207c28c.js"> - <link rel="modulepreload" href="/staticspaceiframe/codebox/diffuse-flood/build/_app/immutable/chunks/singletons-46497942.js"> - <link rel="modulepreload" href="/staticspaceiframe/codebox/diffuse-flood/build/_app/immutable/components/pages/_layout.svelte-eed40348.js"> - <link rel="modulepreload" href="/staticspaceiframe/codebox/diffuse-flood/build/_app/immutable/components/pages/_page.svelte-2a5d0087.js"> - </head> - <body> - <div> - - - -<div class="flex flex-wrap gap-x-4 gap-y-2 justify-center my-8"><canvas class="border-[1.2px] desktop:mt-[34px] hidden"></canvas> - <div class="flex flex-col items-center "><div><p>Loading…</p> - <p>█▒▒▒▒▒▒▒▒▒</p></div> - <div id="board-container"></div> - </div></div> -<article class="prose-sm px-4 md:px-12 lg:px-56 mb-8 hidden"><div class="text-center"><p>Stable Diffusion model by <a href="https://huggingface.co/CompVis" rel="nofollow">CompVis</a> and <a href="https://huggingface.co/stabilityai" rel="nofollow">Stability AI</a> - Demo by 🤗 Hugging Face</p> -<p>Powered by <a href="https://github.com/huggingface/diffusers" rel="nofollow">🤗 Diffusers: State-of-the-art diffusion models for image and audio generation in PyTorch</a>. Based on <a href="https://twitter.com/psuraj28/status/1562039265126670339" rel="nofollow">notebook by @psuraj28</a></p> -<p>Check out <a href="https://huggingface.co/spaces/stabilityai/stable-diffusion" rel="nofollow">Stable Diffusion Gradio demo</a></p></div> -<h3>LICENSE</h3> -<p>The model is licensed with a <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" rel="nofollow">CreativeML Open RAIL-M</a> license. The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in this license. 
The license forbids you from sharing any content that violates any laws, producing any harm to a person, disseminating any personal information that would be meant for harm, spreading misinformation, and targeting vulnerable groups. For the full list of restrictions please <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" rel="nofollow">read the license</a>.</p>
-<h3>Biases and content acknowledgment</h3>
-<p>Despite how impressive being able to turn text into image is, be aware that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. The model was trained on the <a href="https://laion.ai/blog/laion-5b/" rel="nofollow">LAION-5B dataset</a>, which scraped non-curated image-text pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. You can read more in the <a href="https://huggingface.co/CompVis/stable-diffusion-v1-4" rel="nofollow">model card</a>.</p></article>
-
- <script type="module" data-sveltekit-hydrate="1kokcf0">
- import { set_public_env, start } from "/staticspaceiframe/codebox/diffuse-flood/build/_app/immutable/start-ad08af0f.js";
-
- set_public_env({});
-
- start({
- target: document.querySelector('[data-sveltekit-hydrate="1kokcf0"]').parentNode,
- paths: {"base":"/staticspaceiframe/codebox/diffuse-flood/build","assets":"/staticspaceiframe/codebox/diffuse-flood/build"},
- route: true,
- spa: false,
- trailing_slash: "never",
- hydrate: {
- status: 200,
- error: null,
- node_ids: [0, 2],
- params: {},
- routeId: ""
- }
- });
- </script>
- </div>
- </body>
-</html>
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/alpha/idctdsp_alpha.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/alpha/idctdsp_alpha.c
deleted file mode 100644
index bd43842535a94f1e1f3053d36a7328e12116b9e6..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/alpha/idctdsp_alpha.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2002 Falk Hueffner <falk@debian.org>
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "libavutil/attributes.h"
-#include "libavcodec/idctdsp.h"
-#include "idctdsp_alpha.h"
-#include "asm.h"
-
-void put_pixels_clamped_mvi_asm(const int16_t *block, uint8_t *pixels,
- ptrdiff_t line_size);
-void add_pixels_clamped_mvi_asm(const int16_t *block, uint8_t *pixels,
- ptrdiff_t line_size);
-
-void (*put_pixels_clamped_axp_p)(const int16_t *block, uint8_t *pixels,
- ptrdiff_t line_size);
-void (*add_pixels_clamped_axp_p)(const int16_t *block, uint8_t *pixels,
- ptrdiff_t line_size);
-
-#if 0
-/* These functions were the base for the optimized assembler routines,
- and remain here for documentation purposes.
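- Each loop iteration handles one row of eight coefficients in two 64-bit
- halves: maxsw4()/minsw4() clamp four signed words at a time into [0, 255]
- against the 0x00ff00ff00ff00ff mask built with zap(), and pkwb() packs the
- clamped words down to bytes before each 32-bit store.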
*/ -static void put_pixels_clamped_mvi(const int16_t *block, uint8_t *pixels, - ptrdiff_t line_size) -{ - int i = 8; - uint64_t clampmask = zap(-1, 0xaa); /* 0x00ff00ff00ff00ff */ - - do { - uint64_t shorts0, shorts1; - - shorts0 = ldq(block); - shorts0 = maxsw4(shorts0, 0); - shorts0 = minsw4(shorts0, clampmask); - stl(pkwb(shorts0), pixels); - - shorts1 = ldq(block + 4); - shorts1 = maxsw4(shorts1, 0); - shorts1 = minsw4(shorts1, clampmask); - stl(pkwb(shorts1), pixels + 4); - - pixels += line_size; - block += 8; - } while (--i); -} - -void add_pixels_clamped_mvi(const int16_t *block, uint8_t *pixels, - ptrdiff_t line_size) -{ - int h = 8; - /* Keep this function a leaf function by generating the constants - manually (mainly for the hack value ;-). */ - uint64_t clampmask = zap(-1, 0xaa); /* 0x00ff00ff00ff00ff */ - uint64_t signmask = zap(-1, 0x33); - signmask ^= signmask >> 1; /* 0x8000800080008000 */ - - do { - uint64_t shorts0, pix0, signs0; - uint64_t shorts1, pix1, signs1; - - shorts0 = ldq(block); - shorts1 = ldq(block + 4); - - pix0 = unpkbw(ldl(pixels)); - /* Signed subword add (MMX paddw). */ - signs0 = shorts0 & signmask; - shorts0 &= ~signmask; - shorts0 += pix0; - shorts0 ^= signs0; - /* Clamp. */ - shorts0 = maxsw4(shorts0, 0); - shorts0 = minsw4(shorts0, clampmask); - - /* Next 4. */ - pix1 = unpkbw(ldl(pixels + 4)); - signs1 = shorts1 & signmask; - shorts1 &= ~signmask; - shorts1 += pix1; - shorts1 ^= signs1; - shorts1 = maxsw4(shorts1, 0); - shorts1 = minsw4(shorts1, clampmask); - - stl(pkwb(shorts0), pixels); - stl(pkwb(shorts1), pixels + 4); - - pixels += line_size; - block += 8; - } while (--h); -} -#endif - -av_cold void ff_idctdsp_init_alpha(IDCTDSPContext *c, AVCodecContext *avctx, - unsigned high_bit_depth) -{ - /* amask clears all bits that correspond to present features. */ - if (amask(AMASK_MVI) == 0) { - c->put_pixels_clamped = put_pixels_clamped_mvi_asm; - c->add_pixels_clamped = add_pixels_clamped_mvi_asm; - } - - put_pixels_clamped_axp_p = c->put_pixels_clamped; - add_pixels_clamped_axp_p = c->add_pixels_clamped; - - if (!high_bit_depth && !avctx->lowres && - (avctx->idct_algo == FF_IDCT_AUTO)) { - c->idct_put = ff_simple_idct_put_axp; - c->idct_add = ff_simple_idct_add_axp; - c->idct = ff_simple_idct_axp; - } -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/h264pred.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/h264pred.h deleted file mode 100644 index cb008548fc283ec8a27ed28f56cfa584a4bb2dd6..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/h264pred.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder - * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * H.264 / AVC / MPEG-4 prediction functions. - * @author Michael Niedermayer <michaelni@gmx.at> - */ - -#ifndef AVCODEC_H264PRED_H -#define AVCODEC_H264PRED_H - -#include <stddef.h> -#include <stdint.h> - -/** - * Prediction types - */ -//@{ -#define VERT_PRED 0 -#define HOR_PRED 1 -#define DC_PRED 2 -#define DIAG_DOWN_LEFT_PRED 3 -#define DIAG_DOWN_RIGHT_PRED 4 -#define VERT_RIGHT_PRED 5 -#define HOR_DOWN_PRED 6 -#define VERT_LEFT_PRED 7 -#define HOR_UP_PRED 8 - -// DC edge (not for VP8) -#define LEFT_DC_PRED 9 -#define TOP_DC_PRED 10 -#define DC_128_PRED 11 - -// RV40 specific -#define DIAG_DOWN_LEFT_PRED_RV40_NODOWN 12 -#define HOR_UP_PRED_RV40_NODOWN 13 -#define VERT_LEFT_PRED_RV40_NODOWN 14 - -// VP8 specific -#define TM_VP8_PRED 9 ///< "True Motion", used instead of plane -#define VERT_VP8_PRED 10 ///< for VP8, #VERT_PRED is the average of - ///< (left col+cur col x2+right col) / 4; - ///< this is the "unaveraged" one -#define HOR_VP8_PRED 14 ///< unaveraged version of #HOR_PRED, see - ///< #VERT_VP8_PRED for details -#define DC_127_PRED 12 -#define DC_129_PRED 13 - -#define DC_PRED8x8 0 -#define HOR_PRED8x8 1 -#define VERT_PRED8x8 2 -#define PLANE_PRED8x8 3 - -// DC edge -#define LEFT_DC_PRED8x8 4 -#define TOP_DC_PRED8x8 5 -#define DC_128_PRED8x8 6 - -// H.264/SVQ3 (8x8) specific -#define ALZHEIMER_DC_L0T_PRED8x8 7 -#define ALZHEIMER_DC_0LT_PRED8x8 8 -#define ALZHEIMER_DC_L00_PRED8x8 9 -#define ALZHEIMER_DC_0L0_PRED8x8 10 - -// VP8 specific -#define DC_127_PRED8x8 7 -#define DC_129_PRED8x8 8 -//@} - -#define PART_NOT_AVAILABLE -2 - -/** - * Context for storing H.264 prediction functions - */ -typedef struct H264PredContext { - void(*pred4x4[9 + 3 + 3])(uint8_t *src, const uint8_t *topright, - ptrdiff_t stride); - void(*pred8x8l[9 + 3])(uint8_t *src, int topleft, int topright, - ptrdiff_t stride); - void(*pred8x8[4 + 3 + 4])(uint8_t *src, ptrdiff_t stride); - void(*pred16x16[4 + 3 + 2])(uint8_t *src, ptrdiff_t stride); - - void(*pred4x4_add[2])(uint8_t *pix /*align 4*/, - int16_t *block /*align 16*/, ptrdiff_t stride); - void(*pred8x8l_add[2])(uint8_t *pix /*align 8*/, - int16_t *block /*align 16*/, ptrdiff_t stride); - void(*pred8x8l_filter_add[2])(uint8_t *pix /*align 8*/, - int16_t *block /*align 16*/, int topleft, int topright, ptrdiff_t stride); - void(*pred8x8_add[3])(uint8_t *pix /*align 8*/, - const int *block_offset, - int16_t *block /*align 16*/, ptrdiff_t stride); - void(*pred16x16_add[3])(uint8_t *pix /*align 16*/, - const int *block_offset, - int16_t *block /*align 16*/, ptrdiff_t stride); -} H264PredContext; - -void ff_h264_pred_init(H264PredContext *h, int codec_id, - const int bit_depth, const int chroma_format_idc); -void ff_h264_pred_init_aarch64(H264PredContext *h, int codec_id, - const int bit_depth, - const int chroma_format_idc); -void ff_h264_pred_init_arm(H264PredContext *h, int codec_id, - const int bit_depth, const int chroma_format_idc); -void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, - const int bit_depth, const int chroma_format_idc); -void ff_h264_pred_init_mips(H264PredContext *h, int codec_id, - const int bit_depth, const int chroma_format_idc); -void ff_h264_pred_init_loongarch(H264PredContext *h, int codec_id, - const int bit_depth, const int chroma_format_idc); - -#endif /* AVCODEC_H264PRED_H */ 
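For orientation, here is a minimal sketch of what a single pred4x4 table entry computes: plain 8-bit DC_PRED, the rounded average of the four top and four left neighbours. The helper name is hypothetical and the code only illustrates the interface declared above, not FFmpeg's actual implementation; the real decoder also selects the LEFT_DC/TOP_DC/DC_128 variants when an edge is unavailable.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Sketch of a pred4x4[DC_PRED] entry: fill the 4x4 block at 'src' with the
 * rounded average of its top and left neighbour pixels. */
static void sketch_pred4x4_dc(uint8_t *src, const uint8_t *topright,
                              ptrdiff_t stride)
{
    int sum = 0;
    for (int i = 0; i < 4; i++)
        sum += src[i - stride]       /* top neighbour row     */
             + src[i * stride - 1];  /* left neighbour column */
    const int dc = (sum + 4) >> 3;   /* average of 8 pixels, rounded */
    for (int i = 0; i < 4; i++)
        memset(src + i * stride, dc, 4);
    (void)topright;                  /* only the diagonal modes need it */
}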
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/h264idct_msa.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/h264idct_msa.c deleted file mode 100644 index 1a20a3e30e57c007cc7d5d1624fb82b458ceb8ac..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/h264idct_msa.c +++ /dev/null @@ -1,470 +0,0 @@ -/* - * Copyright (c) 2015 - 2017 Manojkumar Bhosale (Manojkumar.Bhosale@imgtec.com) - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/mips/generic_macros_msa.h" -#include "h264dsp_mips.h" -#include "libavcodec/bit_depth_template.c" - -#define AVC_ITRANS_H(in0, in1, in2, in3, out0, out1, out2, out3) \ -{ \ - v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - \ - tmp0_m = in0 + in2; \ - tmp1_m = in0 - in2; \ - tmp2_m = in1 >> 1; \ - tmp2_m = tmp2_m - in3; \ - tmp3_m = in3 >> 1; \ - tmp3_m = in1 + tmp3_m; \ - \ - BUTTERFLY_4(tmp0_m, tmp1_m, tmp2_m, tmp3_m, out0, out1, out2, out3); \ -} - -static void avc_deq_idct_luma_dc_msa(int16_t *dst, int16_t *src, - int32_t de_q_val) -{ -#define DC_DEST_STRIDE 16 - int16_t out0, out1, out2, out3, out4, out5, out6, out7; - v8i16 src1, src3; - v8i16 vec0, vec1, vec2, vec3; - v8i16 tmp0, tmp1, tmp2, tmp3; - v8i16 hres0, hres1, hres2, hres3; - v8i16 vres0, vres1, vres2, vres3; - v4i32 vres0_r, vres1_r, vres2_r, vres3_r; - const v4i32 de_q_vec = __msa_fill_w(de_q_val); - const v8i16 src0 = LD_SH(src); - const v8i16 src2 = LD_SH(src + 8); - - ILVL_D2_SH(src0, src0, src2, src2, src1, src3); - TRANSPOSE4x4_SH_SH(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3); - BUTTERFLY_4(tmp0, tmp2, tmp3, tmp1, vec0, vec3, vec2, vec1); - BUTTERFLY_4(vec0, vec1, vec2, vec3, hres0, hres3, hres2, hres1); - TRANSPOSE4x4_SH_SH(hres0, hres1, hres2, hres3, hres0, hres1, hres2, hres3); - BUTTERFLY_4(hres0, hres1, hres3, hres2, vec0, vec3, vec2, vec1); - BUTTERFLY_4(vec0, vec1, vec2, vec3, vres0, vres1, vres2, vres3); - UNPCK_R_SH_SW(vres0, vres0_r); - UNPCK_R_SH_SW(vres1, vres1_r); - UNPCK_R_SH_SW(vres2, vres2_r); - UNPCK_R_SH_SW(vres3, vres3_r); - - vres0_r *= de_q_vec; - vres1_r *= de_q_vec; - vres2_r *= de_q_vec; - vres3_r *= de_q_vec; - - SRARI_W4_SW(vres0_r, vres1_r, vres2_r, vres3_r, 8); - PCKEV_H2_SH(vres1_r, vres0_r, vres3_r, vres2_r, vec0, vec1); - - out0 = __msa_copy_s_h(vec0, 0); - out1 = __msa_copy_s_h(vec0, 1); - out2 = __msa_copy_s_h(vec0, 2); - out3 = __msa_copy_s_h(vec0, 3); - out4 = __msa_copy_s_h(vec0, 4); - out5 = __msa_copy_s_h(vec0, 5); - out6 = __msa_copy_s_h(vec0, 6); - out7 = __msa_copy_s_h(vec0, 7); - SH(out0, (dst + 0 * DC_DEST_STRIDE)); - SH(out1, (dst + 2 * DC_DEST_STRIDE)); - SH(out2, (dst + 8 * DC_DEST_STRIDE)); - SH(out3, (dst + 10 * DC_DEST_STRIDE)); - SH(out4, (dst + 1 * DC_DEST_STRIDE)); - SH(out5, (dst + 3 * DC_DEST_STRIDE)); - 
SH(out6, (dst + 9 * DC_DEST_STRIDE)); - SH(out7, (dst + 11 * DC_DEST_STRIDE)); - - out0 = __msa_copy_s_h(vec1, 0); - out1 = __msa_copy_s_h(vec1, 1); - out2 = __msa_copy_s_h(vec1, 2); - out3 = __msa_copy_s_h(vec1, 3); - out4 = __msa_copy_s_h(vec1, 4); - out5 = __msa_copy_s_h(vec1, 5); - out6 = __msa_copy_s_h(vec1, 6); - out7 = __msa_copy_s_h(vec1, 7); - SH(out0, (dst + 4 * DC_DEST_STRIDE)); - SH(out1, (dst + 6 * DC_DEST_STRIDE)); - SH(out2, (dst + 12 * DC_DEST_STRIDE)); - SH(out3, (dst + 14 * DC_DEST_STRIDE)); - SH(out4, (dst + 5 * DC_DEST_STRIDE)); - SH(out5, (dst + 7 * DC_DEST_STRIDE)); - SH(out6, (dst + 13 * DC_DEST_STRIDE)); - SH(out7, (dst + 15 * DC_DEST_STRIDE)); - -#undef DC_DEST_STRIDE -} - -static void avc_idct8_addblk_msa(uint8_t *dst, int16_t *src, int32_t dst_stride) -{ - v8i16 src0, src1, src2, src3, src4, src5, src6, src7; - v8i16 vec0, vec1, vec2, vec3; - v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - v8i16 res0, res1, res2, res3, res4, res5, res6, res7; - v4i32 tmp0_r, tmp1_r, tmp2_r, tmp3_r, tmp4_r, tmp5_r, tmp6_r, tmp7_r; - v4i32 tmp0_l, tmp1_l, tmp2_l, tmp3_l, tmp4_l, tmp5_l, tmp6_l, tmp7_l; - v4i32 vec0_r, vec1_r, vec2_r, vec3_r, vec0_l, vec1_l, vec2_l, vec3_l; - v4i32 res0_r, res1_r, res2_r, res3_r, res4_r, res5_r, res6_r, res7_r; - v4i32 res0_l, res1_l, res2_l, res3_l, res4_l, res5_l, res6_l, res7_l; - v16i8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; - v8i16 zeros = { 0 }; - - src[0] += 32; - - LD_SH8(src, 8, src0, src1, src2, src3, src4, src5, src6, src7); - ST_SH8(zeros, zeros, zeros, zeros, zeros, zeros, zeros, zeros, src, 8); - - vec0 = src0 + src4; - vec1 = src0 - src4; - vec2 = src2 >> 1; - vec2 = vec2 - src6; - vec3 = src6 >> 1; - vec3 = src2 + vec3; - - BUTTERFLY_4(vec0, vec1, vec2, vec3, tmp0, tmp1, tmp2, tmp3); - - vec0 = src7 >> 1; - vec0 = src5 - vec0 - src3 - src7; - vec1 = src3 >> 1; - vec1 = src1 - vec1 + src7 - src3; - vec2 = src5 >> 1; - vec2 = vec2 - src1 + src7 + src5; - vec3 = src1 >> 1; - vec3 = vec3 + src3 + src5 + src1; - tmp4 = vec3 >> 2; - tmp4 += vec0; - tmp5 = vec2 >> 2; - tmp5 += vec1; - tmp6 = vec1 >> 2; - tmp6 -= vec2; - tmp7 = vec0 >> 2; - tmp7 = vec3 - tmp7; - - BUTTERFLY_8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, - res0, res1, res2, res3, res4, res5, res6, res7); - TRANSPOSE8x8_SH_SH(res0, res1, res2, res3, res4, res5, res6, res7, - res0, res1, res2, res3, res4, res5, res6, res7); - UNPCK_SH_SW(res0, tmp0_r, tmp0_l); - UNPCK_SH_SW(res1, tmp1_r, tmp1_l); - UNPCK_SH_SW(res2, tmp2_r, tmp2_l); - UNPCK_SH_SW(res3, tmp3_r, tmp3_l); - UNPCK_SH_SW(res4, tmp4_r, tmp4_l); - UNPCK_SH_SW(res5, tmp5_r, tmp5_l); - UNPCK_SH_SW(res6, tmp6_r, tmp6_l); - UNPCK_SH_SW(res7, tmp7_r, tmp7_l); - BUTTERFLY_4(tmp0_r, tmp0_l, tmp4_l, tmp4_r, vec0_r, vec0_l, vec1_l, vec1_r); - - vec2_r = tmp2_r >> 1; - vec2_l = tmp2_l >> 1; - vec2_r -= tmp6_r; - vec2_l -= tmp6_l; - vec3_r = tmp6_r >> 1; - vec3_l = tmp6_l >> 1; - vec3_r += tmp2_r; - vec3_l += tmp2_l; - - BUTTERFLY_4(vec0_r, vec1_r, vec2_r, vec3_r, tmp0_r, tmp2_r, tmp4_r, tmp6_r); - BUTTERFLY_4(vec0_l, vec1_l, vec2_l, vec3_l, tmp0_l, tmp2_l, tmp4_l, tmp6_l); - - vec0_r = tmp7_r >> 1; - vec0_l = tmp7_l >> 1; - vec0_r = tmp5_r - vec0_r - tmp3_r - tmp7_r; - vec0_l = tmp5_l - vec0_l - tmp3_l - tmp7_l; - vec1_r = tmp3_r >> 1; - vec1_l = tmp3_l >> 1; - vec1_r = tmp1_r - vec1_r + tmp7_r - tmp3_r; - vec1_l = tmp1_l - vec1_l + tmp7_l - tmp3_l; - vec2_r = tmp5_r >> 1; - vec2_l = tmp5_l >> 1; - vec2_r = vec2_r - tmp1_r + tmp7_r + tmp5_r; - vec2_l = vec2_l - tmp1_l + tmp7_l + tmp5_l; - vec3_r = tmp1_r >> 
1; - vec3_l = tmp1_l >> 1; - vec3_r = vec3_r + tmp3_r + tmp5_r + tmp1_r; - vec3_l = vec3_l + tmp3_l + tmp5_l + tmp1_l; - tmp1_r = vec3_r >> 2; - tmp1_l = vec3_l >> 2; - tmp1_r += vec0_r; - tmp1_l += vec0_l; - tmp3_r = vec2_r >> 2; - tmp3_l = vec2_l >> 2; - tmp3_r += vec1_r; - tmp3_l += vec1_l; - tmp5_r = vec1_r >> 2; - tmp5_l = vec1_l >> 2; - tmp5_r -= vec2_r; - tmp5_l -= vec2_l; - tmp7_r = vec0_r >> 2; - tmp7_l = vec0_l >> 2; - tmp7_r = vec3_r - tmp7_r; - tmp7_l = vec3_l - tmp7_l; - - BUTTERFLY_4(tmp0_r, tmp0_l, tmp7_l, tmp7_r, res0_r, res0_l, res7_l, res7_r); - BUTTERFLY_4(tmp2_r, tmp2_l, tmp5_l, tmp5_r, res1_r, res1_l, res6_l, res6_r); - BUTTERFLY_4(tmp4_r, tmp4_l, tmp3_l, tmp3_r, res2_r, res2_l, res5_l, res5_r); - BUTTERFLY_4(tmp6_r, tmp6_l, tmp1_l, tmp1_r, res3_r, res3_l, res4_l, res4_r); - SRA_4V(res0_r, res0_l, res1_r, res1_l, 6); - SRA_4V(res2_r, res2_l, res3_r, res3_l, 6); - SRA_4V(res4_r, res4_l, res5_r, res5_l, 6); - SRA_4V(res6_r, res6_l, res7_r, res7_l, 6); - PCKEV_H4_SH(res0_l, res0_r, res1_l, res1_r, res2_l, res2_r, res3_l, res3_r, - res0, res1, res2, res3); - PCKEV_H4_SH(res4_l, res4_r, res5_l, res5_r, res6_l, res6_r, res7_l, res7_r, - res4, res5, res6, res7); - LD_SB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7); - ILVR_B4_SH(zeros, dst0, zeros, dst1, zeros, dst2, zeros, dst3, - tmp0, tmp1, tmp2, tmp3); - ILVR_B4_SH(zeros, dst4, zeros, dst5, zeros, dst6, zeros, dst7, - tmp4, tmp5, tmp6, tmp7); - ADD4(res0, tmp0, res1, tmp1, res2, tmp2, res3, tmp3, - res0, res1, res2, res3); - ADD4(res4, tmp4, res5, tmp5, res6, tmp6, res7, tmp7, - res4, res5, res6, res7); - CLIP_SH8_0_255(res0, res1, res2, res3, res4, res5, res6, res7); - PCKEV_B4_SB(res1, res0, res3, res2, res5, res4, res7, res6, - dst0, dst1, dst2, dst3); - ST_D8(dst0, dst1, dst2, dst3, 0, 1, 0, 1, 0, 1, 0, 1, dst, dst_stride) -} - -static void avc_idct8_dc_addblk_msa(uint8_t *dst, int16_t *src, - int32_t dst_stride) -{ - int32_t dc_val; - v16i8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; - v8i16 dst0_r, dst1_r, dst2_r, dst3_r, dst4_r, dst5_r, dst6_r, dst7_r; - v8i16 dc; - v16i8 zeros = { 0 }; - - dc_val = (src[0] + 32) >> 6; - dc = __msa_fill_h(dc_val); - - src[0] = 0; - - LD_SB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7); - ILVR_B4_SH(zeros, dst0, zeros, dst1, zeros, dst2, zeros, dst3, - dst0_r, dst1_r, dst2_r, dst3_r); - ILVR_B4_SH(zeros, dst4, zeros, dst5, zeros, dst6, zeros, dst7, - dst4_r, dst5_r, dst6_r, dst7_r); - ADD4(dst0_r, dc, dst1_r, dc, dst2_r, dc, dst3_r, dc, - dst0_r, dst1_r, dst2_r, dst3_r); - ADD4(dst4_r, dc, dst5_r, dc, dst6_r, dc, dst7_r, dc, - dst4_r, dst5_r, dst6_r, dst7_r); - CLIP_SH8_0_255(dst0_r, dst1_r, dst2_r, dst3_r, - dst4_r, dst5_r, dst6_r, dst7_r); - PCKEV_B4_SB(dst1_r, dst0_r, dst3_r, dst2_r, dst5_r, dst4_r, dst7_r, dst6_r, - dst0, dst1, dst2, dst3); - ST_D8(dst0, dst1, dst2, dst3, 0, 1, 0, 1, 0, 1, 0, 1, dst, dst_stride) -} - -void ff_h264_idct_add_msa(uint8_t *dst, int16_t *src, int32_t dst_stride) -{ - uint32_t src0_m, src1_m, src2_m, src3_m, out0_m, out1_m, out2_m, out3_m; - v16i8 dst0_m = { 0 }; - v16i8 dst1_m = { 0 }; - v8i16 hres0, hres1, hres2, hres3, vres0, vres1, vres2, vres3; - v8i16 inp0_m, inp1_m, res0_m, res1_m, src1, src3; - const v8i16 src0 = LD_SH(src); - const v8i16 src2 = LD_SH(src + 8); - const v8i16 zero = { 0 }; - const uint8_t *dst1 = dst + dst_stride; - const uint8_t *dst2 = dst + 2 * dst_stride; - const uint8_t *dst3 = dst + 3 * dst_stride; - - ILVL_D2_SH(src0, src0, src2, src2, src1, src3); - ST_SH2(zero, zero, src, 8); 
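- /* Two-pass 4x4 inverse transform: horizontal pass, 4x4 transpose,
- vertical pass; the results are rounded by SRARI (>>6) and added to the
- predicted pixels with clamping to [0, 255] below. */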
- AVC_ITRANS_H(src0, src1, src2, src3, hres0, hres1, hres2, hres3); - TRANSPOSE4x4_SH_SH(hres0, hres1, hres2, hres3, hres0, hres1, hres2, hres3); - AVC_ITRANS_H(hres0, hres1, hres2, hres3, vres0, vres1, vres2, vres3); - src0_m = LW(dst); - src1_m = LW(dst1); - SRARI_H4_SH(vres0, vres1, vres2, vres3, 6); - src2_m = LW(dst2); - src3_m = LW(dst3); - ILVR_D2_SH(vres1, vres0, vres3, vres2, inp0_m, inp1_m); - INSERT_W2_SB(src0_m, src1_m, dst0_m); - INSERT_W2_SB(src2_m, src3_m, dst1_m); - ILVR_B2_SH(zero, dst0_m, zero, dst1_m, res0_m, res1_m); - ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m); - CLIP_SH2_0_255(res0_m, res1_m); - PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m); - out0_m = __msa_copy_u_w((v4i32) dst0_m, 0); - out1_m = __msa_copy_u_w((v4i32) dst0_m, 1); - out2_m = __msa_copy_u_w((v4i32) dst1_m, 0); - out3_m = __msa_copy_u_w((v4i32) dst1_m, 1); - SW(out0_m, dst); - SW(out1_m, dst1); - SW(out2_m, dst2); - SW(out3_m, dst3); -} - -void ff_h264_idct8_addblk_msa(uint8_t *dst, int16_t *src, - int32_t dst_stride) -{ - avc_idct8_addblk_msa(dst, src, dst_stride); -} - -void ff_h264_idct4x4_addblk_dc_msa(uint8_t *dst, int16_t *src, - int32_t dst_stride) -{ - v16u8 pred = { 0 }; - v16i8 out; - v8i16 pred_r, pred_l; - const uint32_t src0 = LW(dst); - const uint32_t src1 = LW(dst + dst_stride); - const uint32_t src2 = LW(dst + 2 * dst_stride); - const uint32_t src3 = LW(dst + 3 * dst_stride); - const int16_t dc = (src[0] + 32) >> 6; - const v8i16 input_dc = __msa_fill_h(dc); - - src[0] = 0; - INSERT_W4_UB(src0, src1, src2, src3, pred); - UNPCK_UB_SH(pred, pred_r, pred_l); - ADD2(pred_r, input_dc, pred_l, input_dc, pred_r, pred_l); - CLIP_SH2_0_255(pred_r, pred_l); - out = __msa_pckev_b((v16i8) pred_l, (v16i8) pred_r); - ST_W4(out, 0, 1, 2, 3, dst, dst_stride); -} - -void ff_h264_idct8_dc_addblk_msa(uint8_t *dst, int16_t *src, - int32_t dst_stride) -{ - avc_idct8_dc_addblk_msa(dst, src, dst_stride); -} - -void ff_h264_idct_add16_msa(uint8_t *dst, - const int32_t *blk_offset, - int16_t *block, int32_t dst_stride, - const uint8_t nzc[5 * 8]) -{ - int32_t i; - - for (i = 0; i < 16; i++) { - int32_t nnz = nzc[scan8[i]]; - - if (nnz) { - if (nnz == 1 && ((dctcoef *) block)[i * 16]) - ff_h264_idct4x4_addblk_dc_msa(dst + blk_offset[i], - block + i * 16 * sizeof(pixel), - dst_stride); - else - ff_h264_idct_add_msa(dst + blk_offset[i], - block + i * 16 * sizeof(pixel), - dst_stride); - } - } -} - -void ff_h264_idct8_add4_msa(uint8_t *dst, const int32_t *blk_offset, - int16_t *block, int32_t dst_stride, - const uint8_t nzc[5 * 8]) -{ - int32_t cnt; - - for (cnt = 0; cnt < 16; cnt += 4) { - int32_t nnz = nzc[scan8[cnt]]; - - if (nnz) { - if (nnz == 1 && ((dctcoef *) block)[cnt * 16]) - ff_h264_idct8_dc_addblk_msa(dst + blk_offset[cnt], - block + cnt * 16 * sizeof(pixel), - dst_stride); - else - ff_h264_idct8_addblk_msa(dst + blk_offset[cnt], - block + cnt * 16 * sizeof(pixel), - dst_stride); - } - } -} - -void ff_h264_idct_add8_msa(uint8_t **dst, - const int32_t *blk_offset, - int16_t *block, int32_t dst_stride, - const uint8_t nzc[15 * 8]) -{ - int32_t i, j; - - for (j = 1; j < 3; j++) { - for (i = (j * 16); i < (j * 16 + 4); i++) { - if (nzc[scan8[i]]) - ff_h264_idct_add_msa(dst[j - 1] + blk_offset[i], - block + i * 16 * sizeof(pixel), - dst_stride); - else if (((dctcoef *) block)[i * 16]) - ff_h264_idct4x4_addblk_dc_msa(dst[j - 1] + blk_offset[i], - block + i * 16 * sizeof(pixel), - dst_stride); - } - } -} - -void ff_h264_idct_add8_422_msa(uint8_t **dst, - const int32_t *blk_offset, - 
int16_t *block, int32_t dst_stride, - const uint8_t nzc[15 * 8]) -{ - int32_t i, j; - - for (j = 1; j < 3; j++) { - for (i = (j * 16); i < (j * 16 + 4); i++) { - if (nzc[scan8[i]]) - ff_h264_idct_add_msa(dst[j - 1] + blk_offset[i], - block + i * 16 * sizeof(pixel), - dst_stride); - else if (((dctcoef *) block)[i * 16]) - ff_h264_idct4x4_addblk_dc_msa(dst[j - 1] + blk_offset[i], - block + i * 16 * sizeof(pixel), - dst_stride); - } - } - - for (j = 1; j < 3; j++) { - for (i = (j * 16 + 4); i < (j * 16 + 8); i++) { - if (nzc[scan8[i + 4]]) - ff_h264_idct_add_msa(dst[j - 1] + blk_offset[i + 4], - block + i * 16 * sizeof(pixel), - dst_stride); - else if (((dctcoef *) block)[i * 16]) - ff_h264_idct4x4_addblk_dc_msa(dst[j - 1] + blk_offset[i + 4], - block + i * 16 * sizeof(pixel), - dst_stride); - } - } -} - -void ff_h264_idct_add16_intra_msa(uint8_t *dst, - const int32_t *blk_offset, - int16_t *block, - int32_t dst_stride, - const uint8_t nzc[5 * 8]) -{ - int32_t i; - - for (i = 0; i < 16; i++) { - if (nzc[scan8[i]]) - ff_h264_idct_add_msa(dst + blk_offset[i], - block + i * 16 * sizeof(pixel), dst_stride); - else if (((dctcoef *) block)[i * 16]) - ff_h264_idct4x4_addblk_dc_msa(dst + blk_offset[i], - block + i * 16 * sizeof(pixel), - dst_stride); - } -} - -void ff_h264_deq_idct_luma_dc_msa(int16_t *dst, int16_t *src, - int32_t de_qval) -{ - avc_deq_idct_luma_dc_msa(dst, src, de_qval); -} diff --git a/spaces/congsaPfin/Manga-OCR/logs/All Cars Unlocked in 3D Driving Class APK Mod for Real Driving Simulation.md b/spaces/congsaPfin/Manga-OCR/logs/All Cars Unlocked in 3D Driving Class APK Mod for Real Driving Simulation.md deleted file mode 100644 index d25cd477794697c69f52a329d81aca26131f142c..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/All Cars Unlocked in 3D Driving Class APK Mod for Real Driving Simulation.md +++ /dev/null @@ -1,95 +0,0 @@ - -<h1>3D Driving Class Mod Apk: A Realistic Driving Simulator with All Cars Unlocked</h1> - <h2>Introduction</h2> - <p>Do you love driving games? Do you want to learn how to drive in different situations and environments? Do you wish you could drive any car you want without spending a dime? If you answered yes to any of these questions, then you should try <strong>3D Driving Class Mod Apk</strong>, a realistic driving simulator game for Android devices.</p> - <h3>What is 3D Driving Class Mod Apk?</h3> - <p>3D Driving Class Mod Apk is a modified version of the original 3D Driving Class game, which is a popular driving simulator game developed by John Games. In this game, you can learn how to drive in various scenarios, such as city, highway, parking, off-road, and more. You can also take driving tests and get licenses for different vehicles, such as cars, buses, trucks, motorcycles, and even tanks.</p> -<h2>3d driving class all cars unlocked apk</h2><br /><p><b><b>Download</b> ✒ ✒ ✒ <a href="https://urlca.com/2uOe39">https://urlca.com/2uOe39</a></b></p><br /><br /> - <h3>Why download 3D Driving Class Mod Apk?</h3> - <p>The main reason to download 3D Driving Class Mod Apk is that it gives you access to all the features and content that are locked or limited in the original game. For example, you can unlock all the cars in the game, which include sports cars, supercars, luxury cars, and more. You can also get unlimited money and gems, which you can use to buy new cars, upgrade them, or customize them. 
Moreover, you can enjoy the game without any ads or interruptions.</p> - <h2>Features of 3D Driving Class Mod Apk</h2> - <p>Here are some of the amazing features that you can enjoy when you download 3D Driving Class Mod Apk:</p> - <h3>All cars unlocked</h3> - <p>One of the best features of 3D Driving Class Mod Apk is that it unlocks all the cars in the game. You can choose from over 100 cars, each with its own characteristics and performance. You can drive any car you want, whether it's a sedan, a hatchback, a coupe, a convertible, or a SUV. You can also drive exotic cars, such as Lamborghini, Ferrari, Bugatti, Porsche, and more.</p> - <h3>Unlimited money and gems</h3> - <p>Another great feature of 3D Driving Class Mod Apk is that it gives you unlimited money and gems. Money and gems are the main currencies in the game, which you can use to buy new cars, upgrade them, or customize them. You can also use them to unlock new modes and scenarios in the game. With unlimited money and gems, you don't have to worry about running out of resources or spending real money on the game.</p> - <h3>Various modes and scenarios</h3> - <p>3D Driving Class Mod Apk also offers various modes and scenarios for you to enjoy. You can choose from different driving modes, such as free driving, mission mode, license mode, or multiplayer mode. You can also choose from different driving scenarios, such as city, highway, parking lot, off-road, snowfield, desert, and more. Each mode and scenario has its own challenges and objectives for you to complete.</p> - <h3>Realistic graphics and physics</h3> - <p>The graphics and physics of 3D Driving Class Mod Apk are also very realistic and impressive. The game uses high-quality 3D graphics that make the cars and environments look stunning and detailed. The game also uses realistic physics that make the driving experience more authentic and fun and challenging. You can feel the difference between driving a sports car and a truck, or driving on a smooth road and a bumpy terrain. You can also adjust the weather, time, and traffic conditions to suit your preferences.</p> - <h3>Easy controls and customization</h3> - <p>The controls and customization of 3D Driving Class Mod Apk are also very easy and user-friendly. You can choose from different control options, such as steering wheel, buttons, tilt, or joystick. You can also customize the camera angle, the sound effects, the language, and the graphics quality. 
You can also change the color, the wheels, the spoiler, and the stickers of your car.</p> - <h2>How to download and install 3D Driving Class Mod Apk?</h2> - <p>If you are interested in downloading and installing 3D Driving Class Mod Apk, you can follow these simple steps:</p> -<p>3d driving class mod apk unlimited money and cars<br /> -3d driving class hack apk download all vehicles unlocked<br /> -3d driving class simulator apk free download with all cars<br /> -3d driving class latest version mod apk unlock everything<br /> -3d driving class premium apk full unlocked all features<br /> -3d driving class cheats apk unlimited coins and cars<br /> -3d driving class game apk download modded with all vehicles<br /> -3d driving class cracked apk no ads and all cars unlocked<br /> -3d driving class pro apk full version with all cars<br /> -3d driving class android apk mod unlock all vehicles<br /> -3d driving class offline apk unlimited money and cars<br /> -3d driving class updated apk download with all cars unlocked<br /> -3d driving class realistic simulator mod apk all vehicles<br /> -3d driving class mega mod apk free download unlock everything<br /> -3d driving class hack version apk unlimited coins and cars<br /> -3d driving class original apk download with all cars unlocked<br /> -3d driving class best simulator mod apk unlock all features<br /> -3d driving class vip mod apk no root with all cars<br /> -3d driving class new version apk mod unlimited money and vehicles<br /> -3d driving class online apk free download with all cars unlocked<br /> -3d driving class ultimate simulator mod apk all vehicles<br /> -3d driving class super mod apk no ads and unlock everything<br /> -3d driving class full unlocked apk unlimited coins and cars<br /> -3d driving class old version apk download with all vehicles unlocked<br /> -3d driving class amazing simulator mod apk unlock all features<br /> -3d driving class gold mod apk no verification with all cars<br /> -3d driving class latest mod apk unlimited money and vehicles<br /> -3d driving class extreme mod apk free download unlock everything<br /> -3d driving class unlimited cars apk hack version download<br /> -3d driving class modded apk with all vehicles unlocked free<br /> -3d driving class realistic mod apk unlock all features and cars<br /> -3d driving class premium mod apk no ads and unlimited money<br /> -3d driving class full version apk download with all cars unlocked<br /> -3d driving class awesome simulator mod apk all vehicles<br /> -3d driving class plus mod apk no survey with all cars<br /> -3d driving class new update apk mod unlimited money and vehicles<br /> -3d driving class extreme simulator mod apk unlock everything<br /> -3d driving class super hack apk no ads and unlimited coins<br /> -3d driving class full hack apk download with all cars unlocked<br /> -3d driving class amazing mod apk with all vehicles free download</p> - <h3>Step 1: Download the apk file from the link below</h3> - <p>The first step is to download the apk file of 3D Driving Class Mod Apk from the link provided below. The file size is about 100 MB, so make sure you have enough space on your device. The link is safe and secure, so you don't have to worry about any viruses or malware.</p> - <h3>Step 2: Enable unknown sources on your device</h3> - <p>The second step is to enable unknown sources on your device. This is necessary because the apk file is not from the Google Play Store, so you need to allow your device to install apps from other sources. 
To do this, go to your device settings, then security, then unknown sources, and turn it on.</p> - <h3>Step 3: Install the apk file and enjoy the game</h3> - <p>The third and final step is to install the apk file and enjoy the game. To do this, locate the apk file on your device, tap on it, and follow the instructions on the screen. Once the installation is complete, you can open the game and start driving.</p> - <h2>Conclusion</h2> - <p>3D Driving Class Mod Apk is a realistic driving simulator game that lets you drive any car you want in various modes and scenarios. You can also enjoy unlimited money and gems, all cars unlocked, realistic graphics and physics, easy controls and customization, and more. If you are looking for a fun and educational driving game for your Android device, you should definitely download 3D Driving Class Mod Apk from the link below.</p> - <h2>FAQs</h2> - <p>Here are some of the frequently asked questions about 3D Driving Class Mod Apk:</p> - <ul> -<li><strong>Is 3D Driving Class Mod Apk safe to download and install?</strong></li> -<p>Yes, 3D Driving Class Mod Apk is safe to download and install. The apk file is scanned for viruses and malware before uploading it to the link below. However, you should always download apps from trusted sources and at your own risk.</p> -<li><strong>Do I need to root my device to use 3D Driving Class Mod Apk?</strong></li> -<p>No, you don't need to root your device to use 3D Driving Class Mod Apk. The mod apk works fine on both rooted and non-rooted devices.</p> -<li><strong>Can I play 3D Driving Class Mod Apk online with other players?</strong></li> -<p>Yes, you can play 3D Driving Class Mod Apk online with other players. The game has a multiplayer mode where you can join or create rooms with other drivers. You can also chat with them and compete with them in different modes.</p> -<li><strong>Can I update 3D Driving Class Mod Apk to the latest version?</strong></li> -<p>No, you cannot update 3D Driving Class Mod Apk to the latest version. The mod apk is based on an older version of the original game, so if you update it, you will lose all the mod features and content. 
If you want to update the game, you have to uninstall the mod apk and install the original game from the Google Play Store.</p> -<li><strong>What if I encounter any problems or errors while using 3D Driving Class Mod Apk?</strong></li> -<p>If you encounter any problems or errors while using 3D Driving Class Mod Apk, you can try these solutions:</p> -<ul> -<li>Make sure you have enough storage space on your device.</li> -<li>Make sure you have a stable internet connection.</li> -<li>Make sure you have enabled unknown sources on your device.</li> -<li>Make sure you have downloaded and installed the apk file correctly.</li> -<li>Restart your device and try again.</li> -</ul> -</ul> - <p></</p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Extreme Car Driving Simulator for PC Windows 10 and Experience Real Physics.md b/spaces/congsaPfin/Manga-OCR/logs/Download Extreme Car Driving Simulator for PC Windows 10 and Experience Real Physics.md deleted file mode 100644 index f045569048ac9cea45ba6eb341f7fb41c4de4d2b..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Extreme Car Driving Simulator for PC Windows 10 and Experience Real Physics.md +++ /dev/null @@ -1,116 +0,0 @@ -<br /> -<h1>Extreme Car Driving Simulator: How to Download and Play on PC</h1> -<h2>Introduction</h2> -<p>Do you love driving fast cars and performing stunts in a virtual city? Do you want to experience the thrill of racing and drifting without breaking any laws or risking your safety? If you answered yes, then you should try <strong>Extreme Car Driving Simulator</strong>, one of the most popular and realistic car simulator games for Android devices.</p> -<h2>extreme car driving simulator download for pc windows 10</h2><br /><p><b><b>Download</b> ✶✶✶ <a href="https://urlca.com/2uO4Yb">https://urlca.com/2uO4Yb</a></b></p><br /><br /> -<p>In this article, we will show you how to download and play Extreme Car Driving Simulator on your PC using BlueStacks, the best Android emulator for gaming. We will also tell you why playing this game on PC is better than playing it on your mobile device, and what features and enhancements you can enjoy with BlueStacks. Let's get started!</p> -<h2>What is Extreme Car Driving Simulator?</h2> -<p>Extreme Car Driving Simulator is a casual racing game developed by AxesInMotion Racing. It was released in 2014 and has over 100 million downloads on Google Play Store. It is one of the most realistic car simulator games ever made, thanks to its advanced real physics engine.</p> -<p>In this game, you can drive, drift, and feel a racing sports car in an open world environment. You can be a furious racer on a whole city for you, or just enjoy a relaxing drive in the sandbox mode. You can also choose from different game modes, such as checkpoint, traffic, free roam, or ghost mode. You can also customize your car with different colors, wheels, spoilers, and more.</p> -<h2>Why play Extreme Car Driving Simulator on PC?</h2> -<p>Playing Extreme Car Driving Simulator on your mobile device is fun, but playing it on your PC is even better. Here are some reasons why:</p> -<ul> -<li>You can enjoy a larger screen and better graphics. No more squinting or missing details. You can see every curve, bump, and scratch on your car and the city.</li> -<li>You can have more control over your car with your keyboard and mouse. You can steer, accelerate, brake, and drift with more precision and ease. 
You can also use the full HUD display that shows you all the information you need, such as speedometer, odometer, gear indicator, etc.</li> -<li>You can play with less lag and smoother performance. No more crashes, freezes, or glitches. You can run the game at high settings without compromising your PC's resources.</li> -<li>You can access more features and enhancements with BlueStacks. You can use macros to automate tasks, multi-instance to play multiple games at once, script to execute commands, and more. You can also record your gameplay, take screenshots, chat with other players, and stream your game online.</li> -</ul> -<h2>How to download and play Extreme Car Driving Simulator on PC</h2> -<p>Downloading and playing Extreme Car Driving Simulator on PC is easy with BlueStacks. Just follow these simple steps:</p> -<h3>Step 1: Download and install BlueStacks on your PC</h3> -<p>BlueStacks is the best Android emulator for gaming. It lets you play any Android game or app on your PC with ease. To download BlueStacks, go to <a href="(^1^)">this link</a> and click on the "Download Extreme Car Driving Simulator on PC" button. This will start the download of the BlueStacks installer file. Once the download is complete, run the installer and follow the instructions to install BlueStacks on your PC.</p> -<h3>Step 2: Complete Google sign-in to access the Play Store</h3> -<p>After installing BlueStacks, launch it and complete the Google sign-in process. This will give you access to the Google Play Store, where you can find and download any Android game or app you want. If you already have a Google account, you can use it to sign in. If not, you can create one for free.</p> -<h3>Step 3: Look for Extreme Car Driving Simulator in the search bar</h3> -<p>Once you are on the home screen of BlueStacks, look for the search bar on the top right corner. Type "Extreme Car Driving Simulator" and hit enter. 
This will show you the game's page on the Play Store.</p> -<p>extreme car driving simulator pc game free download<br /> -extreme car driving simulator for windows 10 laptop<br /> -extreme car driving simulator online play on pc<br /> -extreme car driving simulator bluestacks download<br /> -extreme car driving simulator microsoft store app<br /> -extreme car driving simulator pc version full<br /> -extreme car driving simulator windows 10 apk<br /> -extreme car driving simulator pc gameplay<br /> -extreme car driving simulator for windows 10 offline<br /> -extreme car driving simulator pc requirements<br /> -extreme car driving simulator download for pc softonic<br /> -extreme car driving simulator for windows 10 64 bit<br /> -extreme car driving simulator pc controls<br /> -extreme car driving simulator windows 10 cheats<br /> -extreme car driving simulator pc mod apk<br /> -extreme car driving simulator download for pc windows 7<br /> -extreme car driving simulator for windows 10 review<br /> -extreme car driving simulator pc graphics settings<br /> -extreme car driving simulator windows 10 update<br /> -extreme car driving simulator pc hack<br /> -extreme car driving simulator download for pc windows 8<br /> -extreme car driving simulator for windows 10 free coins<br /> -extreme car driving simulator pc steering wheel support<br /> -extreme car driving simulator windows 10 tips and tricks<br /> -extreme car driving simulator pc multiplayer<br /> -extreme car driving simulator download for pc nox player<br /> -extreme car driving simulator for windows 10 latest version<br /> -extreme car driving simulator pc best cars<br /> -extreme car driving simulator windows 10 bug fixes<br /> -extreme car driving simulator pc keyboard shortcuts<br /> -extreme car driving simulator download for pc memu play<br /> -extreme car driving simulator for windows 10 rating<br /> -extreme car driving simulator pc system requirements<br /> -extreme car driving simulator windows 10 new features<br /> -extreme car driving simulator pc how to install<br /> -extreme car driving simulator download for pc ldplayer<br /> -extreme car driving simulator for windows 10 size<br /> -extreme car driving simulator pc guide and walkthrough<br /> -extreme car driving simulator windows 10 performance issues<br /> -extreme car driving simulator pc how to play with friends<br /> -extreme car driving simulator download for pc gameloop<br /> -extreme car driving simulator for windows 10 pros and cons<br /> -extreme car driving simulator pc comparison with android version<br /> -extreme car driving simulator windows 10 feedback and suggestions<br /> -extreme car driving simulator pc how to unlock all cars<br /> -extreme car driving simulator download for pc remix os player<br /> -extreme car driving simulator for windows 10 alternatives and similar apps <br /> -extreme car driving simulator pc troubleshooting and support <br /> -extreme car driving simulator windows 10 frequently asked questions</p> -<h3>Step 4: Click to install Extreme Car Driving Simulator from the search results</h3> -<p>On the game's page, click on the "Install" button. This will start the download and installation of Extreme Car Driving Simulator on your PC. You can monitor the progress on the bottom right corner of BlueStacks.</p> -<h3>Step 5: Click the Extreme Car Driving Simulator icon on the home screen to start playing</h3> -<p>Once the installation is complete, you will see the Extreme Car Driving Simulator icon on the home screen of BlueStacks. 
Click on it to launch the game and start playing. You can also find it in the "My Games" tab on BlueStacks.</p> -<h2>Game features and enhancements of Extreme Car Driving Simulator on PC</h2> -<p>Playing Extreme Car Driving Simulator on PC with BlueStacks will give you a lot of advantages over playing it on your mobile device. Here are some of them:</p> -<h3>Realistic physics and graphics</h3> -<p>Extreme Car Driving Simulator boasts of having one of the most realistic physics engines ever created for a car simulator game. You can feel every bump, skid, crash, and burn as you drive your car in a virtual city. You can also enjoy stunning graphics that make the game look more lifelike and immersive. You can adjust the graphics settings to suit your PC's capabilities and preferences.</p> -<h3>Open world sandbox mode</h3> -<p>One of the best features of Extreme Car Driving Simulator is its open world sandbox mode. In this mode, you can explore a huge city with no limits or rules. You can drive anywhere you want, do anything you want, and have fun as much as you want. You can also find hidden secrets and surprises in the city, such as ramps, loops, bridges, tunnels, etc.</p> -<h3>Multiple game modes and vehicles</h3> -<p>If you want more challenge and variety in your gameplay, you can try out different game modes in Extreme Car Driving Simulator. You can race against time in checkpoint mode, dodge traffic in traffic mode, chase ghosts in ghost mode, or create your own challenges in free roam mode. You can also choose from different vehicles to drive, such as sports cars, muscle cars, off-road vehicles, police cars, etc.</p> -<h3>Macros, Multi Instance, and Script features of BlueStacks</h3> -<p>With BlueStacks, you can also access some amazing features that will enhance your gaming experience even more. For example, you can use macros to automate repetitive tasks or create custom combos. You can use multi-instance to play multiple games or accounts at once. You can use script to execute commands or scripts with a single keystroke. These features will help you save time, improve your skills, and have more fun.</p> -<h2>Conclusion</h2> -<p>Extreme Car Driving Simulator is a great game for anyone who loves driving fast cars and performing stunts in a virtual city. It is one of the most realistic car simulator games ever made, thanks to its advanced real physics engine. It also offers a lot of features and options that make it more enjoyable and diverse.</p> -<p>If you want to play this game on your PC, you can do so easily with BlueStacks. BlueStacks is the best Android emulator for gaming that lets you play any Android game or app on your PC with ease. It also gives you a lot of advantages over playing it on your mobile device, such as a larger screen, better graphics, more control, less lag, and more features.</p> -<p>To download and play Extreme Car Driving Simulator on PC with BlueStacks, just follow these simple steps:</p> -<ol> -<li>Download and install BlueStacks on your PC</li> -<li>Complete Google sign-in to access the Play Store</li> -<li>Look for Extreme Car Driving Simulator in the search bar</li> -<li>Click to install Extreme Car Driving Simulator from the search results</li> -<li>Click the Extreme Car Driving Simulator icon on the home screen to start playing</li> -</ol> -<p>That's it! You are now ready to enjoy Extreme Car Driving Simulator on your PC with BlueStacks. 
Have fun and drive safely!</p> -<h2>FAQs</h2> -<p>Here are some frequently asked questions about Extreme Car Driving Simulator and BlueStacks:</p> -<ul> -<li><strong>Is Extreme Car Driving Simulator free to play?</strong></li> -<li>Yes, Extreme Car Driving Simulator is free to play. However, it contains ads and in-app purchases that can enhance your gameplay or remove ads.</li> -<li><strong>Can I play Extreme Car Driving Simulator offline?</strong></li> -<li>Yes, you can play Extreme Car Driving Simulator offline. However, some features may not be available, such as leaderboards, achievements, or online multiplayer.</li> -<li><strong>Is BlueStacks safe to use?</strong></li> -<li>Yes, BlueStacks is safe to use. It is a legitimate and trusted Android emulator that has been downloaded by millions of users worldwide. It does not contain any malware, viruses, or spyware. It also respects your privacy and does not collect any personal data without your consent.</li> -<li><strong>How much space does BlueStacks require on my PC?</strong></li> -<li>BlueStacks requires about 5 GB of space on your PC. However, this may vary depending on the games and apps you install on BlueStacks.</li> -<li><strong>Can I play Extreme Car Driving Simulator with a controller or a steering wheel?</strong></li> -<li>Yes, you can play Extreme Car Driving Simulator with a controller or a steering wheel. You can connect your device to your PC via USB or Bluetooth and map the buttons or pedals to the game controls using the BlueStacks Gamepad Controls feature.</li> -</ul></p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Jetta 5 Install APK The Ultimate Guide to Playing GTA 5 Mobile on PC and Mac.md b/spaces/congsaPfin/Manga-OCR/logs/Jetta 5 Install APK The Ultimate Guide to Playing GTA 5 Mobile on PC and Mac.md deleted file mode 100644 index 457ee062d0decc425235f41afa8cd2b908b9e3df..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Jetta 5 Install APK The Ultimate Guide to Playing GTA 5 Mobile on PC and Mac.md +++ /dev/null @@ -1,98 +0,0 @@ -<br /> -<h1>How to Install GTA 5 Mobile APK on Your Jetta 5</h1> -<p>If you are a fan of Grand Theft Auto V, you might have heard of GTA 5 Mobile APK, a mobile version of the popular game that lets you play it on your Android or iOS device. But did you know that you can also play GTA 5 Mobile APK on your Jetta 5 screen? Yes, you read that right. You can enjoy the thrilling world of Los Santos and its criminal underworld on your car's display, thanks to Android Auto or CarPlay. In this article, we will show you how to install GTA 5 Mobile APK on your Jetta 5 and give you some tips and tricks for playing it.</p> -<h2>jetta 5 install apk</h2><br /><p><b><b>DOWNLOAD</b> ::: <a href="https://urlca.com/2uOeIl">https://urlca.com/2uOeIl</a></b></p><br /><br /> - <h2>Requirements for Installing GTA 5 Mobile APK on Your Jetta 5</h2> -<p>Before you can install GTA 5 Mobile APK on your Jetta 5, you need to have the following requirements:</p> -<ul> -<li>A Jetta 5 with Android Auto or CarPlay installed. You can check if your car supports these features by visiting <a href="(^1^)">https://www.android.com/auto/</a> or <a href="(^2^)">https://www.apple.com/ios/carplay/</a>.</li> -<li>A compatible Android or iOS device with GTA 5 Mobile APK downloaded. 
You can download the game from <a href="(^3^)">https://www.bluestacks.com/apps/action/gta-5-mobile-grand-theft-auto-on-pc.html</a> or <a href="(^4^)">https://www.bluestacks.com/apps/adventure/gta-5-grand-theft-auto-on-pc.html</a>. Make sure you have enough storage space and battery life on your device.</li> -<li>A USB cable to connect your device to your Jetta 5. You can use any standard USB cable that fits your device's port.</li> -</ul> - <h2>Steps for Installing GTA 5 Mobile APK on Your Jetta 5</h2> -<p>Once you have all the requirements ready, you can follow these steps to install GTA 5 Mobile APK on your Jetta 5:</p> -<h3>Step 1: Connect your device to your Jetta 5 via USB cable</h3> -<p>Plug one end of the USB cable into your device's port and the other end into the USB port of your Jetta 5. Make sure the connection is secure and stable.</p> -<h3>Step 2: Launch Android Auto or CarPlay on your Jetta 5 screen</h3> -<p>Depending on whether you have an Android or iOS device, you will see either Android Auto or CarPlay icon on your Jetta 5 screen. Tap on the icon to launch the feature and follow the instructions to set it up if it's your first time using it.</p> -<p>jetta 5 install apk bluestacks<br /> -jetta 5 install apk android<br /> -jetta 5 install apk pc<br /> -jetta 5 install apk mac<br /> -jetta 5 install apk windows<br /> -jetta 5 install apk download<br /> -jetta 5 install apk free<br /> -jetta 5 install apk offline<br /> -jetta 5 install apk online<br /> -jetta 5 install apk latest version<br /> -jetta 5 install apk mod<br /> -jetta 5 install apk hack<br /> -jetta 5 install apk cheats<br /> -jetta 5 install apk unlimited money<br /> -jetta 5 install apk obb<br /> -jetta 5 install apk data<br /> -jetta 5 install apk full<br /> -jetta 5 install apk cracked<br /> -jetta 5 install apk premium<br /> -jetta 5 install apk pro<br /> -jetta 5 install apk emulator<br /> -jetta 5 install apk nox<br /> -jetta 5 install apk memu<br /> -jetta 5 install apk ldplayer<br /> -jetta 5 install apk gameloop<br /> -jetta 5 install apk guide<br /> -jetta 5 install apk tutorial<br /> -jetta 5 install apk tips<br /> -jetta 5 install apk tricks<br /> -jetta 5 install apk review<br /> -jetta 5 install apk gameplay<br /> -jetta 5 install apk video<br /> -jetta 5 install apk youtube<br /> -jetta 5 install apk reddit<br /> -jetta 5 install apk forum<br /> -jetta 5 install apk blog<br /> -jetta 5 install apk website<br /> -jetta 5 install apk link<br /> -jetta 5 install apk file<br /> -jetta 5 install apk mirror<br /> -jetta 5 install apk update<br /> -jetta 5 install apk new features<br /> -jetta 5 install apk bug fixes<br /> -jetta 5 install apk performance improvements<br /> -jetta 5 install apk security enhancements<br /> -jetta 5 install apk compatibility issues<br /> -jetta 5 install apk system requirements<br /> -jetta 5 install apk customer support<br /> -jetta 5 install apk feedbacks</p> -<h3>Step 3: Open GTA 5 Mobile APK on your device and tap on the play button</h3> -<p>On your device , open GTA 5 Mobile APK and tap on the play button. You will see a loading screen and then the game will start. You can choose to play the story mode or the online mode, depending on your preference.</p> -<h3>Step 4: Enjoy playing GTA 5 Mobile APK on your Jetta 5 screen</h3> -<p>You can now enjoy playing GTA 5 Mobile APK on your Jetta 5 screen. You will see the game graphics and sounds on your car's display, while you can control the game using your device's touchscreen or buttons. 
You can also use the steering wheel controls or voice commands to navigate the game menus and options.</p> - <h2>Tips and Tricks for Playing GTA 5 Mobile APK on Your Jetta 5</h2> -<p>Playing GTA 5 Mobile APK on your Jetta 5 can be a lot of fun, but it can also be challenging and risky. Here are some tips and tricks to help you have a better and safer gaming experience:</p> -<ul> -<li>Use the steering wheel controls or voice commands to navigate the game menus and options. This way, you can avoid looking at your device's screen and focus more on the road. You can also adjust the graphics settings and sound volume to optimize the game performance and experience.</li> -<li>Be aware of the traffic laws and road safety when playing GTA 5 Mobile APK on your Jetta 5. Remember that you are still driving a real car, not a virtual one. Do not speed, run red lights, or cause accidents while playing the game. You could get fined, arrested, or injured if you do so.</li> -<li>Do not play GTA 5 Mobile APK on your Jetta 5 while driving alone or at night. It is better to have a passenger with you who can help you with the game or warn you of any dangers on the road. It is also safer to play the game during daylight hours, when you can see better and avoid potential hazards.</li> -<li>Do not play GTA 5 Mobile APK on your Jetta 5 for too long or too often. Playing the game for a short time or occasionally can be fun and relaxing, but playing it for a long time or frequently can be addictive and harmful. You could lose track of time, get distracted, or suffer from eye strain, headaches, or fatigue.</li> -</ul> - <h2>Conclusion</h2> -<p>GTA 5 Mobile APK is a mobile version of Grand Theft Auto V, one of the most popular and exciting games ever made. You can play it on your Android or iOS device, but you can also play it on your Jetta 5 screen, thanks to Android Auto or CarPlay. All you need is a Jetta 5 with Android Auto or CarPlay installed, a compatible device with GTA 5 Mobile APK downloaded, and a USB cable to connect them. Then, you can follow the steps we have shown you to install GTA 5 Mobile APK on your Jetta 5 and enjoy playing it.</p> -<p>However, playing GTA 5 Mobile APK on your Jetta 5 also comes with some challenges and risks. You need to be careful and responsible when playing the game on your car's display, as you are still driving a real car on real roads. You need to follow the traffic laws and road safety rules, avoid distractions and accidents, and limit your gaming time and frequency. You also need to use the steering wheel controls or voice commands to navigate the game menus and options, and adjust the graphics settings and sound volume to optimize the game performance and experience.</p> -<p>We hope this article has helped you learn how to install GTA 5 Mobile APK on your Jetta 5 and how to play it safely and enjoyably. If you have any questions or comments, please feel free to leave them below. And if you liked this article, please share it with your friends who might be interested in playing GTA 5 Mobile APK on their Jetta 5s. Thank you for reading!</p> - <h2>FAQs</h2> -<p>Here are some frequently asked questions about installing GTA 5 Mobile APK on your Jetta 5:</p> -<ol> -<li><b>Is GTA 5 Mobile APK legal?</b></li> -<p>GTA 5 Mobile APK is not an official product of Rockstar Games, the developer of Grand Theft Auto V. It is a fan-made mod that allows you to play GTA V on your mobile device. 
Therefore, it is not legal to download or use GTA 5 Mobile APK without permission from Rockstar Games. However, there have been no reports of legal action taken against users of GTA 5 Mobile APK so far.</p> -<li><b>Is GTA 5 Mobile APK safe?</b></li> -<p>GTA 5 Mobile APK is generally safe to download and use, as long as you get it from a trusted source like <a href="https://www.bluestacks.com/apps/action/gta-5-mobile-grand-theft-auto-on-pc.html">https://www.bluestacks.com/apps/action/gta-5-mobile-grand-theft-auto-on-pc.html</a> or <a href="https://www.bluestacks.com/apps/adventure/gta-5-grand-theft-auto-on-pc.html">https://www.bluestacks.com/apps/adventure/gta-5-grand-theft-auto-on-pc.html</a>. However, you should always scan the file for viruses and malware before installing it on your device. You should also be careful when playing GTA 5 Mobile APK on your Jetta 5, as it can be distracting and dangerous if you don't follow the traffic laws and road safety rules.</p> -<li><b>Is GTA 5 Mobile APK free?</b></li> -<p>GTA 5 Mobile APK is free to download and use, as it is a fan-made mod that does not require any payment or subscription. However, you still need to own a copy of Grand Theft Auto V on your PC or console, as GTA 5 Mobile APK uses the game data from the original game. You also need to have enough storage space and internet data on your device to download and play GTA 5 Mobile APK.</p> -<li><b>Can I play GTA 5 Mobile APK offline?</b></li> -<p>GTA 5 Mobile APK requires an internet connection to download and install the game on your device. However, once you have installed the game, you can play the story mode offline without any internet connection; the online mode, along with some features and updates, still requires an internet connection.</p> -<li><b>Can I play GTA 5 Mobile APK with other players?</b></li> -<p>GTA 5 Mobile APK allows you to play with other players online, as long as you have an internet connection and a Rockstar Games Social Club account. You can join or create online sessions with up to 30 players, where you can cooperate or compete in various missions and activities.
You can also chat with other players using text or voice messages.</p> -</ol></p> 197e85843d<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/8800 Nederlandse EPUB Boeken Collectie - DutchReleaseTeam Download en lees boeken van bekende en onbekende auteurs in het Nederlands.md b/spaces/contluForse/HuggingGPT/assets/8800 Nederlandse EPUB Boeken Collectie - DutchReleaseTeam Download en lees boeken van bekende en onbekende auteurs in het Nederlands.md deleted file mode 100644 index c6bb474831c51954a16986a7c5dac048514c9763..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/8800 Nederlandse EPUB Boeken Collectie - DutchReleaseTeam Download en lees boeken van bekende en onbekende auteurs in het Nederlands.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Velai Illa Pattathari Full Movie Hd Download Utorrent For 70</h2><br /><p><b><b>Download</b> 🆗 <a href="https://ssurll.com/2uzyNw">https://ssurll.com/2uzyNw</a></b></p><br /><br /> -<br /> - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/contluForse/HuggingGPT/assets/ATLAS.Translation.V14.0.By.Torrentspedia.md b/spaces/contluForse/HuggingGPT/assets/ATLAS.Translation.V14.0.By.Torrentspedia.md deleted file mode 100644 index abcc79f1e73cb8ba36cefd1adfa406fad066b146..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/ATLAS.Translation.V14.0.By.Torrentspedia.md +++ /dev/null @@ -1,98 +0,0 @@ -<h2>ATLAS.Translation.V14.0.By.Torrentspedia</h2><br /><p><b><b>Download File</b> • <a href="https://ssurll.com/2uzyQM">https://ssurll.com/2uzyQM</a></b></p><br /><br /> -<br /> -Based on this technique, the volume of the lateral, third, and fourth ventricles were calculated from the midsagittal views of each subject, resulting in 19 features. As the number of folds and convolutions is a sensitive measure of brain morphological differences between the groups, such parameters were also calculated by the FreeSurfer software. Moreover, the amount of sulcal and gyral deformation was analysed [19]. To better understand the evolution of the brain structural differences over time and to limit the number of features, we performed two repeated-measure ANOVAs for each feature and time-point (measurements performed twice at each time-point). - -All analyses were performed using the Statistical Package for Social Sciences (SPSS), Version 20.0 (Chicago, IL, USA). p-Values of less than 0.05 were considered significant. - -Results - -The general characteristics of the population are presented in Table 1. There were no differences between the groups in gender, age, or BMI. Two patients presented with arachnoid cysts (two SIVDs) and five with small tumours in the lateral ventricle (two SIVDs, two VDs, one SVA) (Table 1). 
- -Table 1. Patients characteristics. [Flattened table residue: rows for number of patients (17/23/18/5/3/10), gender (female/male), age in years (43.8-49.3 across groups) and body mass index (24.6-28.5 across groups), under the group headers Controls, Cases, VN, VD, SIVD, SVA, SIVD/VD, SIVD/SVA and SVA/VD; the values cannot be reliably realigned to their columns from the source.] - -One-way ANOVA of 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/contluForse/HuggingGPT/assets/Adobe Dreamweaver CC 2017 V17.1.0.9583 (x64) Portable [TechTools Serial Key The Ultimate Web Development Tool for Professionals.md b/spaces/contluForse/HuggingGPT/assets/Adobe Dreamweaver CC 2017 V17.1.0.9583 (x64) Portable [TechTools Serial Key The Ultimate Web Development Tool for Professionals.md deleted file mode 100644 index 0d7dbb2fa169d60a10f736bf724aa34bfe5afa05..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Adobe Dreamweaver CC 2017 V17.1.0.9583 (x64) Portable [TechTools Serial Key The Ultimate Web Development Tool for Professionals.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Adobe Dreamweaver CC 2017 V17.1.0.9583 (x64) Portable [TechTools Serial Key</h2><br /><p><b><b>DOWNLOAD</b> ::: <a href="https://ssurll.com/2uzxVJ">https://ssurll.com/2uzxVJ</a></b></p><br /><br /> -<br /> - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/contluForse/HuggingGPT/assets/Azami No Gotoku Toge Areba Full Movie Download In Italian UPDATED.md b/spaces/contluForse/HuggingGPT/assets/Azami No Gotoku Toge Areba Full Movie Download In Italian UPDATED.md deleted file mode 100644 index aa0ac92183fdf502072c56c3aa29af6bd70758c5..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Azami No Gotoku Toge Areba Full Movie Download In Italian UPDATED.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Azami no gotoku toge areba full movie download in italian</h2><br /><p><b><b>DOWNLOAD</b> ··· <a href="https://ssurll.com/2uzvFC">https://ssurll.com/2uzvFC</a></b></p><br /><br /> - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/cooelf/Multimodal-CoT/timm/optim/optim_factory.py b/spaces/cooelf/Multimodal-CoT/timm/optim/optim_factory.py deleted file mode 100644 index 2017d21f31e7223406b59e4193ce8696514a5383..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/optim/optim_factory.py +++ /dev/null @@ -1,174 +0,0 @@ -""" Optimizer Factory w/ Custom Weight Decay -Hacked together by / Copyright 2020 Ross Wightman -""" -from typing import Optional - -import torch -import torch.nn as nn -import torch.optim as optim - -from .adafactor import Adafactor -from .adahessian import Adahessian -from .adamp import AdamP -from .lookahead import Lookahead -from .nadam import Nadam -from .novograd import NovoGrad -from .nvnovograd import NvNovoGrad -from .radam import RAdam -from .rmsprop_tf import RMSpropTF -from .sgdp import SGDP -from .adabelief import AdaBelief - -try: - from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD - has_apex = True -except ImportError: - has_apex = False - - -def add_weight_decay(model, weight_decay=1e-5, skip_list=()): - decay = [] - no_decay = [] - for name, param in model.named_parameters(): - if not param.requires_grad: - continue # frozen weights - if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: - no_decay.append(param) - else: - decay.append(param) - return [ - {'params': no_decay, 'weight_decay': 0.},
- {'params': decay, 'weight_decay': weight_decay}] - - -def optimizer_kwargs(cfg): - """ cfg/argparse to kwargs helper - Convert optimizer args in argparse args or cfg like object to keyword args for updated create fn. - """ - kwargs = dict( - optimizer_name=cfg.opt, - learning_rate=cfg.lr, - weight_decay=cfg.weight_decay, - momentum=cfg.momentum) - if getattr(cfg, 'opt_eps', None) is not None: - kwargs['eps'] = cfg.opt_eps - if getattr(cfg, 'opt_betas', None) is not None: - kwargs['betas'] = cfg.opt_betas - if getattr(cfg, 'opt_args', None) is not None: - kwargs.update(cfg.opt_args) - return kwargs - - -def create_optimizer(args, model, filter_bias_and_bn=True): - """ Legacy optimizer factory for backwards compatibility. - NOTE: Use create_optimizer_v2 for new code. - """ - return create_optimizer_v2( - model, - **optimizer_kwargs(cfg=args), - filter_bias_and_bn=filter_bias_and_bn, - ) - - -def create_optimizer_v2( - model: nn.Module, - optimizer_name: str = 'sgd', - learning_rate: Optional[float] = None, - weight_decay: float = 0., - momentum: float = 0.9, - filter_bias_and_bn: bool = True, - **kwargs): - """ Create an optimizer. - - TODO currently the model is passed in and all parameters are selected for optimization. - For more general use an interface that allows selection of parameters to optimize and lr groups, one of: - * a filter fn interface that further breaks params into groups in a weight_decay compatible fashion - * expose the parameters interface and leave it up to caller - - Args: - model (nn.Module): model containing parameters to optimize - optimizer_name: name of optimizer to create - learning_rate: initial learning rate - weight_decay: weight decay to apply in optimizer - momentum: momentum for momentum based optimizers (others may use betas via kwargs) - filter_bias_and_bn: filter out bias, bn and other 1d params from weight decay - **kwargs: extra optimizer specific kwargs to pass through - - Returns: - Optimizer - """ - opt_lower = optimizer_name.lower() - if weight_decay and filter_bias_and_bn: - skip = {} - if hasattr(model, 'no_weight_decay'): - skip = model.no_weight_decay() - parameters = add_weight_decay(model, weight_decay, skip) - weight_decay = 0. 
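The grouping that `add_weight_decay` produces is easiest to see on a toy model. A minimal sketch follows; the two-layer model is hypothetical, and `add_weight_decay` is assumed importable from this file:

```python
# Minimal sketch of the param groups built by add_weight_decay (defined above);
# the toy model here is an illustrative assumption, not part of this repo.
import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 16), nn.LayerNorm(16), nn.Linear(16, 2))
groups = add_weight_decay(model, weight_decay=1e-2)

# Biases and other 1-D parameters (both LayerNorm tensors) get no weight decay.
assert groups[0]["weight_decay"] == 0.0
assert all(p.ndim == 1 for p in groups[0]["params"])
# Only the 2-D weight matrices receive the configured weight decay.
assert groups[1]["weight_decay"] == 1e-2
assert all(p.ndim == 2 for p in groups[1]["params"])
```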
- else: - parameters = model.parameters() - if 'fused' in opt_lower: - assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' - - opt_args = dict(lr=learning_rate, weight_decay=weight_decay, **kwargs) - opt_split = opt_lower.split('_') - opt_lower = opt_split[-1] - if opt_lower == 'sgd' or opt_lower == 'nesterov': - opt_args.pop('eps', None) - optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args) - elif opt_lower == 'momentum': - opt_args.pop('eps', None) - optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args) - elif opt_lower == 'adam': - optimizer = optim.Adam(parameters, **opt_args) - elif opt_lower == 'adabelief': - optimizer = AdaBelief(parameters, rectify=False, **opt_args) - elif opt_lower == 'adamw': - optimizer = optim.AdamW(parameters, **opt_args) - elif opt_lower == 'nadam': - optimizer = Nadam(parameters, **opt_args) - elif opt_lower == 'radam': - optimizer = RAdam(parameters, **opt_args) - elif opt_lower == 'adamp': - optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args) - elif opt_lower == 'sgdp': - optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args) - elif opt_lower == 'adadelta': - optimizer = optim.Adadelta(parameters, **opt_args) - elif opt_lower == 'adafactor': - if not learning_rate: - opt_args['lr'] = None - optimizer = Adafactor(parameters, **opt_args) - elif opt_lower == 'adahessian': - optimizer = Adahessian(parameters, **opt_args) - elif opt_lower == 'rmsprop': - optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args) - elif opt_lower == 'rmsproptf': - optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args) - elif opt_lower == 'novograd': - optimizer = NovoGrad(parameters, **opt_args) - elif opt_lower == 'nvnovograd': - optimizer = NvNovoGrad(parameters, **opt_args) - elif opt_lower == 'fusedsgd': - opt_args.pop('eps', None) - optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args) - elif opt_lower == 'fusedmomentum': - opt_args.pop('eps', None) - optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args) - elif opt_lower == 'fusedadam': - optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args) - elif opt_lower == 'fusedadamw': - optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args) - elif opt_lower == 'fusedlamb': - optimizer = FusedLAMB(parameters, **opt_args) - elif opt_lower == 'fusednovograd': - opt_args.setdefault('betas', (0.95, 0.98)) - optimizer = FusedNovoGrad(parameters, **opt_args) - else: - assert False and "Invalid optimizer" - raise ValueError - - if len(opt_split) > 1: - if opt_split[0] == 'lookahead': - optimizer = Lookahead(optimizer) - - return optimizer diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/midas/midas/midas_net.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/midas/midas/midas_net.py deleted file mode 100644 index 8a954977800b0a0f48807e80fa63041910e33c1f..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/midas/midas/midas_net.py +++ /dev/null @@ -1,76 +0,0 @@ -"""MidashNet: Network for monocular depth estimation trained by mixing several datasets. 
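A hedged usage sketch for the `create_optimizer_v2` factory completed above; the model and hyperparameters are illustrative choices, not values used by this repo:

```python
# Hypothetical call into create_optimizer_v2 (defined above); any nn.Module works.
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
optimizer = create_optimizer_v2(
    model,
    optimizer_name="adamw",   # dispatched through the opt_lower chain above
    learning_rate=1e-3,
    weight_decay=0.05,        # bias/norm params are filtered into a no-decay group
)
```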
-This file contains code that is adapted from -https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py -""" -import torch -import torch.nn as nn - -from .base_model import BaseModel -from .blocks import FeatureFusionBlock, Interpolate, _make_encoder - - -class MidasNet(BaseModel): - """Network for monocular depth estimation. - """ - - def __init__(self, path=None, features=256, non_negative=True): - """Init. - - Args: - path (str, optional): Path to saved model. Defaults to None. - features (int, optional): Number of features. Defaults to 256. - backbone (str, optional): Backbone network for encoder. Defaults to resnet50 - """ - print("Loading weights: ", path) - - super(MidasNet, self).__init__() - - use_pretrained = False if path is None else True - - self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained) - - self.scratch.refinenet4 = FeatureFusionBlock(features) - self.scratch.refinenet3 = FeatureFusionBlock(features) - self.scratch.refinenet2 = FeatureFusionBlock(features) - self.scratch.refinenet1 = FeatureFusionBlock(features) - - self.scratch.output_conv = nn.Sequential( - nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1), - Interpolate(scale_factor=2, mode="bilinear"), - nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1), - nn.ReLU(True), - nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), - nn.ReLU(True) if non_negative else nn.Identity(), - ) - - if path: - self.load(path) - - def forward(self, x): - """Forward pass. - - Args: - x (tensor): input data (image) - - Returns: - tensor: depth - """ - - layer_1 = self.pretrained.layer1(x) - layer_2 = self.pretrained.layer2(layer_1) - layer_3 = self.pretrained.layer3(layer_2) - layer_4 = self.pretrained.layer4(layer_3) - - layer_1_rn = self.scratch.layer1_rn(layer_1) - layer_2_rn = self.scratch.layer2_rn(layer_2) - layer_3_rn = self.scratch.layer3_rn(layer_3) - layer_4_rn = self.scratch.layer4_rn(layer_4) - - path_4 = self.scratch.refinenet4(layer_4_rn) - path_3 = self.scratch.refinenet3(path_4, layer_3_rn) - path_2 = self.scratch.refinenet2(path_3, layer_2_rn) - path_1 = self.scratch.refinenet1(path_2, layer_1_rn) - - out = self.scratch.output_conv(path_1) - - return torch.squeeze(out, dim=1) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/apis/inference.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/apis/inference.py deleted file mode 100644 index 515e459ff6e66e955624fedaf32d2076be750563..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/apis/inference.py +++ /dev/null @@ -1,138 +0,0 @@ -import matplotlib.pyplot as plt -import annotator.mmpkg.mmcv as mmcv -import torch -from annotator.mmpkg.mmcv.parallel import collate, scatter -from annotator.mmpkg.mmcv.runner import load_checkpoint - -from annotator.mmpkg.mmseg.datasets.pipelines import Compose -from annotator.mmpkg.mmseg.models import build_segmentor -from modules import devices - - -def init_segmentor(config, checkpoint=None, device=devices.get_device_for("controlnet")): - """Initialize a segmentor from config file. - - Args: - config (str or :obj:`mmcv.Config`): Config file path or the config - object. - checkpoint (str, optional): Checkpoint path. If left as None, the model - will not load any weights. - device (str, optional) CPU/CUDA device option. Default 'cuda:0'. 
- Use 'cpu' for loading model on CPU. - Returns: - nn.Module: The constructed segmentor. - """ - if isinstance(config, str): - config = mmcv.Config.fromfile(config) - elif not isinstance(config, mmcv.Config): - raise TypeError('config must be a filename or Config object, ' - 'but got {}'.format(type(config))) - config.model.pretrained = None - config.model.train_cfg = None - model = build_segmentor(config.model, test_cfg=config.get('test_cfg')) - if checkpoint is not None: - checkpoint = load_checkpoint(model, checkpoint, map_location='cpu') - model.CLASSES = checkpoint['meta']['CLASSES'] - model.PALETTE = checkpoint['meta']['PALETTE'] - model.cfg = config # save the config in the model for convenience - model.to(device) - model.eval() - return model - - -class LoadImage: - """A simple pipeline to load image.""" - - def __call__(self, results): - """Call function to load images into results. - - Args: - results (dict): A result dict contains the file name - of the image to be read. - - Returns: - dict: ``results`` will be returned containing loaded image. - """ - - if isinstance(results['img'], str): - results['filename'] = results['img'] - results['ori_filename'] = results['img'] - else: - results['filename'] = None - results['ori_filename'] = None - img = mmcv.imread(results['img']) - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - return results - - -def inference_segmentor(model, img): - """Inference image(s) with the segmentor. - - Args: - model (nn.Module): The loaded segmentor. - imgs (str/ndarray or list[str/ndarray]): Either image files or loaded - images. - - Returns: - (list[Tensor]): The segmentation result. - """ - cfg = model.cfg - device = next(model.parameters()).device # model device - # build the data pipeline - test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:] - test_pipeline = Compose(test_pipeline) - # prepare data - data = dict(img=img) - data = test_pipeline(data) - data = collate([data], samples_per_gpu=1) - if next(model.parameters()).is_cuda: - # scatter to specified GPU - data = scatter(data, [device])[0] - else: - data['img'][0] = data['img'][0].to(devices.get_device_for("controlnet")) - data['img_metas'] = [i.data[0] for i in data['img_metas']] - - # forward the model - with torch.no_grad(): - result = model(return_loss=False, rescale=True, **data) - return result - - -def show_result_pyplot(model, - img, - result, - palette=None, - fig_size=(15, 10), - opacity=0.5, - title='', - block=True): - """Visualize the segmentation results on the image. - - Args: - model (nn.Module): The loaded segmentor. - img (str or np.ndarray): Image filename or loaded image. - result (list): The segmentation result. - palette (list[list[int]]] | None): The palette of segmentation - map. If None is given, random palette will be generated. - Default: None - fig_size (tuple): Figure size of the pyplot figure. - opacity(float): Opacity of painted segmentation map. - Default 0.5. - Must be in (0, 1] range. - title (str): The title of pyplot figure. - Default is ''. - block (bool): Whether to block the pyplot figure. - Default is True. 
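For orientation, the three helpers in this file chain together as below. This is a hedged sketch: the config, checkpoint, and image paths are placeholders, not files that ship with the repo:

```python
# Hypothetical end-to-end use of init_segmentor / inference_segmentor /
# show_result_pyplot from this module; all paths are placeholders.
model = init_segmentor("configs/my_model.py", "checkpoints/my_model.pth")
result = inference_segmentor(model, "demo.jpg")      # list of per-image label maps
rgb = show_result_pyplot(model, "demo.jpg", result)  # blended RGB ndarray
```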
- """ - if hasattr(model, 'module'): - model = model.module - img = model.show_result( - img, result, palette=palette, show=False, opacity=opacity) - # plt.figure(figsize=fig_size) - # plt.imshow(mmcv.bgr2rgb(img)) - # plt.title(title) - # plt.tight_layout() - # plt.show(block=block) - return mmcv.bgr2rgb(img) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/layers/wrappers.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/layers/wrappers.py deleted file mode 100644 index 4367f9ab50ce3ea47616e5c4c43ac4b78164b128..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/layers/wrappers.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -""" -Wrappers around on some nn functions, mainly to support empty tensors. - -Ideally, add support directly in PyTorch to empty tensors in those functions. - -These can be removed once https://github.com/pytorch/pytorch/issues/12013 -is implemented -""" - -import warnings -from typing import List, Optional -import torch -from torch.nn import functional as F - -from annotator.oneformer.detectron2.utils.env import TORCH_VERSION - - -def shapes_to_tensor(x: List[int], device: Optional[torch.device] = None) -> torch.Tensor: - """ - Turn a list of integer scalars or integer Tensor scalars into a vector, - in a way that's both traceable and scriptable. - - In tracing, `x` should be a list of scalar Tensor, so the output can trace to the inputs. - In scripting or eager, `x` should be a list of int. - """ - if torch.jit.is_scripting(): - return torch.as_tensor(x, device=device) - if torch.jit.is_tracing(): - assert all( - [isinstance(t, torch.Tensor) for t in x] - ), "Shape should be tensor during tracing!" - # as_tensor should not be used in tracing because it records a constant - ret = torch.stack(x) - if ret.device != device: # avoid recording a hard-coded device if not necessary - ret = ret.to(device=device) - return ret - return torch.as_tensor(x, device=device) - - -def check_if_dynamo_compiling(): - if TORCH_VERSION >= (1, 14): - from torch._dynamo import is_compiling - - return is_compiling() - else: - return False - - -def cat(tensors: List[torch.Tensor], dim: int = 0): - """ - Efficient version of torch.cat that avoids a copy if there is only a single element in a list - """ - assert isinstance(tensors, (list, tuple)) - if len(tensors) == 1: - return tensors[0] - return torch.cat(tensors, dim) - - -def empty_input_loss_func_wrapper(loss_func): - def wrapped_loss_func(input, target, *, reduction="mean", **kwargs): - """ - Same as `loss_func`, but returns 0 (instead of nan) for empty inputs. - """ - if target.numel() == 0 and reduction == "mean": - return input.sum() * 0.0 # connect the gradient - return loss_func(input, target, reduction=reduction, **kwargs) - - return wrapped_loss_func - - -cross_entropy = empty_input_loss_func_wrapper(F.cross_entropy) - - -class _NewEmptyTensorOp(torch.autograd.Function): - @staticmethod - def forward(ctx, x, new_shape): - ctx.shape = x.shape - return x.new_empty(new_shape) - - @staticmethod - def backward(ctx, grad): - shape = ctx.shape - return _NewEmptyTensorOp.apply(grad, shape), None - - -class Conv2d(torch.nn.Conv2d): - """ - A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features. 
- """ - - def __init__(self, *args, **kwargs): - """ - Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`: - - Args: - norm (nn.Module, optional): a normalization layer - activation (callable(Tensor) -> Tensor): a callable activation function - - It assumes that norm layer is used before activation. - """ - norm = kwargs.pop("norm", None) - activation = kwargs.pop("activation", None) - super().__init__(*args, **kwargs) - - self.norm = norm - self.activation = activation - - def forward(self, x): - # torchscript does not support SyncBatchNorm yet - # https://github.com/pytorch/pytorch/issues/40507 - # and we skip these codes in torchscript since: - # 1. currently we only support torchscript in evaluation mode - # 2. features needed by exporting module to torchscript are added in PyTorch 1.6 or - # later version, `Conv2d` in these PyTorch versions has already supported empty inputs. - if not torch.jit.is_scripting(): - # Dynamo doesn't support context managers yet - is_dynamo_compiling = check_if_dynamo_compiling() - if not is_dynamo_compiling: - with warnings.catch_warnings(record=True): - if x.numel() == 0 and self.training: - # https://github.com/pytorch/pytorch/issues/12013 - assert not isinstance( - self.norm, torch.nn.SyncBatchNorm - ), "SyncBatchNorm does not support empty inputs!" - - x = F.conv2d( - x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups - ) - if self.norm is not None: - x = self.norm(x) - if self.activation is not None: - x = self.activation(x) - return x - - -ConvTranspose2d = torch.nn.ConvTranspose2d -BatchNorm2d = torch.nn.BatchNorm2d -interpolate = F.interpolate -Linear = torch.nn.Linear - - -def nonzero_tuple(x): - """ - A 'as_tuple=True' version of torch.nonzero to support torchscript. - because of https://github.com/pytorch/pytorch/issues/38718 - """ - if torch.jit.is_scripting(): - if x.dim() == 0: - return x.unsqueeze(0).nonzero().unbind(1) - return x.nonzero().unbind(1) - else: - return x.nonzero(as_tuple=True) - - -@torch.jit.script_if_tracing -def move_device_like(src: torch.Tensor, dst: torch.Tensor) -> torch.Tensor: - """ - Tracing friendly way to cast tensor to another tensor's device. Device will be treated - as constant during tracing, scripting the casting process as whole can workaround this issue. 
- """ - return src.to(dst.device) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/modeling/backbone/mvit.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/modeling/backbone/mvit.py deleted file mode 100644 index 50667a8a836b933666761cc09d4175e64098c8aa..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/modeling/backbone/mvit.py +++ /dev/null @@ -1,448 +0,0 @@ -import logging -import numpy as np -import torch -import torch.nn as nn - -from .backbone import Backbone -from .utils import ( - PatchEmbed, - add_decomposed_rel_pos, - get_abs_pos, - window_partition, - window_unpartition, -) - -logger = logging.getLogger(__name__) - - -__all__ = ["MViT"] - - -def attention_pool(x, pool, norm=None): - # (B, H, W, C) -> (B, C, H, W) - x = x.permute(0, 3, 1, 2) - x = pool(x) - # (B, C, H1, W1) -> (B, H1, W1, C) - x = x.permute(0, 2, 3, 1) - if norm: - x = norm(x) - - return x - - -class MultiScaleAttention(nn.Module): - """Multiscale Multi-head Attention block.""" - - def __init__( - self, - dim, - dim_out, - num_heads, - qkv_bias=True, - norm_layer=nn.LayerNorm, - pool_kernel=(3, 3), - stride_q=1, - stride_kv=1, - residual_pooling=True, - window_size=0, - use_rel_pos=False, - rel_pos_zero_init=True, - input_size=None, - ): - """ - Args: - dim (int): Number of input channels. - dim_out (int): Number of output channels. - num_heads (int): Number of attention heads. - qkv_bias (bool: If True, add a learnable bias to query, key, value. - norm_layer (nn.Module): Normalization layer. - pool_kernel (tuple): kernel size for qkv pooling layers. - stride_q (int): stride size for q pooling layer. - stride_kv (int): stride size for kv pooling layer. - residual_pooling (bool): If true, enable residual pooling. - use_rel_pos (bool): If True, add relative postional embeddings to the attention map. - rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. - input_size (int or None): Input resolution. 
- """ - super().__init__() - self.num_heads = num_heads - head_dim = dim_out // num_heads - self.scale = head_dim**-0.5 - - self.qkv = nn.Linear(dim, dim_out * 3, bias=qkv_bias) - self.proj = nn.Linear(dim_out, dim_out) - - # qkv pooling - pool_padding = [k // 2 for k in pool_kernel] - dim_conv = dim_out // num_heads - self.pool_q = nn.Conv2d( - dim_conv, - dim_conv, - pool_kernel, - stride=stride_q, - padding=pool_padding, - groups=dim_conv, - bias=False, - ) - self.norm_q = norm_layer(dim_conv) - self.pool_k = nn.Conv2d( - dim_conv, - dim_conv, - pool_kernel, - stride=stride_kv, - padding=pool_padding, - groups=dim_conv, - bias=False, - ) - self.norm_k = norm_layer(dim_conv) - self.pool_v = nn.Conv2d( - dim_conv, - dim_conv, - pool_kernel, - stride=stride_kv, - padding=pool_padding, - groups=dim_conv, - bias=False, - ) - self.norm_v = norm_layer(dim_conv) - - self.window_size = window_size - if window_size: - self.q_win_size = window_size // stride_q - self.kv_win_size = window_size // stride_kv - self.residual_pooling = residual_pooling - - self.use_rel_pos = use_rel_pos - if self.use_rel_pos: - # initialize relative positional embeddings - assert input_size[0] == input_size[1] - size = input_size[0] - rel_dim = 2 * max(size // stride_q, size // stride_kv) - 1 - self.rel_pos_h = nn.Parameter(torch.zeros(rel_dim, head_dim)) - self.rel_pos_w = nn.Parameter(torch.zeros(rel_dim, head_dim)) - - if not rel_pos_zero_init: - nn.init.trunc_normal_(self.rel_pos_h, std=0.02) - nn.init.trunc_normal_(self.rel_pos_w, std=0.02) - - def forward(self, x): - B, H, W, _ = x.shape - # qkv with shape (3, B, nHead, H, W, C) - qkv = self.qkv(x).reshape(B, H, W, 3, self.num_heads, -1).permute(3, 0, 4, 1, 2, 5) - # q, k, v with shape (B * nHead, H, W, C) - q, k, v = qkv.reshape(3, B * self.num_heads, H, W, -1).unbind(0) - - q = attention_pool(q, self.pool_q, self.norm_q) - k = attention_pool(k, self.pool_k, self.norm_k) - v = attention_pool(v, self.pool_v, self.norm_v) - - ori_q = q - if self.window_size: - q, q_hw_pad = window_partition(q, self.q_win_size) - k, kv_hw_pad = window_partition(k, self.kv_win_size) - v, _ = window_partition(v, self.kv_win_size) - q_hw = (self.q_win_size, self.q_win_size) - kv_hw = (self.kv_win_size, self.kv_win_size) - else: - q_hw = q.shape[1:3] - kv_hw = k.shape[1:3] - - q = q.view(q.shape[0], np.prod(q_hw), -1) - k = k.view(k.shape[0], np.prod(kv_hw), -1) - v = v.view(v.shape[0], np.prod(kv_hw), -1) - - attn = (q * self.scale) @ k.transpose(-2, -1) - - if self.use_rel_pos: - attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, q_hw, kv_hw) - - attn = attn.softmax(dim=-1) - x = attn @ v - - x = x.view(x.shape[0], q_hw[0], q_hw[1], -1) - - if self.window_size: - x = window_unpartition(x, self.q_win_size, q_hw_pad, ori_q.shape[1:3]) - - if self.residual_pooling: - x += ori_q - - H, W = x.shape[1], x.shape[2] - x = x.view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1) - x = self.proj(x) - - return x - - -class MultiScaleBlock(nn.Module): - """Multiscale Transformer blocks""" - - def __init__( - self, - dim, - dim_out, - num_heads, - mlp_ratio=4.0, - qkv_bias=True, - drop_path=0.0, - norm_layer=nn.LayerNorm, - act_layer=nn.GELU, - qkv_pool_kernel=(3, 3), - stride_q=1, - stride_kv=1, - residual_pooling=True, - window_size=0, - use_rel_pos=False, - rel_pos_zero_init=True, - input_size=None, - ): - """ - Args: - dim (int): Number of input channels. - dim_out (int): Number of output channels. 
- num_heads (int): Number of attention heads in the MViT block. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool): If True, add a learnable bias to query, key, value. - drop_path (float): Stochastic depth rate. - norm_layer (nn.Module): Normalization layer. - act_layer (nn.Module): Activation layer. - qkv_pool_kernel (tuple): kernel size for qkv pooling layers. - stride_q (int): stride size for q pooling layer. - stride_kv (int): stride size for kv pooling layer. - residual_pooling (bool): If true, enable residual pooling. - window_size (int): Window size for window attention blocks. If it equals 0, then not - use window attention. - use_rel_pos (bool): If True, add relative postional embeddings to the attention map. - rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. - input_size (int or None): Input resolution. - """ - super().__init__() - self.norm1 = norm_layer(dim) - self.attn = MultiScaleAttention( - dim, - dim_out, - num_heads=num_heads, - qkv_bias=qkv_bias, - norm_layer=norm_layer, - pool_kernel=qkv_pool_kernel, - stride_q=stride_q, - stride_kv=stride_kv, - residual_pooling=residual_pooling, - window_size=window_size, - use_rel_pos=use_rel_pos, - rel_pos_zero_init=rel_pos_zero_init, - input_size=input_size, - ) - - from timm.models.layers import DropPath, Mlp - - self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() - self.norm2 = norm_layer(dim_out) - self.mlp = Mlp( - in_features=dim_out, - hidden_features=int(dim_out * mlp_ratio), - out_features=dim_out, - act_layer=act_layer, - ) - - if dim != dim_out: - self.proj = nn.Linear(dim, dim_out) - - if stride_q > 1: - kernel_skip = stride_q + 1 - padding_skip = int(kernel_skip // 2) - self.pool_skip = nn.MaxPool2d(kernel_skip, stride_q, padding_skip, ceil_mode=False) - - def forward(self, x): - x_norm = self.norm1(x) - x_block = self.attn(x_norm) - - if hasattr(self, "proj"): - x = self.proj(x_norm) - if hasattr(self, "pool_skip"): - x = attention_pool(x, self.pool_skip) - - x = x + self.drop_path(x_block) - x = x + self.drop_path(self.mlp(self.norm2(x))) - - return x - - -class MViT(Backbone): - """ - This module implements Multiscale Vision Transformer (MViT) backbone in :paper:'mvitv2'. - """ - - def __init__( - self, - img_size=224, - patch_kernel=(7, 7), - patch_stride=(4, 4), - patch_padding=(3, 3), - in_chans=3, - embed_dim=96, - depth=16, - num_heads=1, - last_block_indexes=(0, 2, 11, 15), - qkv_pool_kernel=(3, 3), - adaptive_kv_stride=4, - adaptive_window_size=56, - residual_pooling=True, - mlp_ratio=4.0, - qkv_bias=True, - drop_path_rate=0.0, - norm_layer=nn.LayerNorm, - act_layer=nn.GELU, - use_abs_pos=False, - use_rel_pos=True, - rel_pos_zero_init=True, - use_act_checkpoint=False, - pretrain_img_size=224, - pretrain_use_cls_token=True, - out_features=("scale2", "scale3", "scale4", "scale5"), - ): - """ - Args: - img_size (int): Input image size. - patch_kernel (tuple): kernel size for patch embedding. - patch_stride (tuple): stride size for patch embedding. - patch_padding (tuple): padding size for patch embedding. - in_chans (int): Number of input image channels. - embed_dim (int): Patch embedding dimension. - depth (int): Depth of MViT. - num_heads (int): Number of base attention heads in each MViT block. - last_block_indexes (tuple): Block indexes for last blocks in each stage. - qkv_pool_kernel (tuple): kernel size for qkv pooling layers. - adaptive_kv_stride (int): adaptive stride size for kv pooling. 
- adaptive_window_size (int): adaptive window size for window attention blocks. - residual_pooling (bool): If true, enable residual pooling. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool): If True, add a learnable bias to query, key, value. - drop_path_rate (float): Stochastic depth rate. - norm_layer (nn.Module): Normalization layer. - act_layer (nn.Module): Activation layer. - use_abs_pos (bool): If True, use absolute positional embeddings. - use_rel_pos (bool): If True, add relative postional embeddings to the attention map. - rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. - window_size (int): Window size for window attention blocks. - use_act_checkpoint (bool): If True, use activation checkpointing. - pretrain_img_size (int): input image size for pretraining models. - pretrain_use_cls_token (bool): If True, pretrainig models use class token. - out_features (tuple): name of the feature maps from each stage. - """ - super().__init__() - self.pretrain_use_cls_token = pretrain_use_cls_token - - self.patch_embed = PatchEmbed( - kernel_size=patch_kernel, - stride=patch_stride, - padding=patch_padding, - in_chans=in_chans, - embed_dim=embed_dim, - ) - - if use_abs_pos: - # Initialize absoluate positional embedding with pretrain image size. - num_patches = (pretrain_img_size // patch_stride[0]) * ( - pretrain_img_size // patch_stride[1] - ) - num_positions = (num_patches + 1) if pretrain_use_cls_token else num_patches - self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim)) - else: - self.pos_embed = None - - # stochastic depth decay rule - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] - dim_out = embed_dim - stride_kv = adaptive_kv_stride - window_size = adaptive_window_size - input_size = (img_size // patch_stride[0], img_size // patch_stride[1]) - stage = 2 - stride = patch_stride[0] - self._out_feature_strides = {} - self._out_feature_channels = {} - self.blocks = nn.ModuleList() - for i in range(depth): - # Multiply stride_kv by 2 if it's the last block of stage2 and stage3. - if i == last_block_indexes[1] or i == last_block_indexes[2]: - stride_kv_ = stride_kv * 2 - else: - stride_kv_ = stride_kv - # hybrid window attention: global attention in last three stages. 
- window_size_ = 0 if i in last_block_indexes[1:] else window_size - block = MultiScaleBlock( - dim=embed_dim, - dim_out=dim_out, - num_heads=num_heads, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - drop_path=dpr[i], - norm_layer=norm_layer, - qkv_pool_kernel=qkv_pool_kernel, - stride_q=2 if i - 1 in last_block_indexes else 1, - stride_kv=stride_kv_, - residual_pooling=residual_pooling, - window_size=window_size_, - use_rel_pos=use_rel_pos, - rel_pos_zero_init=rel_pos_zero_init, - input_size=input_size, - ) - if use_act_checkpoint: - # TODO: use torch.utils.checkpoint - from fairscale.nn.checkpoint import checkpoint_wrapper - - block = checkpoint_wrapper(block) - self.blocks.append(block) - - embed_dim = dim_out - if i in last_block_indexes: - name = f"scale{stage}" - if name in out_features: - self._out_feature_channels[name] = dim_out - self._out_feature_strides[name] = stride - self.add_module(f"{name}_norm", norm_layer(dim_out)) - - dim_out *= 2 - num_heads *= 2 - stride_kv = max(stride_kv // 2, 1) - stride *= 2 - stage += 1 - if i - 1 in last_block_indexes: - window_size = window_size // 2 - input_size = [s // 2 for s in input_size] - - self._out_features = out_features - self._last_block_indexes = last_block_indexes - - if self.pos_embed is not None: - nn.init.trunc_normal_(self.pos_embed, std=0.02) - - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - nn.init.trunc_normal_(m.weight, std=0.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - def forward(self, x): - x = self.patch_embed(x) - - if self.pos_embed is not None: - x = x + get_abs_pos(self.pos_embed, self.pretrain_use_cls_token, x.shape[1:3]) - - outputs = {} - stage = 2 - for i, blk in enumerate(self.blocks): - x = blk(x) - if i in self._last_block_indexes: - name = f"scale{stage}" - if name in self._out_features: - x_out = getattr(self, f"{name}_norm")(x) - outputs[name] = x_out.permute(0, 3, 1, 2) - stage += 1 - - return outputs diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/engine/__init__.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/engine/__init__.py deleted file mode 100644 index 3193b7f664e19ce2458d81c836597fa22e4bb082..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/engine/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
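To make the stage layout of the `MViT` backbone above concrete, a hedged instantiation sketch; the output shapes are reasoned from the defaults in the code rather than measured:

```python
# Sketch: the MViT backbone defined above returns one NCHW feature map per
# requested stage; with the defaults, scale2..scale5 have strides 4/8/16/32.
import torch

backbone = MViT(img_size=224)                  # defaults: embed_dim=96, depth=16
feats = backbone(torch.randn(1, 3, 224, 224))
for name, f in feats.items():
    print(name, tuple(f.shape))
# expected: scale2 (1, 96, 56, 56), scale3 (1, 192, 28, 28),
#           scale4 (1, 384, 14, 14), scale5 (1, 768, 7, 7)
```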
-from .test import (collect_results_cpu, collect_results_gpu, multi_gpu_test, - single_gpu_test) - -__all__ = [ - 'collect_results_cpu', 'collect_results_gpu', 'multi_gpu_test', - 'single_gpu_test' -] diff --git a/spaces/cymic/Waifu_Diffusion_Webui/modules/hypernetwork.py b/spaces/cymic/Waifu_Diffusion_Webui/modules/hypernetwork.py deleted file mode 100644 index dd198ee9cbae5ea0eae07e9a7ede0987a40daff8..0000000000000000000000000000000000000000 --- a/spaces/cymic/Waifu_Diffusion_Webui/modules/hypernetwork.py +++ /dev/null @@ -1,88 +0,0 @@ -import glob -import os -import sys -import traceback - -import torch - -from ldm.util import default -from modules import devices, shared -import torch -from torch import einsum -from einops import rearrange, repeat - - -class HypernetworkModule(torch.nn.Module): - def __init__(self, dim, state_dict): - super().__init__() - - self.linear1 = torch.nn.Linear(dim, dim * 2) - self.linear2 = torch.nn.Linear(dim * 2, dim) - - self.load_state_dict(state_dict, strict=True) - self.to(devices.device) - - def forward(self, x): - return x + (self.linear2(self.linear1(x))) - - -class Hypernetwork: - filename = None - name = None - - def __init__(self, filename): - self.filename = filename - self.name = os.path.splitext(os.path.basename(filename))[0] - self.layers = {} - - state_dict = torch.load(filename, map_location='cpu') - for size, sd in state_dict.items(): - self.layers[size] = (HypernetworkModule(size, sd[0]), HypernetworkModule(size, sd[1])) - - -def load_hypernetworks(path): - res = {} - - for filename in glob.iglob(path + '**/*.pt', recursive=True): - try: - hn = Hypernetwork(filename) - res[hn.name] = hn - except Exception: - print(f"Error loading hypernetwork {filename}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - - return res - - -def attention_CrossAttention_forward(self, x, context=None, mask=None): - h = self.heads - - q = self.to_q(x) - context = default(context, x) - - hypernetwork = shared.selected_hypernetwork() - hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None) - - if hypernetwork_layers is not None: - k = self.to_k(hypernetwork_layers[0](context)) - v = self.to_v(hypernetwork_layers[1](context)) - else: - k = self.to_k(context) - v = self.to_v(context) - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - - sim = einsum('b i d, b j d -> b i j', q, k) * self.scale - - if mask is not None: - mask = rearrange(mask, 'b ... 
-> b (...)') - max_neg_value = -torch.finfo(sim.dtype).max - mask = repeat(mask, 'b j -> (b h) () j', h=h) - sim.masked_fill_(~mask, max_neg_value) - - # attention, what we cannot get enough of - attn = sim.softmax(dim=-1) - - out = einsum('b i j, b j d -> b i d', attn, v) - out = rearrange(out, '(b h) n d -> b n (h d)', h=h) - return self.to_out(out) diff --git a/spaces/daddyjin/TalkingFaceGeneration/FONT/process_data.py b/spaces/daddyjin/TalkingFaceGeneration/FONT/process_data.py deleted file mode 100644 index 53af7055b122a7c08c764917b5e840d6c00b7c67..0000000000000000000000000000000000000000 --- a/spaces/daddyjin/TalkingFaceGeneration/FONT/process_data.py +++ /dev/null @@ -1,143 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Thu Jun 24 11:36:01 2021 - -@author: Xinya -""" - -import os -import glob -import time -import numpy as np -import csv -import cv2 -import dlib - -from skimage import transform as tf - -detector = dlib.get_frontal_face_detector() -predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat') - - -import imageio - - - -def save(path, frames, format): - if format == '.mp4': - imageio.mimsave(path, frames) - elif format == '.png': - if not os.path.exists(path): - - - os.makedirs(path) - for j, frame in enumerate(frames): - cv2.imwrite(path+'/'+str(j)+'.png',frame) - # imageio.imsave(os.path.join(path, str(j) + '.png'), frames[j]) - else: - print ("Unknown format %s" % format) - exit() - -def crop_image(image_path, out_path): - template = np.load('./M003_template.npy') - image = cv2.imread(image_path) - gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) - rects = detector(gray, 1) #detect human face - if len(rects) != 1: - return 0 - for (j, rect) in enumerate(rects): - shape = predictor(gray, rect) #detect 68 points - shape = shape_to_np(shape) - - pts2 = np.float32(template[:47,:]) - # pts2 = np.float32(template[17:35,:]) - # pts1 = np.vstack((landmark[27:36,:], landmark[39,:],landmark[42,:],landmark[45,:])) - pts1 = np.float32(shape[:47,:]) #eye and nose - # pts1 = np.float32(landmark[17:35,:]) - tform = tf.SimilarityTransform() - tform.estimate( pts2, pts1) #Set the transformation matrix with the explicit parameters. 
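# A sketch of what the two calls around this point do: tform.estimate(pts2, pts1)
# fits a similarity transform (rotation, uniform scale, translation) taking the
# template landmarks pts2 onto the detected landmarks pts1; tf.warp below then
# uses that transform as its inverse map, so each pixel of the 256x256 output
# canvas is sampled from the source image and the face lands in the template frame.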
- - dst = tf.warp(image, tform, output_shape=(256, 256)) - - dst = np.array(dst * 255, dtype=np.uint8) - - - cv2.imwrite(out_path,dst) - -def shape_to_np(shape, dtype="int"): - # initialize the list of (x, y)-coordinates - coords = np.zeros((shape.num_parts, 2), dtype=dtype) - - # loop over all facial landmarks and convert them - # to a 2-tuple of (x, y)-coordinates - for i in range(0, shape.num_parts): - coords[i] = (shape.part(i).x, shape.part(i).y) - - # return the list of (x, y)-coordinates - return coords - - -def crop_image_tem(video_path, out_path): - image_all = [] - videoCapture = cv2.VideoCapture(video_path) - success, frame = videoCapture.read() - n = 0 - while success : - image_all.append(frame) - n = n + 1 - success, frame = videoCapture.read() - - if len(image_all)!=0 : - template = np.load('./M003_template.npy') - image=image_all[0] - gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) - rects = detector(gray, 1) #detect human face - if len(rects) != 1: - return 0 - for (j, rect) in enumerate(rects): - shape = predictor(gray, rect) #detect 68 points - shape = shape_to_np(shape) - - pts2 = np.float32(template[:47,:]) - # pts2 = np.float32(template[17:35,:]) - # pts1 = np.vstack((landmark[27:36,:], landmark[39,:],landmark[42,:],landmark[45,:])) - pts1 = np.float32(shape[:47,:]) #eye and nose - # pts1 = np.float32(landmark[17:35,:]) - tform = tf.SimilarityTransform() - tform.estimate( pts2, pts1) #Set the transformation matrix with the explicit parameters. - out = [] - for i in range(len(image_all)): - image = image_all[i] - dst = tf.warp(image, tform, output_shape=(256, 256)) - - dst = np.array(dst * 255, dtype=np.uint8) - out.append(dst) - if not os.path.exists(out_path): - os.makedirs(out_path) - save(out_path,out,'.png') - -def proc_audio(src_mouth_path, dst_audio_path): - audio_command = 'ffmpeg -i \"{}\" -loglevel error -y -f wav -acodec pcm_s16le ' \ - '-ar 16000 \"{}\"'.format(src_mouth_path, dst_audio_path) - os.system(audio_command) - - - -if __name__ == "__main__": - #video alignment - video_path = './test/crop/M030_sad_3_001.mp4' - out_path = './test/crop/M030_sad_3_001' - crop_image_tem(video_path, out_path) - - #image alignment - image_path = './test/raw_image/brade2.jpg' - out_path = './test/image/brade2.jpg' - crop_image(image_path, out_path) - - #change_audio_sample_rate - src_mouth_path = './test/audio/00015.mp3' - dst_audio_path = './test/audio/00015.mov' - proc_audio(src_mouth_path, dst_audio_path) - - - diff --git a/spaces/datasciencedojo/Brain_Stroke_Prediction/README.md b/spaces/datasciencedojo/Brain_Stroke_Prediction/README.md deleted file mode 100644 index 6be2cb71095ca5e64ddfbaf621e6b63d727790bd..0000000000000000000000000000000000000000 --- a/spaces/datasciencedojo/Brain_Stroke_Prediction/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Brain Stroke Prediction -emoji: 📊 -colorFrom: gray -colorTo: gray -sdk: gradio -sdk_version: 3.14.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/dawdqd/ChuanhuChatGPT/web_assets/html/appearance_switcher.html b/spaces/dawdqd/ChuanhuChatGPT/web_assets/html/appearance_switcher.html deleted file mode 100644 index 5fbf3b09b1c39de75c400514c9d0d81c807ea6bd..0000000000000000000000000000000000000000 --- a/spaces/dawdqd/ChuanhuChatGPT/web_assets/html/appearance_switcher.html +++ /dev/null @@ -1,6 +0,0 @@ -<div class="switch-checkbox" id="apSwitch"> - <label class="apSwitch"> - <input type="checkbox" 
id="apSwitch-checkbox" data-testid="checkbox" /> - <span class="apSwitch-span">{label}</span> - </label> -</div> diff --git a/spaces/dawood/audioldm-text-to-audio-generation/audioldm/variational_autoencoder/modules.py b/spaces/dawood/audioldm-text-to-audio-generation/audioldm/variational_autoencoder/modules.py deleted file mode 100644 index 6b2c3dca2d168fb5fbaff5acc4b5a06280a496a7..0000000000000000000000000000000000000000 --- a/spaces/dawood/audioldm-text-to-audio-generation/audioldm/variational_autoencoder/modules.py +++ /dev/null @@ -1,1064 +0,0 @@ -# pytorch_diffusion + derived encoder decoder -import math -import torch -import torch.nn as nn -import numpy as np -from einops import rearrange - -from audioldm.utils import instantiate_from_config -from audioldm.latent_diffusion.attention import LinearAttention - -def get_timestep_embedding(timesteps, embedding_dim): - """ - This matches the implementation in Denoising Diffusion Probabilistic Models: - From Fairseq. - Build sinusoidal embeddings. - This matches the implementation in tensor2tensor, but differs slightly - from the description in Section 3.5 of "Attention Is All You Need". - """ - assert len(timesteps.shape) == 1 - - half_dim = embedding_dim // 2 - emb = math.log(10000) / (half_dim - 1) - emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) - emb = emb.to(device=timesteps.device) - emb = timesteps.float()[:, None] * emb[None, :] - emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) - if embedding_dim % 2 == 1: # zero pad - emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) - return emb - -def nonlinearity(x): - # swish - return x * torch.sigmoid(x) - - -def Normalize(in_channels, num_groups=32): - return torch.nn.GroupNorm( - num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True - ) - - -class Upsample(nn.Module): - def __init__(self, in_channels, with_conv): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - self.conv = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=3, stride=1, padding=1 - ) - - def forward(self, x): - x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") - if self.with_conv: - x = self.conv(x) - return x - - -class UpsampleTimeStride4(nn.Module): - def __init__(self, in_channels, with_conv): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - self.conv = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=5, stride=1, padding=2 - ) - - def forward(self, x): - x = torch.nn.functional.interpolate(x, scale_factor=(4.0, 2.0), mode="nearest") - if self.with_conv: - x = self.conv(x) - return x - - -class Downsample(nn.Module): - def __init__(self, in_channels, with_conv): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - # Do time downsampling here - # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=3, stride=2, padding=0 - ) - - def forward(self, x): - if self.with_conv: - pad = (0, 1, 0, 1) - x = torch.nn.functional.pad(x, pad, mode="constant", value=0) - x = self.conv(x) - else: - x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) - return x - - -class DownsampleTimeStride4(nn.Module): - def __init__(self, in_channels, with_conv): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - # Do time downsampling here - # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=5, stride=(4, 2), 
padding=1 - ) - - def forward(self, x): - if self.with_conv: - pad = (0, 1, 0, 1) - x = torch.nn.functional.pad(x, pad, mode="constant", value=0) - x = self.conv(x) - else: - x = torch.nn.functional.avg_pool2d(x, kernel_size=(4, 2), stride=(4, 2)) - return x - - -class ResnetBlock(nn.Module): - def __init__( - self, - *, - in_channels, - out_channels=None, - conv_shortcut=False, - dropout, - temb_channels=512, - ): - super().__init__() - self.in_channels = in_channels - out_channels = in_channels if out_channels is None else out_channels - self.out_channels = out_channels - self.use_conv_shortcut = conv_shortcut - - self.norm1 = Normalize(in_channels) - self.conv1 = torch.nn.Conv2d( - in_channels, out_channels, kernel_size=3, stride=1, padding=1 - ) - if temb_channels > 0: - self.temb_proj = torch.nn.Linear(temb_channels, out_channels) - self.norm2 = Normalize(out_channels) - self.dropout = torch.nn.Dropout(dropout) - self.conv2 = torch.nn.Conv2d( - out_channels, out_channels, kernel_size=3, stride=1, padding=1 - ) - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - self.conv_shortcut = torch.nn.Conv2d( - in_channels, out_channels, kernel_size=3, stride=1, padding=1 - ) - else: - self.nin_shortcut = torch.nn.Conv2d( - in_channels, out_channels, kernel_size=1, stride=1, padding=0 - ) - - def forward(self, x, temb): - h = x - h = self.norm1(h) - h = nonlinearity(h) - h = self.conv1(h) - - if temb is not None: - h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None] - - h = self.norm2(h) - h = nonlinearity(h) - h = self.dropout(h) - h = self.conv2(h) - - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - x = self.conv_shortcut(x) - else: - x = self.nin_shortcut(x) - - return x + h - - -class LinAttnBlock(LinearAttention): - """to match AttnBlock usage""" - - def __init__(self, in_channels): - super().__init__(dim=in_channels, heads=1, dim_head=in_channels) - - -class AttnBlock(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - self.k = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - self.v = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - self.proj_out = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b, c, h, w = q.shape - q = q.reshape(b, c, h * w).contiguous() - q = q.permute(0, 2, 1).contiguous() # b,hw,c - k = k.reshape(b, c, h * w).contiguous() # b,c,hw - w_ = torch.bmm(q, k).contiguous() # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] - w_ = w_ * (int(c) ** (-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = v.reshape(b, c, h * w).contiguous() - w_ = w_.permute(0, 2, 1).contiguous() # b,hw,hw (first hw of k, second of q) - h_ = torch.bmm( - v, w_ - ).contiguous() # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] - h_ = h_.reshape(b, c, h, w).contiguous() - - h_ = self.proj_out(h_) - - return x + h_ - - -def make_attn(in_channels, attn_type="vanilla"): - assert attn_type in ["vanilla", "linear", "none"], f"attn_type {attn_type} unknown" - # print(f"making attention of type '{attn_type}' with {in_channels} in_channels") - if attn_type == "vanilla": - return AttnBlock(in_channels) - 
elif attn_type == "none": - return nn.Identity(in_channels) - else: - return LinAttnBlock(in_channels) - - -class Model(nn.Module): - def __init__( - self, - *, - ch, - out_ch, - ch_mult=(1, 2, 4, 8), - num_res_blocks, - attn_resolutions, - dropout=0.0, - resamp_with_conv=True, - in_channels, - resolution, - use_timestep=True, - use_linear_attn=False, - attn_type="vanilla", - ): - super().__init__() - if use_linear_attn: - attn_type = "linear" - self.ch = ch - self.temb_ch = self.ch * 4 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - - self.use_timestep = use_timestep - if self.use_timestep: - # timestep embedding - self.temb = nn.Module() - self.temb.dense = nn.ModuleList( - [ - torch.nn.Linear(self.ch, self.temb_ch), - torch.nn.Linear(self.temb_ch, self.temb_ch), - ] - ) - - # downsampling - self.conv_in = torch.nn.Conv2d( - in_channels, self.ch, kernel_size=3, stride=1, padding=1 - ) - - curr_res = resolution - in_ch_mult = (1,) + tuple(ch_mult) - self.down = nn.ModuleList() - for i_level in range(self.num_resolutions): - block = nn.ModuleList() - attn = nn.ModuleList() - block_in = ch * in_ch_mult[i_level] - block_out = ch * ch_mult[i_level] - for i_block in range(self.num_res_blocks): - block.append( - ResnetBlock( - in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout, - ) - ) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - down = nn.Module() - down.block = block - down.attn = attn - if i_level != self.num_resolutions - 1: - down.downsample = Downsample(block_in, resamp_with_conv) - curr_res = curr_res // 2 - self.down.append(down) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock( - in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout, - ) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock( - in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout, - ) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - attn = nn.ModuleList() - block_out = ch * ch_mult[i_level] - skip_in = ch * ch_mult[i_level] - for i_block in range(self.num_res_blocks + 1): - if i_block == self.num_res_blocks: - skip_in = ch * in_ch_mult[i_level] - block.append( - ResnetBlock( - in_channels=block_in + skip_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout, - ) - ) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - up = nn.Module() - up.block = block - up.attn = attn - if i_level != 0: - up.upsample = Upsample(block_in, resamp_with_conv) - curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d( - block_in, out_ch, kernel_size=3, stride=1, padding=1 - ) - - def forward(self, x, t=None, context=None): - # assert x.shape[2] == x.shape[3] == self.resolution - if context is not None: - # assume aligned context, cat along channel axis - x = torch.cat((x, context), dim=1) - if self.use_timestep: - # timestep embedding - assert t is not None - temb = get_timestep_embedding(t, self.ch) - temb = self.temb.dense[0](temb) - temb = nonlinearity(temb) - temb = self.temb.dense[1](temb) - else: - temb = None - - 
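# At this point temb holds the conditioning vector for the residual blocks:
# get_timestep_embedding(t, self.ch) builds (B, ch) sinusoidal features
# (half sine, half cosine), and the two dense layers project them to
# (B, self.temb_ch); each ResnetBlock later adds temb_proj(nonlinearity(temb)),
# broadcast over the spatial dimensions.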
# downsampling - hs = [self.conv_in(x)] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1], temb) - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - hs.append(h) - if i_level != self.num_resolutions - 1: - hs.append(self.down[i_level].downsample(hs[-1])) - - # middle - h = hs[-1] - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks + 1): - h = self.up[i_level].block[i_block]( - torch.cat([h, hs.pop()], dim=1), temb - ) - if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - if i_level != 0: - h = self.up[i_level].upsample(h) - - # end - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - def get_last_layer(self): - return self.conv_out.weight - - -class Encoder(nn.Module): - def __init__( - self, - *, - ch, - out_ch, - ch_mult=(1, 2, 4, 8), - num_res_blocks, - attn_resolutions, - dropout=0.0, - resamp_with_conv=True, - in_channels, - resolution, - z_channels, - double_z=True, - use_linear_attn=False, - attn_type="vanilla", - downsample_time_stride4_levels=[], - **ignore_kwargs, - ): - super().__init__() - if use_linear_attn: - attn_type = "linear" - self.ch = ch - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - self.downsample_time_stride4_levels = downsample_time_stride4_levels - - if len(self.downsample_time_stride4_levels) > 0: - assert max(self.downsample_time_stride4_levels) < self.num_resolutions, ( - "The level to perform downsample 4 operation need to be smaller than the total resolution number %s" - % str(self.num_resolutions) - ) - - # downsampling - self.conv_in = torch.nn.Conv2d( - in_channels, self.ch, kernel_size=3, stride=1, padding=1 - ) - - curr_res = resolution - in_ch_mult = (1,) + tuple(ch_mult) - self.in_ch_mult = in_ch_mult - self.down = nn.ModuleList() - for i_level in range(self.num_resolutions): - block = nn.ModuleList() - attn = nn.ModuleList() - block_in = ch * in_ch_mult[i_level] - block_out = ch * ch_mult[i_level] - for i_block in range(self.num_res_blocks): - block.append( - ResnetBlock( - in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout, - ) - ) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - down = nn.Module() - down.block = block - down.attn = attn - if i_level != self.num_resolutions - 1: - if i_level in self.downsample_time_stride4_levels: - down.downsample = DownsampleTimeStride4(block_in, resamp_with_conv) - else: - down.downsample = Downsample(block_in, resamp_with_conv) - curr_res = curr_res // 2 - self.down.append(down) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock( - in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout, - ) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock( - in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout, - ) - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d( - block_in, - 2 * z_channels if double_z else z_channels, - kernel_size=3, - stride=1, - padding=1, - ) - - def forward(self, x): - # timestep 
embedding - temb = None - # downsampling - hs = [self.conv_in(x)] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1], temb) - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - hs.append(h) - if i_level != self.num_resolutions - 1: - hs.append(self.down[i_level].downsample(hs[-1])) - - # middle - h = hs[-1] - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # end - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - -class Decoder(nn.Module): - def __init__( - self, - *, - ch, - out_ch, - ch_mult=(1, 2, 4, 8), - num_res_blocks, - attn_resolutions, - dropout=0.0, - resamp_with_conv=True, - in_channels, - resolution, - z_channels, - give_pre_end=False, - tanh_out=False, - use_linear_attn=False, - downsample_time_stride4_levels=[], - attn_type="vanilla", - **ignorekwargs, - ): - super().__init__() - if use_linear_attn: - attn_type = "linear" - self.ch = ch - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - self.give_pre_end = give_pre_end - self.tanh_out = tanh_out - self.downsample_time_stride4_levels = downsample_time_stride4_levels - - if len(self.downsample_time_stride4_levels) > 0: - assert max(self.downsample_time_stride4_levels) < self.num_resolutions, ( - "The level to perform downsample 4 operation need to be smaller than the total resolution number %s" - % str(self.num_resolutions) - ) - - # compute in_ch_mult, block_in and curr_res at lowest res - in_ch_mult = (1,) + tuple(ch_mult) - block_in = ch * ch_mult[self.num_resolutions - 1] - curr_res = resolution // 2 ** (self.num_resolutions - 1) - self.z_shape = (1, z_channels, curr_res, curr_res) - # print("Working with z of shape {} = {} dimensions.".format( - # self.z_shape, np.prod(self.z_shape))) - - # z to block_in - self.conv_in = torch.nn.Conv2d( - z_channels, block_in, kernel_size=3, stride=1, padding=1 - ) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock( - in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout, - ) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock( - in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout, - ) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - attn = nn.ModuleList() - block_out = ch * ch_mult[i_level] - for i_block in range(self.num_res_blocks + 1): - block.append( - ResnetBlock( - in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout, - ) - ) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - up = nn.Module() - up.block = block - up.attn = attn - if i_level != 0: - if i_level - 1 in self.downsample_time_stride4_levels: - up.upsample = UpsampleTimeStride4(block_in, resamp_with_conv) - else: - up.upsample = Upsample(block_in, resamp_with_conv) - curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d( - block_in, out_ch, kernel_size=3, stride=1, padding=1 - ) - - def forward(self, z): - # assert z.shape[1:] == self.z_shape[1:] - self.last_z_shape = z.shape - - # timestep 
embedding - temb = None - - # z to block_in - h = self.conv_in(z) - - # middle - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks + 1): - h = self.up[i_level].block[i_block](h, temb) - if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - if i_level != 0: - h = self.up[i_level].upsample(h) - - # end - if self.give_pre_end: - return h - - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - if self.tanh_out: - h = torch.tanh(h) - return h - - -class SimpleDecoder(nn.Module): - def __init__(self, in_channels, out_channels, *args, **kwargs): - super().__init__() - self.model = nn.ModuleList( - [ - nn.Conv2d(in_channels, in_channels, 1), - ResnetBlock( - in_channels=in_channels, - out_channels=2 * in_channels, - temb_channels=0, - dropout=0.0, - ), - ResnetBlock( - in_channels=2 * in_channels, - out_channels=4 * in_channels, - temb_channels=0, - dropout=0.0, - ), - ResnetBlock( - in_channels=4 * in_channels, - out_channels=2 * in_channels, - temb_channels=0, - dropout=0.0, - ), - nn.Conv2d(2 * in_channels, in_channels, 1), - Upsample(in_channels, with_conv=True), - ] - ) - # end - self.norm_out = Normalize(in_channels) - self.conv_out = torch.nn.Conv2d( - in_channels, out_channels, kernel_size=3, stride=1, padding=1 - ) - - def forward(self, x): - for i, layer in enumerate(self.model): - if i in [1, 2, 3]: - x = layer(x, None) - else: - x = layer(x) - - h = self.norm_out(x) - h = nonlinearity(h) - x = self.conv_out(h) - return x - - -class UpsampleDecoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - ch, - num_res_blocks, - resolution, - ch_mult=(2, 2), - dropout=0.0, - ): - super().__init__() - # upsampling - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - block_in = in_channels - curr_res = resolution // 2 ** (self.num_resolutions - 1) - self.res_blocks = nn.ModuleList() - self.upsample_blocks = nn.ModuleList() - for i_level in range(self.num_resolutions): - res_block = [] - block_out = ch * ch_mult[i_level] - for i_block in range(self.num_res_blocks + 1): - res_block.append( - ResnetBlock( - in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout, - ) - ) - block_in = block_out - self.res_blocks.append(nn.ModuleList(res_block)) - if i_level != self.num_resolutions - 1: - self.upsample_blocks.append(Upsample(block_in, True)) - curr_res = curr_res * 2 - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d( - block_in, out_channels, kernel_size=3, stride=1, padding=1 - ) - - def forward(self, x): - # upsampling - h = x - for k, i_level in enumerate(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks + 1): - h = self.res_blocks[i_level][i_block](h, None) - if i_level != self.num_resolutions - 1: - h = self.upsample_blocks[k](h) - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - -class LatentRescaler(nn.Module): - def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2): - super().__init__() - # residual block, interpolate, residual block - self.factor = factor - self.conv_in = nn.Conv2d( - in_channels, mid_channels, kernel_size=3, stride=1, padding=1 - ) - self.res_block1 = nn.ModuleList( - [ - ResnetBlock( - in_channels=mid_channels, - out_channels=mid_channels, - temb_channels=0, - dropout=0.0, - 
) - for _ in range(depth) - ] - ) - self.attn = AttnBlock(mid_channels) - self.res_block2 = nn.ModuleList( - [ - ResnetBlock( - in_channels=mid_channels, - out_channels=mid_channels, - temb_channels=0, - dropout=0.0, - ) - for _ in range(depth) - ] - ) - - self.conv_out = nn.Conv2d( - mid_channels, - out_channels, - kernel_size=1, - ) - - def forward(self, x): - x = self.conv_in(x) - for block in self.res_block1: - x = block(x, None) - x = torch.nn.functional.interpolate( - x, - size=( - int(round(x.shape[2] * self.factor)), - int(round(x.shape[3] * self.factor)), - ), - ) - x = self.attn(x).contiguous() - for block in self.res_block2: - x = block(x, None) - x = self.conv_out(x) - return x - - -class MergedRescaleEncoder(nn.Module): - def __init__( - self, - in_channels, - ch, - resolution, - out_ch, - num_res_blocks, - attn_resolutions, - dropout=0.0, - resamp_with_conv=True, - ch_mult=(1, 2, 4, 8), - rescale_factor=1.0, - rescale_module_depth=1, - ): - super().__init__() - intermediate_chn = ch * ch_mult[-1] - self.encoder = Encoder( - in_channels=in_channels, - num_res_blocks=num_res_blocks, - ch=ch, - ch_mult=ch_mult, - z_channels=intermediate_chn, - double_z=False, - resolution=resolution, - attn_resolutions=attn_resolutions, - dropout=dropout, - resamp_with_conv=resamp_with_conv, - out_ch=None, - ) - self.rescaler = LatentRescaler( - factor=rescale_factor, - in_channels=intermediate_chn, - mid_channels=intermediate_chn, - out_channels=out_ch, - depth=rescale_module_depth, - ) - - def forward(self, x): - x = self.encoder(x) - x = self.rescaler(x) - return x - - -class MergedRescaleDecoder(nn.Module): - def __init__( - self, - z_channels, - out_ch, - resolution, - num_res_blocks, - attn_resolutions, - ch, - ch_mult=(1, 2, 4, 8), - dropout=0.0, - resamp_with_conv=True, - rescale_factor=1.0, - rescale_module_depth=1, - ): - super().__init__() - tmp_chn = z_channels * ch_mult[-1] - self.decoder = Decoder( - out_ch=out_ch, - z_channels=tmp_chn, - attn_resolutions=attn_resolutions, - dropout=dropout, - resamp_with_conv=resamp_with_conv, - in_channels=None, - num_res_blocks=num_res_blocks, - ch_mult=ch_mult, - resolution=resolution, - ch=ch, - ) - self.rescaler = LatentRescaler( - factor=rescale_factor, - in_channels=z_channels, - mid_channels=tmp_chn, - out_channels=tmp_chn, - depth=rescale_module_depth, - ) - - def forward(self, x): - x = self.rescaler(x) - x = self.decoder(x) - return x - - -class Upsampler(nn.Module): - def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2): - super().__init__() - assert out_size >= in_size - num_blocks = int(np.log2(out_size // in_size)) + 1 - factor_up = 1.0 + (out_size % in_size) - print( - f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}" - ) - self.rescaler = LatentRescaler( - factor=factor_up, - in_channels=in_channels, - mid_channels=2 * in_channels, - out_channels=in_channels, - ) - self.decoder = Decoder( - out_ch=out_channels, - resolution=out_size, - z_channels=in_channels, - num_res_blocks=2, - attn_resolutions=[], - in_channels=None, - ch=in_channels, - ch_mult=[ch_mult for _ in range(num_blocks)], - ) - - def forward(self, x): - x = self.rescaler(x) - x = self.decoder(x) - return x - - -class Resize(nn.Module): - def __init__(self, in_channels=None, learned=False, mode="bilinear"): - super().__init__() - self.with_conv = learned - self.mode = mode - if self.with_conv: - print( - f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed 
{mode} mode" - ) - raise NotImplementedError() - assert in_channels is not None - # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=4, stride=2, padding=1 - ) - - def forward(self, x, scale_factor=1.0): - if scale_factor == 1.0: - return x - else: - x = torch.nn.functional.interpolate( - x, mode=self.mode, align_corners=False, scale_factor=scale_factor - ) - return x - - -class FirstStagePostProcessor(nn.Module): - def __init__( - self, - ch_mult: list, - in_channels, - pretrained_model: nn.Module = None, - reshape=False, - n_channels=None, - dropout=0.0, - pretrained_config=None, - ): - super().__init__() - if pretrained_config is None: - assert ( - pretrained_model is not None - ), 'Either "pretrained_model" or "pretrained_config" must not be None' - self.pretrained_model = pretrained_model - else: - assert ( - pretrained_config is not None - ), 'Either "pretrained_model" or "pretrained_config" must not be None' - self.instantiate_pretrained(pretrained_config) - - self.do_reshape = reshape - - if n_channels is None: - n_channels = self.pretrained_model.encoder.ch - - self.proj_norm = Normalize(in_channels, num_groups=in_channels // 2) - self.proj = nn.Conv2d( - in_channels, n_channels, kernel_size=3, stride=1, padding=1 - ) - - blocks = [] - downs = [] - ch_in = n_channels - for m in ch_mult: - blocks.append( - ResnetBlock( - in_channels=ch_in, out_channels=m * n_channels, dropout=dropout - ) - ) - ch_in = m * n_channels - downs.append(Downsample(ch_in, with_conv=False)) - - self.model = nn.ModuleList(blocks) - self.downsampler = nn.ModuleList(downs) - - def instantiate_pretrained(self, config): - model = instantiate_from_config(config) - self.pretrained_model = model.eval() - # self.pretrained_model.train = False - for param in self.pretrained_model.parameters(): - param.requires_grad = False - - @torch.no_grad() - def encode_with_pretrained(self, x): - c = self.pretrained_model.encode(x) - if isinstance(c, DiagonalGaussianDistribution): - c = c.mode() - return c - - def forward(self, x): - z_fs = self.encode_with_pretrained(x) - z = self.proj_norm(z_fs) - z = self.proj(z) - z = nonlinearity(z) - - for submodel, downmodel in zip(self.model, self.downsampler): - z = submodel(z, temb=None) - z = downmodel(z) - - if self.do_reshape: - z = rearrange(z, "b c h w -> b (h w) c") - return z diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/mtiLib/__main__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/mtiLib/__main__.py deleted file mode 100644 index 29c802bcc83b3ca35bbd0e6521f47a368b5f9092..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/mtiLib/__main__.py +++ /dev/null @@ -1,5 +0,0 @@ -import sys -from fontTools.mtiLib import main - -if __name__ == "__main__": - sys.exit(main()) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/utils/_telemetry.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/utils/_telemetry.py deleted file mode 100644 index 5de988e2795188324f69232d1beb68191591715d..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/utils/_telemetry.py +++ /dev/null @@ -1,118 +0,0 @@ -from queue import Queue -from threading import Lock, Thread -from 
typing import Dict, Optional, Union -from urllib.parse import quote - -from .. import constants, logging -from . import build_hf_headers, get_session, hf_raise_for_status - - -logger = logging.get_logger(__name__) - -# Telemetry is sent by a separate thread to avoid blocking the main thread. -# A daemon thread is started once and consume tasks from the _TELEMETRY_QUEUE. -# If the thread stops for some reason -shouldn't happen-, we restart a new one. -_TELEMETRY_THREAD: Optional[Thread] = None -_TELEMETRY_THREAD_LOCK = Lock() # Lock to avoid starting multiple threads in parallel -_TELEMETRY_QUEUE: Queue = Queue() - - -def send_telemetry( - topic: str, - *, - library_name: Optional[str] = None, - library_version: Optional[str] = None, - user_agent: Union[Dict, str, None] = None, -) -> None: - """ - Sends telemetry that helps tracking usage of different HF libraries. - - This usage data helps us debug issues and prioritize new features. However, we understand that not everyone wants - to share additional information, and we respect your privacy. You can disable telemetry collection by setting the - `HF_HUB_DISABLE_TELEMETRY=1` as environment variable. Telemetry is also disabled in offline mode (i.e. when setting - `HF_HUB_OFFLINE=1`). - - Telemetry collection is run in a separate thread to minimize impact for the user. - - Args: - topic (`str`): - Name of the topic that is monitored. The topic is directly used to build the URL. If you want to monitor - subtopics, just use "/" separation. Examples: "gradio", "transformers/examples",... - library_name (`str`, *optional*): - The name of the library that is making the HTTP request. Will be added to the user-agent header. - library_version (`str`, *optional*): - The version of the library that is making the HTTP request. Will be added to the user-agent header. - user_agent (`str`, `dict`, *optional*): - The user agent info in the form of a dictionary or a single string. It will be completed with information about the installed packages. - - Example: - ```py - >>> from huggingface_hub.utils import send_telemetry - - # Send telemetry without library information - >>> send_telemetry("ping") - - # Send telemetry to subtopic with library information - >>> send_telemetry("gradio/local_link", library_name="gradio", library_version="3.22.1") - - # Send telemetry with additional data - >>> send_telemetry( - ... topic="examples", - ... library_name="transformers", - ... library_version="4.26.0", - ... user_agent={"pipeline": "text_classification", "framework": "flax"}, - ... ) - ``` - """ - if constants.HF_HUB_OFFLINE or constants.HF_HUB_DISABLE_TELEMETRY: - return - - _start_telemetry_thread() # starts thread only if doesn't exist yet - _TELEMETRY_QUEUE.put( - {"topic": topic, "library_name": library_name, "library_version": library_version, "user_agent": user_agent} - ) - - -def _start_telemetry_thread(): - """Start a daemon thread to consume tasks from the telemetry queue. - - If the thread is interrupted, start a new one. 
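Editorial note (added, not part of the deleted file): because the worker runs as a daemon thread, the interpreter may exit while items are still queued, so telemetry delivery is best-effort by design.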
- """ - with _TELEMETRY_THREAD_LOCK: # avoid to start multiple threads if called concurrently - global _TELEMETRY_THREAD - if _TELEMETRY_THREAD is None or not _TELEMETRY_THREAD.is_alive(): - _TELEMETRY_THREAD = Thread(target=_telemetry_worker, daemon=True) - _TELEMETRY_THREAD.start() - - -def _telemetry_worker(): - """Wait for a task and consume it.""" - while True: - kwargs = _TELEMETRY_QUEUE.get() - _send_telemetry_in_thread(**kwargs) - _TELEMETRY_QUEUE.task_done() - - -def _send_telemetry_in_thread( - topic: str, - *, - library_name: Optional[str] = None, - library_version: Optional[str] = None, - user_agent: Union[Dict, str, None] = None, -) -> None: - """Contains the actual data sending data to the Hub.""" - path = "/".join(quote(part) for part in topic.split("/") if len(part) > 0) - try: - r = get_session().head( - f"{constants.ENDPOINT}/api/telemetry/{path}", - headers=build_hf_headers( - token=False, # no need to send a token for telemetry - library_name=library_name, - library_version=library_version, - user_agent=user_agent, - ), - ) - hf_raise_for_status(r) - except Exception as e: - # We don't want to error in case of connection errors of any kind. - logger.debug(f"Error while sending telemetry: {e}") diff --git a/spaces/dengmouren/minlik-chinese-alpaca-pro-33b-merged/app.py b/spaces/dengmouren/minlik-chinese-alpaca-pro-33b-merged/app.py deleted file mode 100644 index 91ac3768911cd8485c1f7de62d6c47f7967fd9d4..0000000000000000000000000000000000000000 --- a/spaces/dengmouren/minlik-chinese-alpaca-pro-33b-merged/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/minlik/chinese-alpaca-pro-33b-merged").launch() \ No newline at end of file diff --git a/spaces/devthedeveloper/Bark-with-Voice-Cloning/bark/model.py b/spaces/devthedeveloper/Bark-with-Voice-Cloning/bark/model.py deleted file mode 100644 index 457b49e749f396c47c6b35f44955fd512d233d79..0000000000000000000000000000000000000000 --- a/spaces/devthedeveloper/Bark-with-Voice-Cloning/bark/model.py +++ /dev/null @@ -1,218 +0,0 @@ -""" -Much of this code is adapted from Andrej Karpathy's NanoGPT -(https://github.com/karpathy/nanoGPT) -""" -import math -from dataclasses import dataclass - -import torch -import torch.nn as nn -from torch.nn import functional as F - -class LayerNorm(nn.Module): - """ LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False """ - - def __init__(self, ndim, bias): - super().__init__() - self.weight = nn.Parameter(torch.ones(ndim)) - self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None - - def forward(self, input): - return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5) - -class CausalSelfAttention(nn.Module): - - def __init__(self, config): - super().__init__() - assert config.n_embd % config.n_head == 0 - # key, query, value projections for all heads, but in a batch - self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias) - # output projection - self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias) - # regularization - self.attn_dropout = nn.Dropout(config.dropout) - self.resid_dropout = nn.Dropout(config.dropout) - self.n_head = config.n_head - self.n_embd = config.n_embd - self.dropout = config.dropout - # flash attention make GPU go brrrrr but support is only in PyTorch nightly and still a bit scary - self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention') - if not self.flash: - # print("WARNING: using slow attention. 
Flash Attention atm needs PyTorch nightly and dropout=0.0") - # causal mask to ensure that attention is only applied to the left in the input sequence - self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size)) - .view(1, 1, config.block_size, config.block_size)) - - def forward(self, x, past_kv=None, use_cache=False): - B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd) - - # calculate query, key, values for all heads in batch and move head forward to be the batch dim - q, k ,v = self.c_attn(x).split(self.n_embd, dim=2) - k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) - q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) - v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) - - if past_kv is not None: - past_key = past_kv[0] - past_value = past_kv[1] - k = torch.cat((past_key, k), dim=-2) - v = torch.cat((past_value, v), dim=-2) - - FULL_T = k.shape[-2] - - if use_cache is True: - present = (k, v) - else: - present = None - - # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T) - if self.flash: - # efficient attention using Flash Attention CUDA kernels - if past_kv is not None: - # When `past_kv` is provided, we're doing incremental decoding and `q.shape[2] == 1`: q only contains - # the query for the last token. scaled_dot_product_attention interprets this as the first token in the - # sequence, so if is_causal=True it will mask out all attention from it. This is not what we want, so - # to work around this we set is_causal=False. - is_causal = False - else: - is_causal = True - - y = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=self.dropout, is_causal=is_causal) - else: - # manual implementation of attention - att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) - att = att.masked_fill(self.bias[:,:,FULL_T-T:FULL_T,:FULL_T] == 0, float('-inf')) - att = F.softmax(att, dim=-1) - att = self.attn_dropout(att) - y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs) - y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side - - # output projection - y = self.resid_dropout(self.c_proj(y)) - return (y, present) - -class MLP(nn.Module): - - def __init__(self, config): - super().__init__() - self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias) - self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias) - self.dropout = nn.Dropout(config.dropout) - self.gelu = nn.GELU() - - def forward(self, x): - x = self.c_fc(x) - x = self.gelu(x) - x = self.c_proj(x) - x = self.dropout(x) - return x - -class Block(nn.Module): - - def __init__(self, config, layer_idx): - super().__init__() - self.ln_1 = LayerNorm(config.n_embd, bias=config.bias) - self.attn = CausalSelfAttention(config) - self.ln_2 = LayerNorm(config.n_embd, bias=config.bias) - self.mlp = MLP(config) - self.layer_idx = layer_idx - - def forward(self, x, past_kv=None, use_cache=False): - attn_output, prev_kvs = self.attn(self.ln_1(x), past_kv=past_kv, use_cache=use_cache) - x = x + attn_output - x = x + self.mlp(self.ln_2(x)) - return (x, prev_kvs) - -@dataclass -class GPTConfig: - block_size: int = 1024 - input_vocab_size: int = 10_048 - output_vocab_size: int = 10_048 - n_layer: int = 12 - n_head: int = 12 - n_embd: int = 768 - dropout: float = 0.0 - bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. 
False: a bit better and faster - -class GPT(nn.Module): - - def __init__(self, config): - super().__init__() - assert config.input_vocab_size is not None - assert config.output_vocab_size is not None - assert config.block_size is not None - self.config = config - - self.transformer = nn.ModuleDict(dict( - wte = nn.Embedding(config.input_vocab_size, config.n_embd), - wpe = nn.Embedding(config.block_size, config.n_embd), - drop = nn.Dropout(config.dropout), - h = nn.ModuleList([Block(config, idx) for idx in range(config.n_layer)]), - ln_f = LayerNorm(config.n_embd, bias=config.bias), - )) - self.lm_head = nn.Linear(config.n_embd, config.output_vocab_size, bias=False) - - def get_num_params(self, non_embedding=True): - """ - Return the number of parameters in the model. - For non-embedding count (default), the position embeddings get subtracted. - The token embeddings would too, except due to the parameter sharing these - params are actually used as weights in the final layer, so we include them. - """ - n_params = sum(p.numel() for p in self.parameters()) - if non_embedding: - n_params -= self.transformer.wte.weight.numel() - n_params -= self.transformer.wpe.weight.numel() - return n_params - - def forward(self, idx, merge_context=False, past_kv=None, position_ids=None, use_cache=False): - device = idx.device - b, t = idx.size() - if past_kv is not None: - assert t == 1 - tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd) - else: - if merge_context: - assert(idx.shape[1] >= 256+256+1) - t = idx.shape[1] - 256 - else: - assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}" - - # forward the GPT model itself - if merge_context: - tok_emb = torch.cat([ - self.transformer.wte(idx[:,:256]) + self.transformer.wte(idx[:,256:256+256]), - self.transformer.wte(idx[:,256+256:]) - ], dim=1) - else: - tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd) - - if past_kv is None: - past_length = 0 - past_kv = tuple([None] * len(self.transformer.h)) - else: - past_length = past_kv[0][0].size(-2) - - if position_ids is None: - position_ids = torch.arange(past_length, t + past_length, dtype=torch.long, device=device) - position_ids = position_ids.unsqueeze(0) # shape (1, t) - assert position_ids.shape == (1, t) - - pos_emb = self.transformer.wpe(position_ids) # position embeddings of shape (1, t, n_embd) - - x = self.transformer.drop(tok_emb + pos_emb) - - new_kv = () if use_cache else None - - for i, (block, past_layer_kv) in enumerate(zip(self.transformer.h, past_kv)): - x, kv = block(x, past_kv=past_layer_kv, use_cache=use_cache) - - if use_cache: - new_kv = new_kv + (kv,) - - x = self.transformer.ln_f(x) - - # inference-time mini-optimization: only forward the lm_head on the very last position - logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim - - return (logits, new_kv) diff --git a/spaces/dfgnota/gpt-doc-mem/README.md b/spaces/dfgnota/gpt-doc-mem/README.md deleted file mode 100644 index 8b94bfa96f293884ffa651e516143fa7416d7fe9..0000000000000000000000000000000000000000 --- a/spaces/dfgnota/gpt-doc-mem/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Gpt Doc Mem -emoji: 📉 -colorFrom: green -colorTo: purple -sdk: streamlit -sdk_version: 1.19.0 -app_file: app.py -pinned: false -license: lgpl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/diacanFperku/AutoGPT/Adobe 
Acrobat Xi Pro V11 Multi-xforce UPD Keygen.md b/spaces/diacanFperku/AutoGPT/Adobe Acrobat Xi Pro V11 Multi-xforce UPD Keygen.md deleted file mode 100644 index a74fb4c23fe582034fb06b71851d911207b852f1..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Adobe Acrobat Xi Pro V11 Multi-xforce UPD Keygen.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>adobe acrobat xi pro v11 multi-xforce keygen</h2><br /><p><b><b>DOWNLOAD</b> ··· <a href="https://gohhs.com/2uFTsp">https://gohhs.com/2uFTsp</a></b></p><br /><br /> -<br /> -Adobe acrobat xi pro v11.0.x multi-lang universal crack patch. xi deployment guide ... Incl. Keygen by X-FORCE/Patch by MPT34M/Cracked-DLL by PainteR +. 4d29de3e1b<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/diacanFperku/AutoGPT/DownloadMaya2019crack.md b/spaces/diacanFperku/AutoGPT/DownloadMaya2019crack.md deleted file mode 100644 index ec22e609c81fe96bdd852d9e308c8e85789e1244..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/DownloadMaya2019crack.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>downloadMaya2019crack</h2><br /><p><b><b>Download</b> ->>> <a href="https://gohhs.com/2uFTYK">https://gohhs.com/2uFTYK</a></b></p><br /><br /> -<br /> -USB Disk Security Crack 6 7 Serial key 2019 Registration code. The contenders are liquid, fabric, skin, hide, hair and muscles. Free autodesk maya 2020 serial ... 1fdad05405<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/diacanFperku/AutoGPT/Real Hack 2.0 Download Solidworks [REPACK].md b/spaces/diacanFperku/AutoGPT/Real Hack 2.0 Download Solidworks [REPACK].md deleted file mode 100644 index 9b2a8eeaff596d8acf05afa815b0f164bce2bd5e..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Real Hack 2.0 Download Solidworks [REPACK].md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>real hack 2.0 download solidworks</h2><br /><p><b><b>DOWNLOAD</b> ☑ <a href="https://gohhs.com/2uFVB3">https://gohhs.com/2uFVB3</a></b></p><br /><br /> -<br /> -You can download a copy of the stable (non-beta) version, and the beta for ... They are typically geared for the true 3D modeling professional. ... If you are familiar with Solidworks, this free, online package will be very easy to use. ... Our October Hacker is using his knowledge of horses and 3D design to ... 
1fdad05405<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/digitalxingtong/Azuma-Bert-VITS2/text/japanese.py b/spaces/digitalxingtong/Azuma-Bert-VITS2/text/japanese.py deleted file mode 100644 index ddedafa0c5b7986068dc6c91637a86febc3923a9..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Azuma-Bert-VITS2/text/japanese.py +++ /dev/null @@ -1,104 +0,0 @@ -# modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py -import re -import sys - -import pyopenjtalk - -from text import symbols - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - - -def post_replace_ph(ph): - rep_map = { - ':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - '...': '…', - 'v': "V" - } - if ph in rep_map.keys(): - ph = rep_map[ph] - if ph in symbols: - return ph - if ph not in symbols: - ph = 'UNK' - return ph - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def preprocess_jap(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = [] - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - p = pyopenjtalk.g2p(sentence) - text += p.split(" ") - - if i < len(marks): - text += [marks[i].replace(' ', '')] - return text - -def text_normalize(text): - # todo: jap text normalize - return text - -def g2p(norm_text): - phones = preprocess_jap(norm_text) - phones = [post_replace_ph(i) for i in phones] - # todo: implement tones and word2ph - tones = [0 for i in phones] - word2ph = [1 for i in phones] - return phones, tones, word2ph - - -if __name__ == '__main__': - for line in open("../../../Downloads/transcript_utf8.txt").readlines(): - text = line.split(":")[1] - phones, tones, word2ph = g2p(text) - for p in phones: - if p == "z": - print(text, phones) - sys.exit(0) diff --git a/spaces/dineshreddy/WALT/mmdet/models/roi_heads/double_roi_head.py b/spaces/dineshreddy/WALT/mmdet/models/roi_heads/double_roi_head.py deleted file mode 100644 index a1aa6c8244a889fbbed312a89574c3e11be294f0..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/models/roi_heads/double_roi_head.py +++ /dev/null @@ -1,33 +0,0 @@ -from ..builder import HEADS -from .standard_roi_head import StandardRoIHead - - -@HEADS.register_module() -class DoubleHeadRoIHead(StandardRoIHead): - """RoI head 
for Double Head RCNN. - - https://arxiv.org/abs/1904.06493 - """ - - def __init__(self, reg_roi_scale_factor, **kwargs): - super(DoubleHeadRoIHead, self).__init__(**kwargs) - self.reg_roi_scale_factor = reg_roi_scale_factor - - def _bbox_forward(self, x, rois): - """Box head forward function used in both training and testing time.""" - bbox_cls_feats = self.bbox_roi_extractor( - x[:self.bbox_roi_extractor.num_inputs], rois) - bbox_reg_feats = self.bbox_roi_extractor( - x[:self.bbox_roi_extractor.num_inputs], - rois, - roi_scale_factor=self.reg_roi_scale_factor) - if self.with_shared_head: - bbox_cls_feats = self.shared_head(bbox_cls_feats) - bbox_reg_feats = self.shared_head(bbox_reg_feats) - cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats) - - bbox_results = dict( - cls_score=cls_score, - bbox_pred=bbox_pred, - bbox_feats=bbox_cls_feats) - return bbox_results diff --git a/spaces/dmeck/RVC-Speakers/vits/modules/attentions/attentions.py b/spaces/dmeck/RVC-Speakers/vits/modules/attentions/attentions.py deleted file mode 100644 index 4f87f022cd632def0235358300f124e77b7ba6a5..0000000000000000000000000000000000000000 --- a/spaces/dmeck/RVC-Speakers/vits/modules/attentions/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -from vits.modules import commons -from vits.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - 
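# NOTE (editorial annotation, added, not part of the deleted file): each decoder layer stacks three sublayers in the usual transformer-decoder order (causal self-attention, encoder-decoder cross-attention, then a causal feed-forward block); forward() applies dropout, a residual add and LayerNorm after each sublayer.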
self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." 
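# NOTE (editorial annotation, added, not part of the deleted file): the branch below implements relative position attention in the style of Shaw et al. (this editor's characterization, not a claim made by the original repo): learned embeddings for offsets in [-window_size, window_size] are matmul'd against the scaled queries, and the resulting [b, h, t_t, 2*t_s - 1] relative logits are re-indexed into absolute [b, h, t_t, t_s] scores and added to the content scores.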
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. 
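# NOTE (editorial annotation, added, not part of the deleted file): a concrete check of this index shuffle, assuming length=3: feed in x whose last axis is torch.arange(5.) for every (b, h, i); padding each row with one zero and flattening skews consecutive rows by one slot, so the final [:, :, :3, 2:] slice gives x_final[0, 0] == [[2, 3, 4], [1, 2, 3], [0, 1, 2]], i.e. entry (i, j) reads relative slot (j - i) + (length - 1), which is exactly the absolute-position lookup the attention needs.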
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/dongyi/MMFS/models/modules/stylegan2/op/conv2d_gradfix.py b/spaces/dongyi/MMFS/models/modules/stylegan2/op/conv2d_gradfix.py deleted file mode 100644 index 855dcae32ae7a7767914bc0edc09962beb10437d..0000000000000000000000000000000000000000 --- a/spaces/dongyi/MMFS/models/modules/stylegan2/op/conv2d_gradfix.py +++ /dev/null @@ -1,227 +0,0 @@ -import contextlib -import warnings - -import torch -from torch import autograd -from torch.nn import functional as F - -enabled = True -weight_gradients_disabled = False - - -@contextlib.contextmanager -def no_weight_gradients(): - global weight_gradients_disabled - - old = weight_gradients_disabled - weight_gradients_disabled = True - yield - weight_gradients_disabled = old - - -def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): - if could_use_op(input): - return conv2d_gradfix( - transpose=False, - weight_shape=weight.shape, - stride=stride, - padding=padding, - output_padding=0, - dilation=dilation, - groups=groups, - ).apply(input, weight, bias) - - return F.conv2d( - input=input, - 
weight=weight, - bias=bias, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - ) - - -def conv_transpose2d( - input, - weight, - bias=None, - stride=1, - padding=0, - output_padding=0, - groups=1, - dilation=1, -): - if could_use_op(input): - return conv2d_gradfix( - transpose=True, - weight_shape=weight.shape, - stride=stride, - padding=padding, - output_padding=output_padding, - groups=groups, - dilation=dilation, - ).apply(input, weight, bias) - - return F.conv_transpose2d( - input=input, - weight=weight, - bias=bias, - stride=stride, - padding=padding, - output_padding=output_padding, - dilation=dilation, - groups=groups, - ) - - -def could_use_op(input): - if (not enabled) or (not torch.backends.cudnn.enabled): - return False - - if input.device.type != "cuda": - return False - - if any(torch.__version__.startswith(x) for x in ["1.7.", "1.8.", "1.9.", "1.10."]): - return True - - warnings.warn( - f"conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d()." - ) - - return False - - -def ensure_tuple(xs, ndim): - xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim - - return xs - - -conv2d_gradfix_cache = dict() - - -def conv2d_gradfix( - transpose, weight_shape, stride, padding, output_padding, dilation, groups -): - ndim = 2 - weight_shape = tuple(weight_shape) - stride = ensure_tuple(stride, ndim) - padding = ensure_tuple(padding, ndim) - output_padding = ensure_tuple(output_padding, ndim) - dilation = ensure_tuple(dilation, ndim) - - key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups) - if key in conv2d_gradfix_cache: - return conv2d_gradfix_cache[key] - - common_kwargs = dict( - stride=stride, padding=padding, dilation=dilation, groups=groups - ) - - def calc_output_padding(input_shape, output_shape): - if transpose: - return [0, 0] - - return [ - input_shape[i + 2] - - (output_shape[i + 2] - 1) * stride[i] - - (1 - 2 * padding[i]) - - dilation[i] * (weight_shape[i + 2] - 1) - for i in range(ndim) - ] - - class Conv2d(autograd.Function): - @staticmethod - def forward(ctx, input, weight, bias): - if not transpose: - out = F.conv2d(input=input, weight=weight, bias=bias, **common_kwargs) - - else: - out = F.conv_transpose2d( - input=input, - weight=weight, - bias=bias, - output_padding=output_padding, - **common_kwargs, - ) - - ctx.save_for_backward(input, weight) - - return out - - @staticmethod - def backward(ctx, grad_output): - input, weight = ctx.saved_tensors - grad_input, grad_weight, grad_bias = None, None, None - - if ctx.needs_input_grad[0]: - p = calc_output_padding( - input_shape=input.shape, output_shape=grad_output.shape - ) - grad_input = conv2d_gradfix( - transpose=(not transpose), - weight_shape=weight_shape, - output_padding=p, - **common_kwargs, - ).apply(grad_output, weight, None) - - if ctx.needs_input_grad[1] and not weight_gradients_disabled: - grad_weight = Conv2dGradWeight.apply(grad_output, input) - - if ctx.needs_input_grad[2]: - grad_bias = grad_output.sum((0, 2, 3)) - - return grad_input, grad_weight, grad_bias - - class Conv2dGradWeight(autograd.Function): - @staticmethod - def forward(ctx, grad_output, input): - op = torch._C._jit_get_operation( - "aten::cudnn_convolution_backward_weight" - if not transpose - else "aten::cudnn_convolution_transpose_backward_weight" - ) - flags = [ - torch.backends.cudnn.benchmark, - torch.backends.cudnn.deterministic, - torch.backends.cudnn.allow_tf32, - ] - grad_weight = op( - weight_shape, - 
grad_output, - input, - padding, - stride, - dilation, - groups, - *flags, - ) - ctx.save_for_backward(grad_output, input) - - return grad_weight - - @staticmethod - def backward(ctx, grad_grad_weight): - grad_output, input = ctx.saved_tensors - grad_grad_output, grad_grad_input = None, None - - if ctx.needs_input_grad[0]: - grad_grad_output = Conv2d.apply(input, grad_grad_weight, None) - - if ctx.needs_input_grad[1]: - p = calc_output_padding( - input_shape=input.shape, output_shape=grad_output.shape - ) - grad_grad_input = conv2d_gradfix( - transpose=(not transpose), - weight_shape=weight_shape, - output_padding=p, - **common_kwargs, - ).apply(grad_output, grad_grad_weight, None) - - return grad_grad_output, grad_grad_input - - conv2d_gradfix_cache[key] = Conv2d - - return Conv2d diff --git a/spaces/ecarbo/paddleOCR-demo/app.py b/spaces/ecarbo/paddleOCR-demo/app.py deleted file mode 100644 index 9d60fffff6a32047e71d7dc7d15046a56f945dee..0000000000000000000000000000000000000000 --- a/spaces/ecarbo/paddleOCR-demo/app.py +++ /dev/null @@ -1,40 +0,0 @@ -import os -os.system('pip install paddlepaddle') -os.system('pip install paddleocr') -from paddleocr import PaddleOCR, draw_ocr -from PIL import Image -import gradio as gr -import torch - -torch.hub.download_url_to_file('https://i.imgur.com/aqMBT0i.jpg', 'example.jpg') - -def inference(img, lang): - ocr = PaddleOCR(use_angle_cls=True, lang=lang,use_gpu=False) - img_path = img.name - result = ocr.ocr(img_path, cls=True) - image = Image.open(img_path).convert('RGB') - boxes = [line[0] for line in result] - txts = [line[1][0] for line in result] - scores = [line[1][1] for line in result] - im_show = draw_ocr(image, boxes, txts, scores, font_path='Roboto-Light.ttf') - im_show = Image.fromarray(im_show) - im_show.save('result.jpg') - return 'result.jpg', result[0][1][0] - - -title = 'PaddleOCR' -description = 'Gradio demo for PaddleOCR. PaddleOCR demo supports Chinese, English, French, German, Korean and Japanese.To use it, simply upload your image and choose a language from the dropdown menu, or click one of the examples to load them. Read more at the links below.' 
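# NOTE (editorial annotation, added, not part of the deleted file): inference() above iterates result as a flat list of [box, (text, score)] pairs and previews result[0][1][0], which matches the PaddleOCR API of this Space's era; newer PaddleOCR releases nest results per page, so the indexing is version-dependent. This is an editorial observation, not a statement from the original Space.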
-article = "<p style='text-align: center'><a href='https://www.paddlepaddle.org.cn/hub/scene/ocr'>Awesome multilingual OCR toolkits based on PaddlePaddle (practical ultra lightweight OCR system, support 80+ languages recognition, provide data annotation and synthesis tools, support training and deployment among server, mobile, embedded and IoT devices)</a> | <a href='https://github.com/PaddlePaddle/PaddleOCR'>Github Repo</a></p>" -examples = [['example.jpg','en']] -css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}" -gr.Interface( - inference, - [gr.inputs.Image(type='file', label='Input'),gr.inputs.Dropdown(choices=['ch', 'en', 'fr', 'german', 'korean', 'japan'], type="value", default='en', label='language')], - [gr.outputs.Image(type='file', label='Output'), gr.outputs.Textbox(type='str', label='Prediction')], - title=title, - description=description, - article=article, - examples=examples, - css=css, - enable_queue=True - ).launch(debug=True) diff --git a/spaces/eldobbins/coral-spawning-detector/README.md b/spaces/eldobbins/coral-spawning-detector/README.md deleted file mode 100644 index 8e2c0f7c953ae10ce02bd7bf33c95b3ab2b17311..0000000000000000000000000000000000000000 --- a/spaces/eldobbins/coral-spawning-detector/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Coral Spawning Detector -emoji: 👀 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false -license: cc-by-nd-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/elkraken/Video-Object-Detection/deploy/triton-inference-server/labels.py b/spaces/elkraken/Video-Object-Detection/deploy/triton-inference-server/labels.py deleted file mode 100644 index ba6c5c516fcd1149233f34d73bb46d472a2bfed4..0000000000000000000000000000000000000000 --- a/spaces/elkraken/Video-Object-Detection/deploy/triton-inference-server/labels.py +++ /dev/null @@ -1,83 +0,0 @@ -from enum import Enum - -class COCOLabels(Enum): - PERSON = 0 - BICYCLE = 1 - CAR = 2 - MOTORBIKE = 3 - AEROPLANE = 4 - BUS = 5 - TRAIN = 6 - TRUCK = 7 - BOAT = 8 - TRAFFIC_LIGHT = 9 - FIRE_HYDRANT = 10 - STOP_SIGN = 11 - PARKING_METER = 12 - BENCH = 13 - BIRD = 14 - CAT = 15 - DOG = 16 - HORSE = 17 - SHEEP = 18 - COW = 19 - ELEPHANT = 20 - BEAR = 21 - ZEBRA = 22 - GIRAFFE = 23 - BACKPACK = 24 - UMBRELLA = 25 - HANDBAG = 26 - TIE = 27 - SUITCASE = 28 - FRISBEE = 29 - SKIS = 30 - SNOWBOARD = 31 - SPORTS_BALL = 32 - KITE = 33 - BASEBALL_BAT = 34 - BASEBALL_GLOVE = 35 - SKATEBOARD = 36 - SURFBOARD = 37 - TENNIS_RACKET = 38 - BOTTLE = 39 - WINE_GLASS = 40 - CUP = 41 - FORK = 42 - KNIFE = 43 - SPOON = 44 - BOWL = 45 - BANANA = 46 - APPLE = 47 - SANDWICH = 48 - ORANGE = 49 - BROCCOLI = 50 - CARROT = 51 - HOT_DOG = 52 - PIZZA = 53 - DONUT = 54 - CAKE = 55 - CHAIR = 56 - SOFA = 57 - POTTEDPLANT = 58 - BED = 59 - DININGTABLE = 60 - TOILET = 61 - TVMONITOR = 62 - LAPTOP = 63 - MOUSE = 64 - REMOTE = 65 - KEYBOARD = 66 - CELL_PHONE = 67 - MICROWAVE = 68 - OVEN = 69 - TOASTER = 70 - SINK = 71 - REFRIGERATOR = 72 - BOOK = 73 - CLOCK = 74 - VASE = 75 - SCISSORS = 76 - TEDDY_BEAR = 77 - HAIR_DRIER = 78 - TOOTHBRUSH = 79 diff --git a/spaces/eson/tokenizer-arena/vocab/gpt_neox_chinese_v1/build_tokenizer_chinese.py b/spaces/eson/tokenizer-arena/vocab/gpt_neox_chinese_v1/build_tokenizer_chinese.py deleted file mode 100644 index 091a6bc30041defe4a42c84e5b56c8e095ccd64b..0000000000000000000000000000000000000000 --- 
a/spaces/eson/tokenizer-arena/vocab/gpt_neox_chinese_v1/build_tokenizer_chinese.py +++ /dev/null @@ -1,61 +0,0 @@
-"""
-What is the merge step for?
-
-## Result
-
-4357 tokens were merged in total
-"""
-
-import json
-from tokenizers import Tokenizer
-from data_sample.oov_base import jd_vocab_tokens
-from zhon.hanzi import punctuation as zh_punc
-
-def load_base_tokenizer(vocab_path):
-    data = json.load(open(vocab_path, "r", encoding="utf-8"))
-    tokenizer = Tokenizer.from_file(vocab_path)
-    print("vocab_size with added_tokens:", tokenizer.get_vocab_size(with_added_tokens=True))
-    return data, tokenizer
-
-data, base_tokenizer = load_base_tokenizer("../gpt_nexo_20b/20B_tokenizer.json")
-vocab = data["model"]["vocab"]
-merges = data["model"]["merges"]
-vocab_size = base_tokenizer.get_vocab_size(with_added_tokens=True)
-
-
-"""
-Option 1: keep the ids of the existing added_tokens unchanged. Option 2: shift the ids of the existing added_tokens.
-Option 1 is used below.
-"""
-new_added_tokens = {}
-for word in jd_vocab_tokens + list(zh_punc):
-    if len(word) > 1 or word in new_added_tokens:
-        continue
-    encoding = base_tokenizer.encode(word)
-    # if len(encoding.ids) > 1:
-    if len(encoding.ids) == 2:  # what about words that encode to 3 tokens?
-        tokens = [base_tokenizer.id_to_token(token_id) for token_id in encoding.ids]
-        # print("merging", vocab_size, word, json.dumps(tokens))
-        vocab["".join(tokens)] = vocab_size
-        new_added_tokens[word] = vocab_size
-        vocab_size += 1
-        merges.append(" ".join(tokens))
-
-
-
-print("merged %d tokens in total" % (len(new_added_tokens)))
-
-with open("20B_tokenizer_chinese.json", "w", encoding="utf-8") as f_out:
-    json.dump(data, f_out, indent=2)
-
-## check
-tokenizer = Tokenizer.from_file("20B_tokenizer_chinese.json")
-all_error_ids = []
-for word, idx in new_added_tokens.items():
-    decode_str = tokenizer.decode([idx])
-    if word != decode_str:
-        all_error_ids.append(idx)
-        print(idx, word, decode_str)
-
-print(all_error_ids)
-
diff --git a/spaces/exbert-project/exbert/client/src/ts/etc/apiHelpers.ts b/spaces/exbert-project/exbert/client/src/ts/etc/apiHelpers.ts deleted file mode 100644 index 7dd0220eeb308a3cc13fb762988bf85ff9e7e442..0000000000000000000000000000000000000000 --- a/spaces/exbert-project/exbert/client/src/ts/etc/apiHelpers.ts +++ /dev/null @@ -1,33 +0,0 @@
-/**
- * Convert a JS object into GET URL parameters
- *
- * @param base Base URL atop which to add GET parameters
- * @param params Object to insert into a URL string
- */
-export function makeUrl(base: string, params?: object):string {
-    if (params){
-        let out: string = base + "?";
-
-        Object.keys(params).forEach( k => {
-            out += k;
-            out += '=';
-            out += params[k];
-            out += "&";
-        })
-        return out.replace(/&$/g, "");
-    }
-    else {
-        return base;
-    }
-};
-
-/**
- * Convert information in GET request into the message for a POST request
- */
-export const toPayload = (toSend) => {return {
-    method:"POST",
-    body:JSON.stringify(toSend),
-    headers: {
-        "Content-type": "application/json; charset=UTF-8"
-    }
-}}
\ No newline at end of file
diff --git a/spaces/ezioruan/roop/roop/typing.py b/spaces/ezioruan/roop/roop/typing.py deleted file mode 100644 index 1cff7440616e20bfe7b8bc287f86d11bf1b0f083..0000000000000000000000000000000000000000 --- a/spaces/ezioruan/roop/roop/typing.py +++ /dev/null @@ -1,7 +0,0 @@
-from typing import Any
-
-from insightface.app.common import Face
-import numpy
-
-Face = Face
-Frame = numpy.ndarray[Any, Any]
diff --git a/spaces/facebook/MusicGen/audiocraft/utils/__init__.py b/spaces/facebook/MusicGen/audiocraft/utils/__init__.py deleted file mode 100644 index 75e25a0212f98e4a18d97c86c6cda225636a3215..0000000000000000000000000000000000000000 ---
a/spaces/facebook/MusicGen/audiocraft/utils/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""Utilities.""" diff --git a/spaces/falterWliame/Face_Mask_Detection/Luxor 5th Passage TOP Download Low Pc.md b/spaces/falterWliame/Face_Mask_Detection/Luxor 5th Passage TOP Download Low Pc.md deleted file mode 100644 index 87d94205640611d552e820bfdbbb631d6eab990f..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Luxor 5th Passage TOP Download Low Pc.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Luxor: 5th Passage download low pc</h2><br /><p><b><b>Download</b> 🆓 <a href="https://urlca.com/2uDdMG">https://urlca.com/2uDdMG</a></b></p><br /><br /> - -Luxor 5th Passage [Download | Purchase high quality products online with Ubuy Mauritius with best possible prices. We are best online shopping website in ... 4d29de3e1b<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/fatiXbelha/sd/Call of Duty Mod APK Download How to Get Unlimited Money in COD Mobile.md b/spaces/fatiXbelha/sd/Call of Duty Mod APK Download How to Get Unlimited Money in COD Mobile.md deleted file mode 100644 index 2636a12f8ce97e5ec496cef9c80ec7cfdf9062c2..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Call of Duty Mod APK Download How to Get Unlimited Money in COD Mobile.md +++ /dev/null @@ -1,128 +0,0 @@ - -<h1>How to Download Call of Duty Mobile Mod Apk</h1> -<p>Call of Duty Mobile is one of the most popular and exciting first-person shooter games on mobile devices. It offers a variety of game modes, maps, weapons, and operators that you can enjoy with your friends or other players online. However, if you want to unlock more features and have an edge over your opponents, you might want to try the Call of Duty Mobile mod apk. This is a modified version of the game that gives you access to unlimited resources, unlocked items, and other advantages. In this article, we will show you how to download and install the Call of Duty Mobile mod apk on your Android device. We will also explain how to update and uninstall the mod apk if you need to.</p> -<h2>how to download call of duty mobile mod apk</h2><br /><p><b><b>Download</b> ····· <a href="https://urllie.com/2uNvNU">https://urllie.com/2uNvNU</a></b></p><br /><br /> - <h2>What is Call of Duty Mobile?</h2> -<p>Call of Duty Mobile is a free-to-play mobile game developed by Activision and Tencent Games. It is based on the popular Call of Duty franchise that has been around since 2003. The game features multiplayer modes such as Team Deathmatch, Domination, and Kill-Confirmed on iconic maps such as Shipment, Raid, and Standoff. It also has a 100-player battle royale mode where you can fight for survival on a large map with vehicles, weapons, and loot. You can customize your loadout with different operators, weapons, outfits, scorestreaks, and gear that you can unlock and earn as you play. 
The game also has seasonal content that adds new game modes, maps, events, and rewards every month.</p>
- <h3>Features of Call of Duty Mobile</h3>
-<p>Some of the main features of Call of Duty Mobile are:</p>
-<ul>
-<li>Console quality HD graphics and sound</li>
-<li>Customizable and intuitive controls</li>
-<li>Voice and text chat with your friends</li>
-<li>Thrilling multiplayer and battle royale modes</li>
-<li>Iconic maps from Call of Duty: Black Ops and Modern Warfare</li>
-<li>Diverse operators, weapons, outfits, scorestreaks, and gear</li>
-<li>New seasonal content every month</li>
-</ul>
- <h3>Benefits of Call of Duty Mobile Mod Apk</h3>
-<p>The Call of Duty Mobile mod apk is a modified version of the game that gives you some extra benefits that are not available in the original version. Some of the benefits are:</p>
-<ul>
-<li>Unlimited resources such as coins, credits, CP, and gems</li>
-<li>Unlocked items such as operators, weapons, outfits, scorestreaks, and gear</li>
-<li>No ads or pop-ups</li>
-<li>No root or jailbreak required</li>
-<li>No ban or detection risk</li>
-<li>Easy to download and install</li>
-</ul>
- <h2>How to Download and Install Call of Duty Mobile Mod Apk</h2>
-<p>If you want to download and install the Call of Duty Mobile mod apk on your Android device, you need to follow these steps:</p>
- <h3>Step 1: Enable Unknown Sources</h3>
-<p>Before you can install any mod apk file on your device, you need to enable unknown sources in your settings. This will allow you to install apps from sources other than the Google Play Store. To do this:</p>
-<ol>
-<li>Go to your device's settings.</li>
-<li>Tap on security or privacy.</li>
-<li>Find and enable unknown sources or allow installation of apps from unknown sources.</li>
-</ol>
- <h3>Step 2: Download the Mod Apk File</h3>
-<p>Next, you need to download the mod apk file of Call of Duty Mobile from a reliable source. There are many websites that offer mod apk files, but some of them may contain viruses or malware that can harm your device. Therefore, you should be careful and only download from trusted sources. One of the websites that we recommend is HappyMod, which is a platform that provides modded apps and games for Android users. To download the mod apk file from HappyMod:</p>
-<ol>
-<li>Go to the HappyMod website using your browser.</li>
-<li>Search for Call of Duty Mobile in the search bar.</li>
-<li>Select the latest version of the mod apk file that has the features you want.</li>
-<li>Tap on the download button and wait for the file to be downloaded.</li>
-</ol>
- <h3>Step 3: Install the Mod Apk File</h3>
-<p>Once you have downloaded the mod apk file, you need to install it on your device. To do this:</p>
-<ol>
-<li>Locate the mod apk file in your device's storage using a file manager app.</li>
-<li>Tap on the mod apk file and select install.</li>
-<li>Wait for the installation process to finish.</li>
-</ol>
- <h3>Step 4: Launch the Game and Enjoy</h3>
-<p>After installing the mod apk file, you can launch the game and enjoy the benefits of the modded version. You will see that you have unlimited resources, unlocked items, and other advantages. You can also play with your friends or other players online without any problems. However, you should be careful not to abuse the mod features or use them in a way that can ruin the game experience for others. Otherwise, you may face some consequences such as being reported or banned by the game developers.</p>
- <h2>How to Update Call of Duty Mobile Mod Apk</h2>
-<p>If you want to update your Call of Duty Mobile mod apk to the latest version, you have two options:</p>
- <h3>Method 1: Use HappyMod App</h3>
-<p>The easiest way to update your mod apk is to use the HappyMod app, which is an app store that provides modded apps and games for Android users. You can download and install the HappyMod app from its official website or from other sources. Once you have the HappyMod app on your device, you can use it to update your Call of Duty Mobile mod apk.
To do this:</p> -<ol> -<li>Open the HappyMod app on your device.</li> -<li>Go to the My Games section and find Call of Duty Mobile.</li> -<li>If there is a new version available, you will see an update button next to it.</li> -<li>Tap on the update button and wait for the new version to be downloaded and installed.</li> -</ol> - <h3>Method 2: Check for Updates Manually</h3> -<p>The other way to update your mod apk is to check for updates manually from the website where you downloaded it. You can visit the website regularly and see if there is a new version of the mod apk file available. If there is, you can download and install it following the same steps as before. However, this method may be more time-consuming and less convenient than using the HappyMod app.</p> - <h2>How to Uninstall Call of Duty Mobile Mod Apk</h2> -<p>If you want to uninstall your Call of Duty Mobile mod apk from your device, you can follow these steps:</p> - <h3>Step 1: Go to Settings</h3> -<p>Go to your device's settings and find the apps or applications section.</p> - <h3>Step 2: Find and Tap on Call of Duty Mobile</h3> -<p>Find and tap on Call of Duty Mobile in the list of apps installed on your device.</p> - <h3>Step 3: Tap on Uninstall and Confirm</h3> -<p>Tap on uninstall and confirm your action. This will remove the mod apk file from your device.</p> - <h2>Conclusion</h2> -<p>In this article, we have shown you how to download and install Call of Duty Mobile mod apk on your Android device. We have also explained how to update and uninstall the mod apk if you need to. We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming! <h2>FAQs</h2> -<p>Here are some frequently asked questions about Call of Duty Mobile mod apk:</p> - <h3>Q: Is Call of Duty Mobile mod apk safe to use?</h3> -<p>A: Yes, Call of Duty Mobile mod apk is safe to use as long as you download it from a reliable source and follow the instructions carefully. However, you should be aware that using mod apk files may violate the terms and conditions of the game and may result in some risks such as being banned or detected by the game developers. Therefore, you should use the mod apk at your own discretion and responsibility.</p> - <h3>Q: Can I play Call of Duty Mobile mod apk with my friends?</h3> -<p>A: Yes, you can play Call of Duty Mobile mod apk with your friends or other players online without any problems. However, you should be careful not to abuse the mod features or use them in a way that can ruin the game experience for others. Otherwise, you may face some consequences such as being reported or banned by the game developers.</p> - <h3>Q: Do I need to root or jailbreak my device to use Call of Duty Mobile mod apk?</h3> -<p>A: No, you do not need to root or jailbreak your device to use Call of Duty Mobile mod apk. You just need to enable unknown sources in your settings and install the mod apk file on your device.</p> - <h3>Q: How can I get more resources, items, and features in Call of Duty Mobile mod apk?</h3> -<p>A: You can get more resources, items, and features in Call of Duty Mobile mod apk by downloading the latest version of the mod apk file that has the features you want. 
You can also use the HappyMod app to update your mod apk easily and quickly.</p> - <h3>Q: How can I contact the developer of Call of Duty Mobile mod apk?</h3> -<p>A: You can contact the developer of Call of Duty Mobile mod apk by visiting their website or social media pages. You can also leave a comment or feedback on their website or app store page.</p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Chess Download APK Android Play Online with Millions of Players.md b/spaces/fatiXbelha/sd/Chess Download APK Android Play Online with Millions of Players.md deleted file mode 100644 index 8d85b98c9d5bd5f07706fdfee7d28807437aba4e..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Chess Download APK Android Play Online with Millions of Players.md +++ /dev/null @@ -1,167 +0,0 @@ - -<h1>Chess Download APK Android: How to Play Chess on Your Phone</h1> -<p>Chess is one of the oldest and most popular board games in the world. It is a game of strategy, logic, and skill that can challenge your mind and improve your cognitive abilities. Whether you are a beginner or a master, chess can offer you endless hours of fun and entertainment.</p> -<h2>chess download apk android</h2><br /><p><b><b>Download</b> ->>> <a href="https://urllie.com/2uNwoq">https://urllie.com/2uNwoq</a></b></p><br /><br /> -<p>But what if you don't have a chess board or a partner to play with? Don't worry, you can still enjoy chess on your phone. Thanks to technology, there are many chess apps for android that you can download and play anytime, anywhere. You can play chess online with millions of players from around the world, or offline with your friends or against the computer. You can also learn new skills, solve puzzles, take lessons, and join tournaments.</p> -<p>In this article, we will show you how to download chess apk android from different sources. We will also review three of the best chess apps for android that you can try today. Let's get started!</p> - <h2>Chess.com: The Best Chess App for Android</h2> -<p>If you are looking for the ultimate chess app for android, look no further than <a href="(^1^)">Chess.com</a>. Chess.com is the most popular chess website in the world, with over 130 million members and 5 million games played every day. 
It has everything you need to enjoy chess on your phone.</p>
-<p>Some of the features and advantages of Chess.com app are:</p>
-<ul>
-<li>It is free to download and use.</li>
-<li>It has a beautiful and user-friendly interface.</li>
-<li>It has hundreds of themes and customizations for your board and pieces.</li>
-<li>It has over 350,000 tactics puzzles and 1,500 lessons to help you improve your chess skills.</li>
-<li>It has a variety of game modes, such as blitz, bullet, rapid, daily, live, casual, rated, and more.</li>
-<li>It has a social aspect, where you can chat with other players, join clubs, follow streamers, and watch live events.</li>
-</ul>
-<p>To install and use Chess.com app on your phone, follow these steps:</p>
-<ol>
-<li>Go to <a href="">Chess.com</a> or search for "Chess.com" on Google Play Store.</li>
-<li>Tap on "Install" and wait for the app to download.</li>
-<li>Open the app and sign up with your email or Facebook account.</li>
-<li>Select your skill level and preferences.</li>
-<li>Start playing chess!</li>
-</ol>
-<p>To play chess on Chess.com app, you have several options:</p>
-<ul>
-<li>To play online with other players, tap on "Play Online" and choose your game mode, time control, rating range, color preference, etc. Then tap on "Play" and wait for an opponent.</li>
-<li>To play with your friends, tap on "Play Friends" and choose whether you want to play by username, email address, or QR code.
Then invite your friend or accept their invitation.</li> -<li>To play against the computer, tap on "vs Computer" and choose your difficulty level, color preference , etc. Then tap on "Play" and start the game.</li> -<li>To play in tournaments, tap on "Tournaments" and choose a tournament that suits your level, time control, format, etc. Then tap on "Join" and wait for the tournament to start.</li> -</ul> - <h2>Chess APK: A Free and Simple Chess Game for Android</h2> -<p>If you are looking for a free and simple chess game for android, you might want to try <a href="">Chess APK</a>. Chess APK is a lightweight and minimalist chess game that you can play offline or online. It has a basic and easy-to-use interface that lets you focus on the game.</p> -<p>Some of the features and advantages of Chess APK game are:</p> -<ul> -<li>It is free to download and use.</li> -<li>It has a small size and does not require much storage space or internet connection.</li> -<li>It has a simple and elegant design that does not distract you from the game.</li> -<li>It has three levels of difficulty: easy, medium, and hard.</li> -<li>It has an undo option that lets you take back your moves.</li> -<li>It has a hint option that gives you suggestions for your next move.</li> -</ul> -<p>To install and use Chess APK game on your phone, follow these steps:</p> -<ol> -<li>Go to <a href="">Chess APK</a> or search for "Chess APK" on Google Play Store.</li> -<li>Tap on "Install" and wait for the game to download.</li> -<li>Open the game and choose whether you want to play offline or online.</li> -<li>Select your difficulty level and color preference.</li> -<li>Start playing chess!</li> -</ol> -<p>To play chess on Chess APK game, you have two options:</p> -<ul> -<li>To play offline, tap on "Offline" and choose your difficulty level and color preference. Then tap on "Start" and start the game. You can also tap on "Options" to access the undo, hint, sound, and reset options.</li> -<li>To play online, tap on "Online" and choose whether you want to create a room or join a room. Then enter a room name or code and wait for another player to join. You can also chat with your opponent during the game.</li> -</ul> - <h2>Chess for Android: A Versatile Chess Application for Android</h2> -<p>If you are looking for a versatile chess application for android, you might want to check out <a href="">Chess for Android</a>. Chess for Android is a comprehensive chess app that supports both online and offline play. It also supports third-party chess engines, electronic boards, and online servers.</p> -<p>Some of the features and advantages of Chess for Android app are:</p> -<ul> -<li>It is free to download and use.</li> -<li>It has a clean and intuitive interface that allows you to customize your board size, colors, notation, etc.</li> -<li>It has a built-in chess engine that can play at various levels of strength.</li> -<li>It supports third-party chess engines such as Stockfish, Komodo, Houdini, etc. that you can download and install separately.</li> -<li>It supports electronic boards such as DGT Bluetooth e-Board, Millennium ChessGenius Exclusive Chess Computer, etc. 
that you can connect via Bluetooth or USB.</li>
-<li>It supports online servers such as FICS (Free Internet Chess Server) and ICC (Internet Chess Club) that you can log in and play with other players or watch live games.</li>
-</ul>
-<p>To install and use Chess for Android app on your phone, follow these steps:</p>
-<ol>
-<li>Go to <a href="">Chess for Android</a> or search for "Chess for Android" on Google Play Store.</li>
-<li>Tap on "Install" and wait for the app to download.</li>
-<li>Open the app and choose whether you want to play offline or online.</li>
-<li>Select your board size, colors, notation, etc. from the menu options.</li>
-<li>Start playing chess!</li>
-</ol>
- <h2>Conclusion</h2>
- <p>In this article, we have shown you how to download chess apk android from different sources. We have also reviewed three of the best chess apps for android that you can try today: Chess.com, Chess APK, and Chess for Android. Each of these apps has its own features and advantages that suit different preferences and needs. Here is a table that summarizes their main differences:</p>
- <table border="1">
-<tr><th></th><th>Chess.com</th><th>Chess APK</th><th>Chess for Android</th></tr>
-<tr><td>Size</td><td>Varies with device</td><td>2.5 MB</td><td>1.1 MB</td></tr>
-<tr><td>Online play</td><td>Yes</td><td>Yes</td><td>Yes</td></tr>
-<tr><td>Offline play</td><td>Yes</td><td>Yes</td><td>Yes</td></tr>
-<tr><td>Puzzles and lessons</td><td>Yes</td><td>No</td><td>No</td></tr>
-<tr><td>Game modes and options</td><td>Many</td><td>Few</td><td>Few</td></tr>
-<tr><td>Social and live features</td><td>Many</td><td>No</td><td>No</td></tr>
-<tr><td>Third-party engines support</td><td>No</td><td>No</td><td>Yes</td></tr>
-<tr><td>Electronic boards support</td><td>No</td><td>No</td><td>Yes</td></tr>
-<tr><td>Online servers support</td><td>No</td><td>No</td><td>Yes</td></tr>
-</table>
- <p>So, which chess app for android is the best for you? That depends on your personal preference and goal. If you want a comprehensive and social chess app that offers you many features and options, you might want to go with Chess.com. If you want a simple and lightweight chess game that you can play offline or online, you might want to try Chess APK. If you want a versatile and advanced chess app that supports third-party engines, electronic boards, and online servers, you might want to check out Chess for Android.</p>
-<p>Whatever you choose, we hope you have fun and learn something new from playing chess on your phone. Chess is a wonderful game that can enrich your mind and your life. So, what are you waiting for? Download chess apk android today and start playing!</p>
- <h2>Frequently Asked Questions</h2>
-<p>Here are some of the frequently asked questions about chess download apk android:</p>
-<h3>What is an APK file?</h3>
-<p>An APK file is an Android Package file that contains the installation files for an Android app. You can download APK files from various sources, such as Google Play Store, third-party websites, or directly from the app developers. To install an APK file on your phone, you need to enable the "Unknown sources" option in your settings and follow the instructions on the screen.</p>
-<h3>Is it safe to download chess apk android from third-party websites?</h3>
-<p>It depends on the source and the file. Some third-party websites may offer malicious or infected APK files that can harm your phone or steal your data.
Therefore, you should always be careful and cautious when downloading APK files from unknown sources. You should also scan the file with an antivirus software before installing it.</p> -<h3>How can I update my chess apk android?</h3> -<p>If you downloaded your chess apk android from Google Play Store, you can update it automatically or manually through the store. If you downloaded it from another source, you need to check the website or the app developer for any updates and download the latest version of the APK file.</p> -<h3>How can I uninstall my chess apk android?</h3> -<p>If you want to uninstall your chess apk android, you can do so by following these steps:</p> -<ol> -<li>Go to your phone settings and tap on "Apps" or "Applications".</li> -<li>Find and tap on the chess app that you want to uninstall.</li> -<li>Tap on "Uninstall" and confirm your action.</li> -<li>Wait for the app to be removed from your phone.</li> -</ol> -<h3>How can I improve my chess skills?</h3> -<p>If you want to improve your chess skills, there are many ways to do so. Here are some tips that can help you:</p> -<ul> -<li>Play regularly and practice with different opponents and levels of difficulty.</li> -<li>Solve puzzles and take lessons to learn new strategies and tactics.</li> -<li>Analyze your games and mistakes and learn from them.</li> -<li>Read books and watch videos about chess theory and history.</li> -<li>Join clubs and tournaments and interact with other chess players.</li> -</ul></p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Festival 2016 Nightwishs Epic Setlist and Performance.md b/spaces/fatiXbelha/sd/Download Festival 2016 Nightwishs Epic Setlist and Performance.md deleted file mode 100644 index 2171de368dbfe1aaa26f38e64f23eb25e2b5ebab..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Festival 2016 Nightwishs Epic Setlist and Performance.md +++ /dev/null @@ -1,74 +0,0 @@ - -<h1>Nightwish Download 2016 Setlist: A Review of the Symphonic Metal Giants' Performance</h1> - <p>Nightwish is a Finnish symphonic metal band that has been making waves in the music scene since 1996. They are known for their epic soundscapes, operatic vocals, and lyrical themes inspired by fantasy, mythology, and science. They have sold over nine million records worldwide and won numerous awards, including five number one albums and 13 chart-topping singles in Finland.</p> - <p>Download Festival is a British rock festival that takes place every year at Donington Park in Leicestershire. It is one of the most popular and prestigious festivals in Europe, attracting over 100,000 fans and featuring some of the biggest names in rock and metal music. In 2016, Download Festival celebrated its 14th edition with headliners Rammstein, Black Sabbath, and Iron Maiden.</p> -<h2>nightwish download 2016 setlist</h2><br /><p><b><b>DOWNLOAD</b> 🌟 <a href="https://urllie.com/2uNHdo">https://urllie.com/2uNHdo</a></b></p><br /><br /> - <p>On June 12, 2016, Nightwish took the stage at Download Festival as the main stage sub-headliners to Iron Maiden. It was their third appearance at the festival and their first with their current vocalist Floor Jansen, who joined the band in 2013. How did they perform and what songs did they play? Let's find out.</p> - <h2>How did Nightwish perform at Download Festival 2016?</h2> - <p>Nightwish delivered a stunning performance that showcased their musical prowess, versatility, and charisma. 
They played a 75-minute set that spanned their entire career, from their debut album Angels Fall First to their latest release Endless Forms Most Beautiful. They also featured a special guest appearance by renowned scientist and author Richard Dawkins, who narrated parts of their songs "Weak Fantasy" and "The Greatest Show on Earth".</p> - <p>The band was in top form, playing with precision, passion, and power. Jansen proved to be a worthy successor to the previous singers Tarja Turunen and Anette Olzon, displaying her impressive vocal range, emotion, and stage presence. She also interacted well with the crowd, encouraging them to sing along, clap, and jump. The other members of the band were equally impressive: Tuomas Holopainen on keyboards, Emppu Vuorinen on guitars, Marco Hietala on bass and vocals, Troy Donockley on pipes and whistles, and Kai Hahto on drums.</p> - <h2>What songs did they play and what was the reaction of the crowd?</h2> - <p>Nightwish played a total of 14 songs, covering different styles and moods. They opened with "Shudder Before the Beautiful", a bombastic anthem that set the tone for the rest of the show. They followed with "Yours Is an Empty Hope", a heavy and aggressive song that contrasted with Jansen's angelic voice. They then played "Storytime", one of their most popular songs from their 2011 album Imaginaerum, which was greeted with cheers and applause by the fans.</p> - <p>The band then slowed down with "My Walden", a folk-inspired song that featured Donockley's pipes and whistles. They continued with "Élan", a catchy and uplifting song that had the crowd singing along to the chorus. They then played "Weak Fantasy", a dark and powerful song that featured Dawkins' narration about religion and superstition.</p> - <p>The next song was "Ever Dream", a classic ballad from their 2002 album Century Child, which showcased Jansen's soaring vocals and Vuorinen's melodic guitar solo. They then played "I Want My Tears Back", a lively and fun song that had the crowd clapping and jumping. 
They followed with "Nemo", another ballad from their 2004 album Once, which was one of their biggest hits.</p> -<p>nightwish download festival 2016 setlist<br /> -nightwish graspop metal meeting 2016 setlist<br /> -nightwish donington park 2016 setlist<br /> -nightwish endless forms most beautiful tour 2016 setlist<br /> -nightwish boeretang dessel belgium 2016 setlist<br /> -nightwish download 2016 shudder before the beautiful<br /> -nightwish download 2016 yours is an empty hope<br /> -nightwish download 2016 storytime<br /> -nightwish download 2016 my walden<br /> -nightwish download 2016 élan<br /> -nightwish download 2016 weak fantasy<br /> -nightwish download 2016 ever dream<br /> -nightwish download 2016 i want my tears back<br /> -nightwish download 2016 nemo<br /> -nightwish download 2016 ghost love score<br /> -nightwish download 2016 last ride of the day<br /> -nightwish download 2016 the greatest show on earth<br /> -nightwish graspop 2016 sahara<br /> -nightwish graspop 2016 roll tide hans zimmer song<br /> -nightwish graspop 2016 dawkins narration<br /> -nightwish donington park castle donington england setlist<br /> -nightwish download festival sunday june 12 setlist<br /> -nightwish main stage sub headliners to iron maiden setlist<br /> -nightwish third appearance at download festival setlist<br /> -nightwish boeretang dessel belgium saturday june 18 setlist<br /> -nightwish free setlists on setlist fm website<br /> -nightwish edit this setlist more nightwish setlists link<br /> -nightwish share or embed this setlist html code link<br /> -nightwish concert photos videos reviews from fans link<br /> -nightwish live at download festival youtube video link<br /> -how to get tickets for nightwish download festival 2016<br /> -where to buy merch for nightwish download festival 2016<br /> -what to expect from nightwish download festival 2016 performance<br /> -who are the other bands playing with nightwish at download festival 2016<br /> -how to meet and greet with nightwish at download festival 2016<br /> -what are the best songs from nightwish endless forms most beautiful album<br /> -what are the lyrics and meanings of nightwish songs at download festival 2016<br /> -what are the reviews and ratings of nightwish download festival 2016 show<br /> -what are the facts and trivia about nightwish and their members <br /> -what are the influences and inspirations of nightwish music and lyrics</p> - <p>The band then played "Ghost Love Score", a 10-minute epic from their 2004 album Once, which was one of the highlights of the show. The song featured multiple sections, ranging from soft piano melodies to heavy guitar riffs, from operatic vocals to growls. The song ended with a stunning climax that had Jansen hitting a high note that lasted for several seconds, earning her a standing ovation from the crowd.</p> - <p>The band then played "Last Ride of the Day", a fast and energetic song from their 2011 album Imaginaerum, which was dedicated to the fans and the festival. They closed their set with "The Greatest Show on Earth", a 24-minute masterpiece from their 2015 album Endless Forms Most Beautiful, which was inspired by the evolution of life on Earth. The song featured Dawkins' narration again, as well as samples of animal sounds and orchestral arrangements. 
The song was a fitting finale for the band's performance, as it celebrated the beauty and diversity of nature and music.</p> - <h2>How did their performance compare to their previous shows and albums?</h2> - <p>Nightwish's performance at Download Festival 2016 was one of their best shows ever, according to many fans and critics. They proved that they are still one of the leading bands in the symphonic metal genre, despite the changes in their lineup and style over the years. They also showed that they are not afraid to experiment with different sounds and concepts, while staying true to their roots and identity.</p> - <p>Their performance also reflected their musical evolution and growth, as they played songs from different eras and albums. They demonstrated their versatility and diversity, as they played songs that ranged from folk to metal, from ballads to epics, from simple to complex. They also displayed their cohesion and chemistry, as they played with passion and harmony.</p> - <h2>Conclusion</h2> - <p>Nightwish's performance at Download Festival 2016 was a memorable and impressive show that delighted the fans and impressed the critics. They played a setlist that covered their entire career, from their debut album to their latest release. They also featured a special guest appearance by Richard Dawkins, who added an extra dimension to their songs. They played with skill, energy, and emotion, and delivered a stunning performance that showcased their musical prowess, versatility, and charisma.</p> - <p>If you are a fan of Nightwish or symphonic metal in general, you should definitely check out their performance at Download Festival 2016. You can watch it on YouTube or buy the DVD or Blu-ray of their live album Vehicle of Spirit, which includes their show at Download Festival 2016 as well as their show at Wembley Arena in London. You will not regret it.</p> - <h2>FAQs</h2> - <h3>Who is the current singer of Nightwish?</h3> -<p>The current singer of Nightwish is Floor Jansen, who joined the band in 2013. She is a Dutch singer who has also sung for other bands such as After Forever and ReVamp. She is known for her powerful and versatile voice, which can sing in different styles and languages.</p> - <h3>What is the meaning of the name Nightwish?</h3> -<p>The name Nightwish was chosen by Tuomas Holopainen, the founder and keyboardist of the band. He said that he got the idea from a song called "Nightwish" by a Finnish band called Nattvindens Gråt. He liked the sound and the feeling of the word, and thought it suited his band's style and vision.</p> - <h3>What is symphonic metal?</h3> -<p>Symphonic metal is a subgenre of metal music that combines elements of classical music, such as orchestral arrangements, choirs, operatic vocals, and keyboards, with elements of heavy metal, such as distorted guitars, drums, bass, and growls. Symphonic metal often features lyrical themes inspired by fantasy, mythology, literature, history, and science.</p> - <h3>What are some other bands that play symphonic metal?</h3> -<p>Some other bands that play symphonic metal are Epica, Within Temptation, Therion, Kamelot, Delain, Sonata Arctica, Rhapsody of Fire, Apocalyptica, Dimmu Borgir, and Amaranthe.</p> - <h3>When will Nightwish release their next album?</h3> -<p>Nightwish has not announced any official plans for their next album yet. However, they have said that they are working on new material and that they hope to release it in 2021 or 2022. 
They have also said that they will tour again once the COVID-19 pandemic is over.</p> 197e85843d<br />
-<br />
-<br />
\ No newline at end of file
diff --git "a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\346\225\260\345\255\246\345\212\250\347\224\273\347\224\237\346\210\220manim.py" "b/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\346\225\260\345\255\246\345\212\250\347\224\273\347\224\237\346\210\220manim.py" deleted file mode 100644 index 26e61b1b3032c180b4cb59625eba00d5f7b7c441..0000000000000000000000000000000000000000 --- "a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\346\225\260\345\255\246\345\212\250\347\224\273\347\224\237\346\210\220manim.py" +++ /dev/null @@ -1,187 +0,0 @@
-from toolbox import CatchException, update_ui, gen_time_str
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-from .crazy_utils import input_clipping
-
-def inspect_dependency(chatbot, history):
-    # Try to import the dependency; if it is missing, suggest how to install it
-    try:
-        import manim
-        return True
-    except:
-        chatbot.append(["Failed to import dependencies", "This module needs extra dependencies. Install them with: ```pip install manim manimgl```"])
-        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-        return False
-
-def eval_manim(code):
-    import subprocess, sys, os, shutil
-
-    with open('gpt_log/MyAnimation.py', 'w', encoding='utf8') as f:
-        f.write(code)
-
-    def get_class_name(class_string):
-        import re
-        # Use regex to extract the class name
-        class_name = re.search(r'class (\w+)\(', class_string).group(1)
-        return class_name
-
-    class_name = get_class_name(code)
-
-    try:
-        subprocess.check_output([sys.executable, '-c', f"from gpt_log.MyAnimation import {class_name}; {class_name}().render()"])
-        # manim renders to media/videos/1080p60/<ClassName>.mp4; move it into
-        # gpt_log and return that same path
-        output_path = f'gpt_log/{class_name}-{gen_time_str()}.mp4'
-        shutil.move(f'media/videos/1080p60/{class_name}.mp4', output_path)
-        return output_path
-    except subprocess.CalledProcessError as e:
-        output = e.output.decode()
-        print(f"Command returned non-zero exit status {e.returncode}: {output}.")
-        return f"Evaluating python script failed: {e.output}."
-    except:
-        print('generating mp4 failed')
-        return "Generating mp4 failed."
-
-
-def get_code_block(reply):
-    import re
-    pattern = r"```([\s\S]*?)```"  # regex pattern to match code blocks
-    matches = re.findall(pattern, reply)  # find all code blocks in text
-    if len(matches) != 1:
-        raise RuntimeError("GPT is not generating proper code.")
-    code = matches[0]
-    # drop the leading language tag of the fence, if present
-    if code.startswith('python'):
-        code = code[len('python'):]
-    return code
-
-@CatchException
-def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-    """
-    txt             text typed by the user in the input box, e.g. a passage to translate or a path containing files to process
-    llm_kwargs      GPT model parameters such as temperature and top_p, usually passed through unchanged
-    plugin_kwargs   parameters for the plugin, currently unused
-    chatbot         handle of the chat display box, used to show output to the user
-    history         chat history, i.e. the context so far
-    system_prompt   silent reminder for GPT
-    web_port        port the software is currently running on
-    """
-    # Clear the history to avoid overflowing the input
-    history = []
-
-    # Basic info: feature description, contributor
-    chatbot.append([
-        "What does this plugin do?",
-        "Generates math animations. This plugin is still under development and not recommended for use yet. Author: binary-husky. Initializing plugin ..."
-    ])
-    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-
-    # Try to import the dependency; if it is missing, suggest how to install it
-    dep_ok = yield from inspect_dependency(chatbot=chatbot, history=history)  # refresh the UI
-    if not dep_ok: return
-
-    # Input
-    i_say = f'Generate an animation to show: ' + txt
-    demo = ["Here are some examples of manim", examples_of_manim()]
-    _, demo = input_clipping(inputs="", history=demo, max_token_limit=2560)
-    # Start
-    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
-        inputs=i_say, inputs_show_user=i_say,
-        llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo,
-        sys_prompt=
-        r"Write an animation script with 3blue1brown's manim. 
"+ - r"Please begin with `from manim import *`. " + - r"Answer me with a code block wrapped by ```." - ) - chatbot.append(["开始生成动画", "..."]) - history.extend([i_say, gpt_say]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - - # 将代码转为动画 - code = get_code_block(gpt_say) - res = eval_manim(code) - - chatbot.append(("生成的视频文件路径", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - -# 在这里放一些网上搜集的demo,辅助gpt生成代码 -def examples_of_manim(): - return r""" - - -``` - -class MovingGroupToDestination(Scene): - def construct(self): - group = VGroup(Dot(LEFT), Dot(ORIGIN), Dot(RIGHT, color=RED), Dot(2 * RIGHT)).scale(1.4) - dest = Dot([4, 3, 0], color=YELLOW) - self.add(group, dest) - self.play(group.animate.shift(dest.get_center() - group[2].get_center())) - self.wait(0.5) - -``` - - -``` - -class LatexWithMovingFramebox(Scene): - def construct(self): - text=MathTex( - "\\frac{d}{dx}f(x)g(x)=","f(x)\\frac{d}{dx}g(x)","+", - "g(x)\\frac{d}{dx}f(x)" - ) - self.play(Write(text)) - framebox1 = SurroundingRectangle(text[1], buff = .1) - framebox2 = SurroundingRectangle(text[3], buff = .1) - self.play( - Create(framebox1), - ) - self.wait() - self.play( - ReplacementTransform(framebox1,framebox2), - ) - self.wait() - -``` - - - -``` - -class PointWithTrace(Scene): - def construct(self): - path = VMobject() - dot = Dot() - path.set_points_as_corners([dot.get_center(), dot.get_center()]) - def update_path(path): - previous_path = path.copy() - previous_path.add_points_as_corners([dot.get_center()]) - path.become(previous_path) - path.add_updater(update_path) - self.add(path, dot) - self.play(Rotating(dot, radians=PI, about_point=RIGHT, run_time=2)) - self.wait() - self.play(dot.animate.shift(UP)) - self.play(dot.animate.shift(LEFT)) - self.wait() - -``` - -``` - -# do not use get_graph, this funciton is deprecated - -class ExampleFunctionGraph(Scene): - def construct(self): - cos_func = FunctionGraph( - lambda t: np.cos(t) + 0.5 * np.cos(7 * t) + (1 / 7) * np.cos(14 * t), - color=RED, - ) - - sin_func_1 = FunctionGraph( - lambda t: np.sin(t) + 0.5 * np.sin(7 * t) + (1 / 7) * np.sin(14 * t), - color=BLUE, - ) - - sin_func_2 = FunctionGraph( - lambda t: np.sin(t) + 0.5 * np.sin(7 * t) + (1 / 7) * np.sin(14 * t), - x_range=[-4, 4], - color=GREEN, - ).move_to([0, 1, 0]) - - self.add(cos_func, sin_func_1, sin_func_2) - -``` -""" \ No newline at end of file diff --git a/spaces/fb700/chatglm-fitness-RLHF/src/face3d/data/image_folder.py b/spaces/fb700/chatglm-fitness-RLHF/src/face3d/data/image_folder.py deleted file mode 100644 index efadc2ecbe2fb4b53b78230aba25ec505eff0e55..0000000000000000000000000000000000000000 --- a/spaces/fb700/chatglm-fitness-RLHF/src/face3d/data/image_folder.py +++ /dev/null @@ -1,66 +0,0 @@ -"""A modified image folder class - -We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py) -so that this class can load images from both current directory and its subdirectories. 
-""" -import numpy as np -import torch.utils.data as data - -from PIL import Image -import os -import os.path - -IMG_EXTENSIONS = [ - '.jpg', '.JPG', '.jpeg', '.JPEG', - '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', - '.tif', '.TIF', '.tiff', '.TIFF', -] - - -def is_image_file(filename): - return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) - - -def make_dataset(dir, max_dataset_size=float("inf")): - images = [] - assert os.path.isdir(dir) or os.path.islink(dir), '%s is not a valid directory' % dir - - for root, _, fnames in sorted(os.walk(dir, followlinks=True)): - for fname in fnames: - if is_image_file(fname): - path = os.path.join(root, fname) - images.append(path) - return images[:min(max_dataset_size, len(images))] - - -def default_loader(path): - return Image.open(path).convert('RGB') - - -class ImageFolder(data.Dataset): - - def __init__(self, root, transform=None, return_paths=False, - loader=default_loader): - imgs = make_dataset(root) - if len(imgs) == 0: - raise(RuntimeError("Found 0 images in: " + root + "\n" - "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))) - - self.root = root - self.imgs = imgs - self.transform = transform - self.return_paths = return_paths - self.loader = loader - - def __getitem__(self, index): - path = self.imgs[index] - img = self.loader(path) - if self.transform is not None: - img = self.transform(img) - if self.return_paths: - return img, path - else: - return img - - def __len__(self): - return len(self.imgs) diff --git a/spaces/felixfrosch/deep_learning_assignment/README.md b/spaces/felixfrosch/deep_learning_assignment/README.md deleted file mode 100644 index 14d8abd9ee4b9922c100ef32bf4b70138b073690..0000000000000000000000000000000000000000 --- a/spaces/felixfrosch/deep_learning_assignment/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Assignment 4 -emoji: 📚 -colorFrom: gray -colorTo: yellow -sdk: gradio -sdk_version: 3.38.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/editings/ganspace.py b/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/editings/ganspace.py deleted file mode 100644 index 0c286a421280c542e9776a75e64bb65409da8fc7..0000000000000000000000000000000000000000 --- a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/editings/ganspace.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch - - -def edit(latents, pca, edit_directions): - edit_latents = [] - for latent in latents: - for pca_idx, start, end, strength in edit_directions: - delta = get_delta(pca, latent, pca_idx, strength) - delta_padded = torch.zeros(latent.shape).to('cuda') - delta_padded[start:end] += delta.repeat(end - start, 1) - edit_latents.append(latent + delta_padded) - return torch.stack(edit_latents) - - -def get_delta(pca, latent, idx, strength): - # pca: ganspace checkpoint. 
latent: (16, 512) w+ - w_centered = latent - pca['mean'].to('cuda') - lat_comp = pca['comp'].to('cuda') - lat_std = pca['std'].to('cuda') - w_coord = torch.sum(w_centered[0].reshape(-1)*lat_comp[idx].reshape(-1)) / lat_std[idx] - delta = (strength - w_coord)*lat_comp[idx]*lat_std[idx] - return delta diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Apk Day MovieStarPlanet APK Download - Free Simulation Game for Android.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Apk Day MovieStarPlanet APK Download - Free Simulation Game for Android.md deleted file mode 100644 index cd0954a9989c0d2c159b5793ba213d3cafe8d1ef..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Apk Day MovieStarPlanet APK Download - Free Simulation Game for Android.md +++ /dev/null @@ -1,153 +0,0 @@ -<br /> -<h1>Apk Dayı Moviestarplanet: A Guide for Beginners</h1> -<p>Are you looking for a fun and creative game that lets you become a star and interact with other players? If so, you might have heard of Moviestarplanet, a popular social network and game for kids. But did you know that there is a modded version of the game called Apk Dayı Moviestarplanet that offers more features and benefits? In this article, we will explain what Moviestarplanet is, what Apk Dayı Moviestarplanet is, how to download and install it, how to play it, and some tips and tricks to make the most of it. We will also answer some frequently asked questions about the game.</p> -<h2>apk dayı moviestarplanet</h2><br /><p><b><b>Download</b> ✓ <a href="https://gohhs.com/2uPnmN">https://gohhs.com/2uPnmN</a></b></p><br /><br /> - <h2>What is Moviestarplanet?</h2> -<p>Moviestarplanet is a game that was launched in 2009 by MovieStarPlanet Apps, a Danish company. It is available for mobile devices and PC, and it has millions of users worldwide. The game is aimed at children and teens who want to express their creativity and socialize with other players. Here are some of the main features of Moviestarplanet:</p> - <h3>A social network and game for kids</h3> -<p>Moviestarplanet is not just a game, but also a social network where players can create their own avatars, called moviestars, and customize them with clothes, accessories, hairstyles, and more. They can also chat with other moviestars in various chat rooms, make friends, send messages, join clubs, and participate in events. The game is designed to be family-friendly and safe for kids, as it has strict rules and moderation systems to prevent inappropriate content and behavior.</p> - <h3>A platform to create and share content</h3> -<p>Moviestarplanet is also a platform where players can unleash their creativity and make their own content. They can create movies, artbooks, photos, looks, fashion designs, animations, stickers, and more using the tools provided by the game. They can also watch and rate other players' content, earn fame and starcoins (the in-game currency), get awards, and rise to stardom.</p> - <h3>A place to become a celebrity and make friends</h3> -<p>Moviestarplanet is a place where players can fulfill their dreams of becoming famous celebrities. They can dress up like their favorite stars, play mini-games, watch YouTube videos, shop for new items, remodel their rooms, take care of their pets, and more. 
They can also make friends with other players who share their interests and hobbies, join clubs, chat with them, send gifts, trade items, and have fun together.</p> - <h2>What is Apk Dayı Moviestarplanet?</h2> -<p>Apk Dayı Moviestarplanet is a modded version of the original game that offers some extra features and benefits that are not available in the official version. A mod apk is an altered or modified version of an app that changes some aspects of the app's functionality or appearance. Apk Dayı Moviestarplanet is one of the many mod apks that exist for Moviestarplanet. Here are some of the features and benefits of Apk Dayı Moviestarplanet:</p> - <h3>A modded version of the original game</h3> -<p>Apk Dayı Moviestarplanet is based on the original game but has some changes and additions that make it different from the original game. For example, Apk Dayı Moviestarplanet has a different interface, more items, more options, and more features than the official game. It also has some cheats and hacks that allow players to get unlimited starcoins, diamonds, VIP status, and more for free.</p> - <h3>The features and benefits of the mod apk</h3> -<p>Some of the features and benefits of Apk Dayı Moviestarplanet are:</p> -<ul> -<li>It is free to download and use.</li> -<li>It has unlimited starcoins, diamonds, VIP status, and other resources that can be used to buy items, upgrade the avatar, and access premium features.</li> -<li>It has more items, clothes, accessories, hairstyles, pets, and more than the official game.</li> -<li>It has more options and features to customize the avatar, the room, the content, and the game settings.</li> -<li>It has some cheats and hacks that can help players to win mini-games, get awards, earn fame, and become celebrities faster.</li> -<li>It has a different interface that is more user-friendly and attractive than the official game.</li> -</ul> - <h3>The risks and drawbacks of the mod apk</h3> -<p>However, Apk Dayı Moviestarplanet also has some risks and drawbacks that players should be aware of before downloading and installing it. 
Some of the risks and drawbacks of Apk Dayı Moviestarplanet are:</p>
-<ul>
-<li>It is not an official version of the game and it is not endorsed or supported by MovieStarPlanet Apps.</li>
-<li>It may contain viruses, malware, spyware, or other harmful software that can damage the device or compromise the personal data of the user.</li>
-<li>It may not be compatible with some devices or operating systems and it may cause crashes, glitches, errors, or bugs in the game.</li>
-<li>It may violate the terms and conditions of the original game and it may result in the account being banned, suspended, or deleted by the game moderators.</li>
-<li>It may not be updated regularly and it may not work with the latest version of the original game or with new features or content added by the game developers.</li>
-<li>It may ruin the fun and challenge of the game by making it too easy or unfair for other players who play with the official version of the game.</li>
-</ul>
- <h2>How to download and install Apk Dayı Moviestarplanet?</h2>
-<p>If you want to try Apk Dayı Moviestarplanet, you will need to download and install it on your device. However, you should be careful and follow some steps and precautions to avoid any problems or issues.
However, you should be careful and follow some steps and precautions to avoid any problems or issues. Here are some steps and precautions to download and install Apk Dayı Moviestarplanet:</p> - <h3>The steps to follow</h3> -<ol> -<li>First, you will need to uninstall the official version of Moviestarplanet from your device if you have it installed. This is because you cannot have both versions of the game on your device at the same time.</li> -<li>Second, you will need to enable the option to install apps from unknown sources on your device. This is because Apk Dayı Moviestarplanet is not available on the official app stores like Google Play or App Store. To enable this option, you will need to go to your device settings, security settings, and toggle on the option to allow installation from unknown sources.</li> -<li>Third, you will need to find a reliable source to download Apk Dayı Moviestarplanet. You can search online for websites that offer mod apks for various games. However, you should be careful and check the reviews, ratings, comments, and feedback of other users before downloading anything. You should also scan the file with an antivirus software before opening it.</li> -<li>Fourth, you will need to download Apk Dayı Moviestarplanet on your device. You can do this by clicking on the download link or button provided by the website. You may also need to complete some verification steps or surveys before downloading. You should also make sure that you have enough storage space on your device for the file.</li> -<li>Fifth, you will need to install Apk Dayı Moviestarplanet on your device. You can do this by opening the file that you downloaded and following the instructions on the screen. You may need to agree to some terms and conditions or grant some permissions to the app. You should also make sure that you have a stable internet connection for the installation process.</li> -<li>Sixth, you will need to launch Apk Dayı Moviestarplanet on your device. You can do this by tapping on the app icon on your home screen or app drawer. You may need to sign in with your existing account or create a new one. You may also need to update the app or download some additional data before playing.</li> -</ol> - <h3>The precautions to take</h3> -<p>As mentioned before, Apk Dayı Moviestarplanet is not an official version of the game and it may have some risks and drawbacks. Therefore, you should take some precautions to avoid any problems or issues. Here are some precautions to take:</p> -<ul> -<li>You should backup your device data before downloading and installing Apk Dayı Moviestarplanet. This is because the app may cause some damage or loss of data on your device. You can use a cloud service or an external storage device to backup your data.</li> -<li>You should not use your main account or personal information to play Apk Dayı Moviestarplanet. This is because the app may compromise your account security or privacy. You can use a fake or secondary account or information to play the game.</li> -<li>You should not use Apk Dayı Moviestarplanet to cheat or hack the original game or other players. This is because the app may violate the rules and ethics of the game and it may result in your account being banned, suspended, or deleted by the game moderators. You can use Apk Dayı Moviestarplanet for fun and entertainment purposes only.</li> -<li>You should not download or install Apk Dayı Moviestarplanet from untrusted sources or websites. 
This is because the app may contain viruses, malware, spyware, or other harmful software that can damage your device or compromise your personal data. You can use reputable sources or websites that have positive reviews, ratings, comments, and feedback from other users.</li> -<li>You should not update Apk Dayı Moviestarplanet from the official app stores like Google Play or App Store. This is because the app may not be compatible with the latest version of the original game or with new features or content added by the game developers. You can update Apk Dayı Moviestarplanet from the same source or website that you downloaded it from.</li> -</ul> - <h3>The alternatives to consider</h3> -<p>If you are not comfortable with downloading and installing Apk Dayı Moviestarplanet, you can consider some alternatives that are similar to the game but are official and safe. Here are some alternatives to consider:</p> -<table> -<tr><th>Name</th><th>Description</th></tr> -<tr><td>Moviestarplanet 2</td><td>A sequel to Moviestarplanet that has improved graphics, gameplay, features, and content. It is available for mobile devices and PC.</td></tr> -<tr><td>Roblox</td><td>A platform where players can create and play games of various genres and themes. It is available for mobile devices, PC, and consoles.</td></tr> -<tr><td>Avakin Life</td><td>A virtual world where players can create and customize their avatars, explore different locations, chat with other players, and shop for items. It is available for mobile devices.</td></tr> -<tr><td>IMVU</td><td>A social network where players can create and personalize their avatars, join chat rooms, make friends, and express themselves. It is available for mobile devices and PC.</td></tr> -<tr><td>Gacha Life</td><td>A game where players can create and dress up their characters, play mini-games, watch videos, chat with other players, and make stories. It is available for mobile devices and PC.</td></tr> -</table> - <h2>How to play Apk Dayı Moviestarplanet?</h2> -<p>Once you have downloaded and installed Apk Dayı Moviestarplanet on your device, you can start playing it and enjoy its features and benefits. However, you should also know how to play it properly and safely. Here are some basics of the game and some tips and tricks to succeed:</p> - <h3>The basics of the game</h3> -<p>The basics of Apk Dayı Moviestarplanet are similar to those of the original game. You will need to create your own moviestar avatar and customize it with clothes, accessories, hairstyles, and more. You will also need to create your own content such as movies, artbooks, photos, looks, fashion designs, animations, stickers, and more using the tools provided by the game. You will also need to watch and rate other players' content, earn fame and starcoins, get awards, and rise to stardom. You will also need to chat with other moviestars in various chat rooms, make friends, send messages, join clubs, and participate in events. You will also need to play mini-games, watch YouTube videos, shop for new items, remodel your room, take care of your pet, and more. You will also need to use the mod apk features and benefits to enhance your game experience.</p> - <h3>The tips and tricks to succeed</h3> -<p>Some of the tips and tricks to succeed in Apk Dayı Moviestarplanet are:</p> -<ul> -<li>Use the unlimited starcoins, diamonds, VIP status, and other resources wisely. Don't spend them all at once or waste them on unnecessary items. 
Save them for the items that you really want or need.</li> -<li>Use the cheats and hacks sparingly. Don't use them too often or too blatantly. Don't abuse them to harass or bully other players or to gain an unfair advantage. Use them for fun and entertainment purposes only.</li> -<li>Use the more items, options, and features creatively. Don't copy or imitate other players' content or style. Be original and unique. Express your personality and preferences. Showcase your talents and skills.</li> -<li>Use the different interface intuitively. Don't get confused or overwhelmed by the new layout or design. Learn how to navigate and use the app smoothly and efficiently.</li> -<li>Be friendly and respectful to other players. Don't spam, scam, troll, or flame them. Don't use profanity, hate speech, or personal attacks. Don't share your personal information or ask for theirs. Be polite, kind, and helpful.</li> -<li>Be careful and cautious when playing the game. Don't download or install anything from suspicious sources or websites. Don't click on any links or pop-ups that appear on the app. Don't give your account details or password to anyone. Don't update the app from the official app stores.</li> -</ul> - <h3>The best practices to stay safe and have fun</h3> -<p>Some of the best practices to stay safe and have fun in Apk Dayı Moviestarplanet are:</p> -<ul> -<li>Play the game in moderation. Don't spend too much time or money on the game. Don't neglect your other responsibilities or hobbies. Don't let the game affect your physical or mental health.</li> -<li>Play the game with parental supervision or permission. If you are under 18 years old, you should ask your parents or guardians before downloading and installing Apk Dayı Moviestarplanet. You should also follow their rules and guidelines when playing the game.</li> -<li>Play the game with friends or family. You can invite your friends or family members to play Apk Dayı Moviestarplanet with you. You can also join clubs or chat rooms that are related to your interests or hobbies. You can also make new friends with other players who are friendly and respectful.</li> -<li>Play the game with a positive attitude. Don't let the game stress you out or frustrate you. Don't take the game too seriously or personally. Don't compare yourself with other players or feel jealous of their achievements. Enjoy the game for what it is: a fun and creative game that lets you become a star and interact with other players.</li> -</ul> - <h2>Conclusion</h2> -<p>In conclusion, Apk Dayı Moviestarplanet is a modded version of Moviestarplanet that offers more features and benefits than the original game. However, it also has some risks and drawbacks that players should be aware of before downloading and installing it. Therefore, players should be careful and follow some steps and precautions to avoid any problems or issues. They should also know how to play it properly and safely, and follow some tips and tricks to succeed in it. They should also consider some alternatives that are similar to the game but are official and safe.</p> - <h2>FAQs</h2> -<p>Here are some frequently asked questions about Apk Dayı Moviestarplanet:</p> - <h4>Q: Is Apk Dayı Moviestarplanet legal?</h4> -<p>A: Apk Dayı Moviestarplanet is not legal, as it is not an official version of the game and it violates the terms and conditions of the original game. 
It may also infringe on the intellectual property rights of MovieStarPlanet Apps.</p> - <h4>Q: Is Apk Dayı Moviestarplanet safe?</h4> -<p>A: Apk Dayı Moviestarplanet is not safe, as it may contain viruses, malware, spyware, or other harmful software that can damage the device or compromise the personal data of the user. It may also expose the user to inappropriate content or behavior from other players or from the app itself.</p> - <h4>Q: Is Apk Dayı Moviestarplanet fun?</h4> -<p>A: Apk Dayı Moviestarplanet can be fun, as it offers more features and benefits than the original game. It can also enhance the game experience and make it more enjoyable and exciting. However, it can also ruin the fun and challenge of the game by making it too easy or unfair for other players. It can also cause some problems or issues that can affect the game performance or quality.</p> - <h4>Q: How can I get Apk Dayı Moviestarplanet?</h4> -<p>A: You can get Apk Dayı Moviestarplanet by downloading and installing it from a reliable source or website that offers mod apks for various games. However, you should be careful and follow some steps and precautions to avoid any problems or issues. You should also back up your device data, enable the option to install apps from unknown sources, scan the file with antivirus software, uninstall the official version of Moviestarplanet, and follow the instructions on the screen.</p> - <h4>Q: How can I update Apk Dayı Moviestarplanet?</h4> -<p>A: You can update Apk Dayı Moviestarplanet by downloading and installing the latest version of the app from the same source or website that you downloaded it from. However, you should not update Apk Dayı Moviestarplanet from the official app stores like Google Play or App Store, as it may not be compatible with the latest version of the original game or with new features or content added by the game developers.</p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Car Parking 3D Online Drift v5.3.1 Mod APK - The Ultimate Car Parking and Drifting Simulator with Money Hack and Car Mods.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Car Parking 3D Online Drift v5.3.1 Mod APK - The Ultimate Car Parking and Drifting Simulator with Money Hack and Car Mods.md deleted file mode 100644 index ba09cefd39a0406201cb4dd03cfee635dfb5e2fd..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Car Parking 3D Online Drift v5.3.1 Mod APK - The Ultimate Car Parking and Drifting Simulator with Money Hack and Car Mods.md +++ /dev/null @@ -1,93 +0,0 @@ -<br /> -<h1>Car Parking 3D: Online Drift Mod APK v5.3.1 - A Fun and Challenging Racing Game</h1> - <h2>Introduction</h2> - <p>Do you love racing games that test your skills and thrill your senses? Do you want to experience the excitement of drifting and parking in different scenarios? If yes, then you should try Car Parking 3D: Online Drift, a popular racing game that was recently updated to version 5.3.1 with a mod feature that makes it even more fun and rewarding.</p> - <h3>What is Car Parking 3D: Online Drift?</h3> - <p>Car Parking 3D: Online Drift is an engaging racing game that offers players the opportunity to show off their drifting and parking skills. The game has various modes, such as free mode, career mode, online mode, and drift mode, where players can choose from different cars and customize them according to their preferences.
The game also has realistic physics, dynamic weather, and stunning graphics that make the gameplay more immersive and enjoyable.</p> -<h2>car parking 3d online drift mod apk v5.3.1</h2><br /><p><b><b>DOWNLOAD</b> ➡ <a href="https://byltly.com/2uKz8A">https://byltly.com/2uKz8A</a></b></p><br /><br /> - <h3>What are the features of Car Parking 3D: Online Drift?</h3> - <p>Some of the features of Car Parking 3D: Online Drift are:</p> -<ul> -<li>More than 100 cars to choose from, including sports cars, muscle cars, SUVs, trucks, and more.</li> -<li>More than 150 levels to complete, each with different challenges and objectives.</li> -<li>Online multiplayer mode where players can compete with other players from around the world.</li> -<li>Drift mode where players can perform amazing drifts and earn points.</li> -<li>Free mode where players can explore the open world and practice their skills.</li> -<li>Career mode where players can progress through various stages and unlock new cars and upgrades.</li> -<li>Car customization options where players can change the color, wheels, spoilers, exhausts, and more of their cars.</li> -<li>Realistic car sounds, engine noises, tire screeches, and horn sounds.</li> -<li>Different camera angles, including cockpit view, rear view, top view, and more.</li> -<li>Different control options, including tilt, steering wheel, buttons, and gyroscope.</li> -</ul> - <h2>How to download and install Car Parking 3D: Online Drift Mod APK v5.3.1?</h2> - <p>If you want to download and install Car Parking 3D: Online Drift Mod APK v5.3.1 on your Android device, you need to follow these simple steps:</p> - <h3>Step 1: Download the mod APK file from a trusted source</h3> - <p>You can download the mod APK file from a trusted source such as <a href="">HappyMod</a> or <a href="">HappyMod Download</a>. These sources provide safe and verified mod APK files that are free from viruses and malware. You can also scan the file with an antivirus app before installing it.</p> - <h3>Step 2: Enable unknown sources on your device</h3> - <p>Since the mod APK file does not come from Google Play Store, you need to allow installations from unknown sources. To do this, go to your device settings, open the security settings, and toggle on the option to install apps from unknown sources.</p> - <h3>Step 3: Install the mod APK file and enjoy the game</h3> - <p>
This way, you can enjoy the game with your favorite cars and customize them as you wish.</p> -<p>car parking 3d online drift mod apk download<br /> -car parking 3d online drift mod apk unlimited money<br /> -car parking 3d online drift mod apk latest version<br /> -car parking 3d online drift mod apk android 1<br /> -car parking 3d online drift mod apk happymod<br /> -car parking 3d online drift mod apk free download<br /> -car parking 3d online drift mod apk unlocked all cars<br /> -car parking 3d online drift mod apk revdl<br /> -car parking 3d online drift mod apk rexdl<br /> -car parking 3d online drift mod apk no ads<br /> -car parking 3d online drift mod apk offline<br /> -car parking 3d online drift mod apk hack<br /> -car parking 3d online drift mod apk cheat<br /> -car parking 3d online drift mod apk obb<br /> -car parking 3d online drift mod apk data<br /> -car parking 3d online drift mod apk fgames<br /> -car parking 3d online drift mod apk nryaydn95<br /> -car parking 3d online drift mod apk update<br /> -car parking 3d online drift mod apk old version<br /> -car parking 3d online drift mod apk new version<br /> -car parking 3d online drift mod apk for pc<br /> -car parking 3d online drift mod apk for ios<br /> -car parking 3d online drift mod apk for windows<br /> -car parking 3d online drift mod apk for mac<br /> -car parking 3d online drift mod apk for laptop<br /> -car parking 3d online drift mod apk gameplay<br /> -car parking 3d online drift mod apk features<br /> -car parking 3d online drift mod apk review<br /> -car parking 3d online drift mod apk rating<br /> -car parking 3d online drift mod apk size<br /> -car parking 3d online drift mod apk requirements<br /> -car parking 3d online drift mod apk installation<br /> -car parking 3d online drift mod apk tutorial<br /> -car parking 3d online drift mod apk tips and tricks<br /> -car parking 3d online drift mod apk best settings<br /> -car parking 3d online drift mod apk best cars<br /> -car parking 3d online drift mod apk best tracks<br /> -car parking 3d online drift mod apk best mods<br /> -car parking 3d online drift mod apk best graphics<br /> -car parking 3d online drift mod apk best sound effects</p> - <h3>No ads and no root required</h3> - <p>The mod APK also removes all the annoying ads that interrupt your gameplay and distract you from the action. You can play the game without any interruptions or distractions. Moreover, you do not need to root your device to use the mod APK, which means you do not have to risk damaging your device or voiding your warranty.</p> - <h3>Smooth gameplay and realistic graphics</h3> - <p>The mod APK also improves the gameplay and graphics of the game, making it smoother and more realistic. You can experience the thrill of drifting and parking in different environments, such as city streets, parking lots, airports, deserts, and more. You can also enjoy the realistic car sounds, weather effects, and shadows that make the game more immersive and enjoyable.</p> - <h2>Conclusion</h2> - <p>Car Parking 3D: Online Drift is a fun and challenging racing game that lets you show off your drifting and parking skills in various modes and scenarios. By using Car Parking 3D: Online Drift Mod APK v5.3.1, you can enhance your gaming experience with unlimited money, unlocked cars, no ads, no root required, smooth gameplay, and realistic graphics. 
If you are looking for a racing game that is both entertaining and rewarding, you should download and install Car Parking 3D: Online Drift Mod APK v5.3.1 today.</p> - <h2>FAQs</h2> - <p>Here are some frequently asked questions about Car Parking 3D: Online Drift Mod APK v5.3.1:</p> -<ul> -<li><b>Is Car Parking 3D: Online Drift Mod APK v5.3.1 safe to use?</b></li> -<li>Yes, Car Parking 3D: Online Drift Mod APK v5.3.1 is safe to use as long as you download it from a trusted source such as <a href="">HappyMod</a> or <a href="">HappyMod Download</a>. These sources provide verified and secure mod APK files that are free from viruses and malware.</li> -<li><b>Is Car Parking 3D: Online Drift Mod APK v5.3.1 compatible with my device?</b></li> -<li>Car Parking 3D: Online Drift Mod APK v5.3.1 is compatible with most Android devices running Android 4.4 or higher. However, some devices may not support some features or functions of the game due to hardware limitations or software issues.</li> -<li><b>How can I update Car Parking 3D: Online Drift Mod APK v5.3.1?</b></li> -<li>You can update Car Parking 3D: Online Drift Mod APK v5.3.1 by downloading the latest version of the mod APK file from a trusted source such as <a href="">HappyMod</a> or <a href="">HappyMod Download</a>. You can then install the new version over the old one without losing your progress or data.</li> -<li><b>How can I contact the developer of Car Parking 3D: Online Drift?</b></li> -<li>You can contact the developer of Car Parking 3D: Online Drift by visiting their official website at <a href="">https://www.olzhass.com/</a> or by sending them an email at <a href="mailto:support@olzhass.com">support@olzhass.com</a>.</li> -<li><b>How can I rate and review Car Parking 3D: Online Drift?</b></li> -<li>You can rate and review Car Parking 3D: Online Drift by visiting its Google Play Store page at <a href="">https://play.google.com/store/apps/details?id=com.olzhass.carParking3d</a> or by following the link in the game. You can also share your feedback and suggestions with the developer and other players on their Facebook page at <a href="">https://www.facebook.com/olzhassgames</a>.</li> -</ul> 197e85843d<br /> -<br /> -<br /> \ No newline at end of file
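Both the article above and the one that follows tell the reader to verify a downloaded APK file before installing it (checking the source's reputation, scanning the file). One scriptable form of that advice is comparing the file's SHA-256 checksum against a value published by the download page, when one is available. Here is a minimal Node.js/TypeScript sketch of that check; the file name is a placeholder, not a real artifact from these articles:

```typescript
// checksum.ts - print a file's SHA-256 so it can be compared against a published value.
import { createHash } from 'node:crypto'
import { createReadStream } from 'node:fs'

async function sha256(path: string): Promise<string> {
  const hash = createHash('sha256')
  // Stream the file through the hash so a large APK never has to fit in memory at once.
  for await (const chunk of createReadStream(path)) {
    hash.update(chunk as Buffer)
  }
  return hash.digest('hex')
}

// Placeholder name: point this at whatever file was actually downloaded.
sha256('game-mod.apk').then((digest) => console.log(`sha256: ${digest}`))
```

A matching checksum only proves the download was not corrupted or swapped in transit; it says nothing about whether the modded APK itself is trustworthy, which is the larger risk both articles describe.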
diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/CarX Drift Racing 2 APK el juego de carreras ms realista y emocionante para Android.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/CarX Drift Racing 2 APK el juego de carreras ms realista y emocionante para Android.md deleted file mode 100644 index 21dcca51cdf5f8c3c68fb8699a2699df153e4a2b..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/CarX Drift Racing 2 APK el juego de carreras ms realista y emocionante para Android.md +++ /dev/null @@ -1,152 +0,0 @@ - -<h1>Download CarX Drift Racing 2 APK: The Best Drifting Game for Android</h1> -<p>If you like racing games and drifting, you have surely heard of CarX Drift Racing 2, one of the most popular and most downloaded games in the genre. This game offers you a unique driving experience, with impressive graphics, realistic physics, and an online mode for competing against other players. In this article, we explain what CarX Drift Racing 2 is, how to download and install it on your Android device, and why it is worth doing. We will also give you some tips and tricks so you can enjoy this drifting game to the fullest.</p> - <h2>What is CarX Drift Racing 2?</h2> -<p>CarX Drift Racing 2 is a drift racing game developed by CarX Technologies, a company specializing in driving-simulation games. This game is the sequel to the successful CarX Drift Racing, which has more than 100 million fans around the world. CarX Drift Racing 2 offers you the chance to take part in exciting drift races, where you have to control your car with precision and style to impress your fans and earn points. The game has several features that make it unique and fun:</p> -<h2>download carx drift racing 2 apk</h2><br /><p><b><b>Download File</b> 🗸🗸🗸 <a href="https://gohhs.com/2uPrV7">https://gohhs.com/2uPrV7</a></b></p><br /><br /> - <h3>A drift racing game with realistic graphics and advanced physics</h3> -<p>CarX Drift Racing 2 will surprise you with its graphical quality and level of detail. The game features more than 70 different cars, inspired by real models, which you can view from different angles and with light and shadow effects. The settings are also varied and detailed, with tracks ranging from urban circuits to natural landscapes. In addition, the game has an advanced physics system that simulates the real behavior of the cars, the wheels, the asphalt, the sand, the grass, and the snow. You can feel how your car slides, sways, and adapts to each surface, which gives you an incredible sense of immersion.</p> - <h3>An online mode for competing with your friends and other players</h3> -<p>CarX Drift Racing 2 not only lets you play in single-player mode; it also has an online mode where you can compete with your friends and other players in real time. You can create or join online rooms, choose a track, drift, and earn points. You can also watch how other players drift using the drone camera. The game also has a ranking and rewards system that lets you earn valuable prizes for reaching different ranks. Online mode is a fun and challenging way to prove your drifting skill and meet other fans of the genre.</p> - <h3>A system for customizing and tuning your car</h3> -<p>Another of the most attractive features of CarX Drift Racing 2 is that it lets you customize and tune your car to your liking. You can change the color, wheels, vinyls, headlights, spoiler, and other elements of your car to give it a unique look. You can also modify the engine, suspension, brakes, weight, traction, and other parameters of your car to improve its performance and adapt it to your driving style. The game has a test system that lets you see how the changes you make affect your car on the track. This way you can build the car of your dreams and make it the king of drifting.</p> - <h2>How to download CarX Drift Racing 2 APK?</h2> -<p>CarX Drift Racing 2 is a free game that you can download from the Google Play Store. However, if for some reason you cannot access the official store, or you want the latest version of the game before anyone else, you can choose to download the game's APK file from an external source. The APK file is the format that Android devices use to install applications.
To download CarX Drift Racing 2 APK, you must follow these steps:</p> - <h3>The minimum requirements to install the game</h3> -<p>Before downloading the APK file, make sure that your Android device meets the minimum requirements to install the game. These are the minimum requirements according to the game's official page:</p> -<table> -<tr> -<th>Requirement</th> -<th>Value</th> -</tr> -<tr> -<td>Android version</td> -<td>6.0 or higher</td> -</tr> -<tr> -<td>Free storage</td> -<td>1.5 GB or more</td> -</tr> -<tr> -<td>RAM</td> -<td>2 GB or more</td> -</tr> -<tr> -<td>Processor</td> -<td>Quad-core or better</td> -</tr> -<tr> -<td>Internet connection</td> -<td>Required for online mode and updates</td> -</tr> -</table> - <h3>The steps to download and install the APK file</h3> -<p>Once you have confirmed that your device meets the minimum requirements, you can proceed to download and install the APK file by following these steps:</p> - <ol> -<li>Find a reliable source that offers the CarX Drift Racing 2 APK file. You can use a search engine such as Google or Bing to find different options. Some of the most popular and safest pages for downloading APK files are APKPure, Uptodown, and APKMirror.</li> -<li>Open the page you have chosen and look for the CarX Drift Racing 2 APK file.
Make sure it is the most recent version of the game and that it is compatible with your device. You can also read other users' comments and ratings to verify the quality and safety of the file.</li> -<li>Download the APK file to your Android device. You may have to accept some permissions or warnings to start the download. Remember the file's name and location so you can find it later.</li> -<li>Before installing the APK file, you must enable the "Unknown sources" option on your device. This option lets you install applications that do not come from the Google Play Store. To enable it, go to Settings > Security > Unknown sources and turn it on.</li> -<li>Find the APK file you downloaded on your device and open it. Follow the on-screen instructions to complete the installation of the game.</li> -<li>Once the game is installed, you can open it and enjoy it on your Android device.</li> -</ol> - <h3>The precautions you should take when downloading APK files</h3> -<p>Although downloading APK files can have some advantages, it also involves some risks that you should keep in mind. Some of the precautions you should take when downloading APK files are the following:</p> -<p>- Verify the source of the APK file. Do not download APK files from unknown or suspicious pages, since they may contain viruses, malware, or malicious software that can damage your device or steal your information. Look for trustworthy, well-reputed pages that offer safe and up-to-date APK files.</p> -<p>- Check the permissions the APK file requests. When installing an application, pay attention to the permissions it asks for, such as access to your camera, microphone, contacts, location, and so on. If the APK file asks for permissions that have nothing to do with the game, or that seem excessive, do not install it, since it may be a malicious application.</p> -<p>- Keep your device updated and protected. To avoid problems when downloading and installing APK files, it is advisable to keep your Android device updated to the latest version of the operating system and to use an antivirus or security app that helps you detect and remove possible threats.</p> - <h2>Why download CarX Drift Racing 2 APK?</h2> -<p>Now that you know what CarX Drift Racing 2 is and how to download and install it on your Android device, you may be wondering why you should. The answer is simple: because it is one of the best drifting games there is for Android. Downloading CarX Drift Racing 2 APK offers you several advantages, such as the following:</p> - <h3>The advantages of playing the game on your Android device</h3> -<p>Playing CarX Drift Racing 2 on your Android device has many advantages, including the following:</p> -<ul> -<li>You can play the game anywhere and at any time, without needing a console or a computer.
All you need is your Android device and an internet connection.</li> -<li>You can enjoy an optimal gaming experience thanks to your device's touch screen, which lets you control your car easily and precisely.</li> -<li>You can take advantage of your Android device's hardware, such as the gyroscope, accelerometer, speaker, and vibration, for a more realistic sense of immersion.</li> -<li>You can save space and money, since the game is free and takes up less space than other similar games.</li> -</ul> - <h3>The game's most notable features</h3> -<p>CarX Drift Racing 2 has many features that make it unique and fun. Some of the most notable are the following:</p> -<ul> -<li>It has more than 70 different cars to choose from, each with its own characteristics and performance. You can drive everything from sports cars to classic or exotic cars.</li> -<li>It has more than 30 different tracks to race on, each with its own challenges and obstacles. You can race everywhere from urban circuits to rural or mountain tracks.</li> -<li>It has a career mode where you can progress as a drift driver, earning fans, money, and reputation. You can take part in different events and tournaments, and unlock new cars and tracks.</li> -<li>It has an online mode where you can compete with other players in real time, create or join online rooms, watch how other players drift using the drone camera, and earn prizes for reaching different ranks.</li> -<li>It has a customization and tuning system that lets you change your car's look and performance to your liking. You can modify the color, wheels, vinyls, headlights, spoiler, and other elements of your car, as well as the engine, suspension, brakes, weight, traction, and other parameters.</li> -<li>It has an advanced physics system that simulates the real behavior of the cars, the wheels,
Esperamos que este artículo te haya sido útil y que disfrutes de CarX Drift Racing 2.</p> - <h2>Preguntas frecuentes</h2> -<p>A continuación, te presentamos algunas preguntas frecuentes sobre CarX Drift Racing 2 y sus respuestas:</p> - <h3>¿Qué es el drifting?</h3> -<p>El drifting es una técnica de conducción que consiste en hacer derrapar el coche de forma controlada, manteniendo el ángulo y la velocidad adecuados. El drifting se usa tanto en competiciones deportivas como en exhibiciones artísticas. El drifting requiere de mucha habilidad y práctica para dominarlo.</p> - <h3>¿Qué es un archivo APK?</h3> -<p>Un archivo APK es el formato que usan los dispositivos Android para instalar aplicaciones. Un archivo APK contiene todos los datos necesarios para ejecutar una aplicación en tu dispositivo. Puedes descargar archivos APK desde Google Play Store o desde fuentes externas.</p> - <h3>¿Qué ventajas tiene descargar archivos APK?</h3> -<p>Descargar archivos APK puede tener algunas ventajas, como las siguientes:</p> -<ul> -<li>Puedes acceder a aplicaciones que no están disponibles en tu región o en tu tienda oficial.</li> -<li>Puedes obtener la última versión de una aplicación antes que nadie.</li> -<li>Puedes instalar aplicaciones que no son compatibles con tu dispositivo o con tu versión de Android.</li> -</ul> - <h3>¿Qué riesgos tiene descargar archivos APK?</h3> -<p>Descargar archivos APK también implica algunos riesgos, como los siguientes:</p> -<ul> -<li>Puedes infectar tu dispositivo con virus, malware o software malicioso que dañe tu dispositivo o robe tu información.</li> -<li>Puedes violar los derechos de autor o las condiciones de uso de una aplicación al descargarla desde una fuente no autorizada.</li> -<li>Puedes perder la garantía o el soporte técnico de tu dispositivo o de la aplicación al instalar una versión no oficial o modificada.</li> -</ul> - <h3>¿Cómo mejorar mi habilidad en el drifting?</h3> -<p>Para mejorar tu habilidad en el drifting, puedes seguir algunos consejos y trucos, como los siguientes:</p> -<ul> -<li>Practica mucho en diferentes pistas y con diferentes coches para familiarizarte con sus características y comportamientos.</li> -<li>Ajusta la configuración de tu coche a tu gusto y a las condiciones de la pista. Modifica el motor, la suspensión, los frenos, el peso, la tracción y otros parámetros para mejorar el rendimiento y el control de tu coche.</li> -<li>Usa el freno de mano para iniciar el derrape y mantenerlo. El freno de mano te ayuda a bloquear las ruedas traseras y hacer girar el coche.</li> -<li>Usa el acelerador para controlar el ángulo y la velocidad del derrape. El acelerador te ayuda a mantener la inercia y la tracción del coche.</li> -<li>Usa el volante para corregir el rumbo y la dirección del derrape. El volante te ayuda a girar el coche y a mantenerlo en la trayectoria deseada.</li> -<li>Observa y aprende de otros jugadores que hagan drifting. Puedes ver cómo hacen drifting otros jugadores usando la cámara dron o el modo espectador. 
diff --git a/spaces/fffiloni/bing/src/lib/hooks/use-enter-submit.tsx b/spaces/fffiloni/bing/src/lib/hooks/use-enter-submit.tsx deleted file mode 100644 index d66b2d3253baff164235d4ca791aae6d84721835..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/bing/src/lib/hooks/use-enter-submit.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import { useRef, type RefObject } from 'react' - -export function useEnterSubmit(): { - formRef: RefObject<HTMLFormElement> - onKeyDown: (event: React.KeyboardEvent<HTMLTextAreaElement>) => void -} { - const formRef = useRef<HTMLFormElement>(null) - - const handleKeyDown = ( - event: React.KeyboardEvent<HTMLTextAreaElement> - ): void => { - if ( - event.key === 'Enter' && - !event.shiftKey && - !event.nativeEvent.isComposing - ) { - formRef.current?.requestSubmit() - event.preventDefault() - } - } - - return { formRef, onKeyDown: handleKeyDown } -} diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/ts4.8/assert.d.ts b/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/ts4.8/assert.d.ts deleted file mode 100644 index e8595e637123b36d6796d5e159ebbb5320254cb2..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/ts4.8/assert.d.ts +++ /dev/null @@ -1,961 +0,0 @@ -/** - * The `assert` module provides a set of assertion functions for verifying - * invariants. - * @see [source](https://github.com/nodejs/node/blob/v18.0.0/lib/assert.js) - */ -declare module 'assert' { - /** - * An alias of {@link ok}. - * @since v0.5.9 - * @param value The input that is checked for being truthy. - */ - function assert(value: unknown, message?: string | Error): asserts value; - namespace assert { - /** - * Indicates the failure of an assertion. All errors thrown by the `assert` module - * will be instances of the `AssertionError` class. - */ - class AssertionError extends Error { - actual: unknown; - expected: unknown; - operator: string; - generatedMessage: boolean; - code: 'ERR_ASSERTION'; - constructor(options?: { - /** If provided, the error message is set to this value. */ - message?: string | undefined; - /** The `actual` property on the error instance. */ - actual?: unknown | undefined; - /** The `expected` property on the error instance. */ - expected?: unknown | undefined; - /** The `operator` property on the error instance. */ - operator?: string | undefined; - /** If provided, the generated stack trace omits frames before this function. */ - // tslint:disable-next-line:ban-types - stackStartFn?: Function | undefined; - }); - } - /** - * This feature is currently experimental and behavior might still change. - * @since v14.2.0, v12.19.0 - * @experimental - */ - class CallTracker { - /** - * The wrapper function is expected to be called exactly `exact` times. If the - * function has not been called exactly `exact` times when `tracker.verify()` is called, then `tracker.verify()` will throw an - * error. - * - * ```js - * import assert from 'assert'; - * - * // Creates call tracker. - * const tracker = new assert.CallTracker(); - * - * function func() {} - * - * // Returns a function that wraps func() that must be called exact times - * // before tracker.verify(). - * const callsfunc = tracker.calls(func); - * ``` - * @since v14.2.0, v12.19.0 - * @param [fn='A no-op function'] - * @param [exact=1] - * @return that wraps `fn`.
- */ - calls(exact?: number): () => void; - calls<Func extends (...args: any[]) => any>(fn?: Func, exact?: number): Func; - /** - * Example: - * - * ```js - * import assert from 'node:assert'; - * - * const tracker = new assert.CallTracker(); - * - * function func() {} - * const callsfunc = tracker.calls(func); - * callsfunc(1, 2, 3); - * - * assert.deepStrictEqual(tracker.getCalls(callsfunc), - * [{ thisArg: this, arguments: [1, 2, 3 ] }]); - * ``` - * - * @since v18.8.0, v16.18.0 - * @params fn - * @returns An Array with the calls to a tracked function. - */ - getCalls(fn: Function): CallTrackerCall[]; - /** - * The arrays contains information about the expected and actual number of calls of - * the functions that have not been called the expected number of times. - * - * ```js - * import assert from 'assert'; - * - * // Creates call tracker. - * const tracker = new assert.CallTracker(); - * - * function func() {} - * - * function foo() {} - * - * // Returns a function that wraps func() that must be called exact times - * // before tracker.verify(). - * const callsfunc = tracker.calls(func, 2); - * - * // Returns an array containing information on callsfunc() - * tracker.report(); - * // [ - * // { - * // message: 'Expected the func function to be executed 2 time(s) but was - * // executed 0 time(s).', - * // actual: 0, - * // expected: 2, - * // operator: 'func', - * // stack: stack trace - * // } - * // ] - * ``` - * @since v14.2.0, v12.19.0 - * @return of objects containing information about the wrapper functions returned by `calls`. - */ - report(): CallTrackerReportInformation[]; - /** - * Reset calls of the call tracker. - * If a tracked function is passed as an argument, the calls will be reset for it. - * If no arguments are passed, all tracked functions will be reset. - * - * ```js - * import assert from 'node:assert'; - * - * const tracker = new assert.CallTracker(); - * - * function func() {} - * const callsfunc = tracker.calls(func); - * - * callsfunc(); - * // Tracker was called once - * tracker.getCalls(callsfunc).length === 1; - * - * tracker.reset(callsfunc); - * tracker.getCalls(callsfunc).length === 0; - * ``` - * - * @since v18.8.0, v16.18.0 - * @param fn a tracked function to reset. - */ - reset(fn?: Function): void; - /** - * Iterates through the list of functions passed to `tracker.calls()` and will throw an error for functions that - * have not been called the expected number of times. - * - * ```js - * import assert from 'assert'; - * - * // Creates call tracker. - * const tracker = new assert.CallTracker(); - * - * function func() {} - * - * // Returns a function that wraps func() that must be called exact times - * // before tracker.verify(). - * const callsfunc = tracker.calls(func, 2); - * - * callsfunc(); - * - * // Will throw an error since callsfunc() was only called once. - * tracker.verify(); - * ``` - * @since v14.2.0, v12.19.0 - */ - verify(): void; - } - interface CallTrackerCall { - thisArg: object; - arguments: unknown[]; - } - interface CallTrackerReportInformation { - message: string; - /** The actual number of times the function was called. */ - actual: number; - /** The number of times the function was expected to be called. */ - expected: number; - /** The name of the function that is wrapped. */ - operator: string; - /** A stack trace of the function. 
*/ - stack: object; - } - type AssertPredicate = RegExp | (new () => object) | ((thrown: unknown) => boolean) | object | Error; - /** - * Throws an `AssertionError` with the provided error message or a default - * error message. If the `message` parameter is an instance of an `Error` then - * it will be thrown instead of the `AssertionError`. - * - * ```js - * import assert from 'assert/strict'; - * - * assert.fail(); - * // AssertionError [ERR_ASSERTION]: Failed - * - * assert.fail('boom'); - * // AssertionError [ERR_ASSERTION]: boom - * - * assert.fail(new TypeError('need array')); - * // TypeError: need array - * ``` - * - * Using `assert.fail()` with more than two arguments is possible but deprecated. - * See below for further details. - * @since v0.1.21 - * @param [message='Failed'] - */ - function fail(message?: string | Error): never; - /** @deprecated since v10.0.0 - use fail([message]) or other assert functions instead. */ - function fail( - actual: unknown, - expected: unknown, - message?: string | Error, - operator?: string, - // tslint:disable-next-line:ban-types - stackStartFn?: Function - ): never; - /** - * Tests if `value` is truthy. It is equivalent to`assert.equal(!!value, true, message)`. - * - * If `value` is not truthy, an `AssertionError` is thrown with a `message`property set equal to the value of the `message` parameter. If the `message`parameter is `undefined`, a default - * error message is assigned. If the `message`parameter is an instance of an `Error` then it will be thrown instead of the`AssertionError`. - * If no arguments are passed in at all `message` will be set to the string:`` 'No value argument passed to `assert.ok()`' ``. - * - * Be aware that in the `repl` the error message will be different to the one - * thrown in a file! See below for further details. - * - * ```js - * import assert from 'assert/strict'; - * - * assert.ok(true); - * // OK - * assert.ok(1); - * // OK - * - * assert.ok(); - * // AssertionError: No value argument passed to `assert.ok()` - * - * assert.ok(false, 'it\'s false'); - * // AssertionError: it's false - * - * // In the repl: - * assert.ok(typeof 123 === 'string'); - * // AssertionError: false == true - * - * // In a file (e.g. test.js): - * assert.ok(typeof 123 === 'string'); - * // AssertionError: The expression evaluated to a falsy value: - * // - * // assert.ok(typeof 123 === 'string') - * - * assert.ok(false); - * // AssertionError: The expression evaluated to a falsy value: - * // - * // assert.ok(false) - * - * assert.ok(0); - * // AssertionError: The expression evaluated to a falsy value: - * // - * // assert.ok(0) - * ``` - * - * ```js - * import assert from 'assert/strict'; - * - * // Using `assert()` works the same: - * assert(0); - * // AssertionError: The expression evaluated to a falsy value: - * // - * // assert(0) - * ``` - * @since v0.1.21 - */ - function ok(value: unknown, message?: string | Error): asserts value; - /** - * **Strict assertion mode** - * - * An alias of {@link strictEqual}. - * - * **Legacy assertion mode** - * - * > Stability: 3 - Legacy: Use {@link strictEqual} instead. - * - * Tests shallow, coercive equality between the `actual` and `expected` parameters - * using the [`==` operator](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Equality). `NaN` is specially handled - * and treated as being identical if both sides are `NaN`. 
- * - * ```js - * import assert from 'assert'; - * - * assert.equal(1, 1); - * // OK, 1 == 1 - * assert.equal(1, '1'); - * // OK, 1 == '1' - * assert.equal(NaN, NaN); - * // OK - * - * assert.equal(1, 2); - * // AssertionError: 1 == 2 - * assert.equal({ a: { b: 1 } }, { a: { b: 1 } }); - * // AssertionError: { a: { b: 1 } } == { a: { b: 1 } } - * ``` - * - * If the values are not equal, an `AssertionError` is thrown with a `message`property set equal to the value of the `message` parameter. If the `message`parameter is undefined, a default - * error message is assigned. If the `message`parameter is an instance of an `Error` then it will be thrown instead of the`AssertionError`. - * @since v0.1.21 - */ - function equal(actual: unknown, expected: unknown, message?: string | Error): void; - /** - * **Strict assertion mode** - * - * An alias of {@link notStrictEqual}. - * - * **Legacy assertion mode** - * - * > Stability: 3 - Legacy: Use {@link notStrictEqual} instead. - * - * Tests shallow, coercive inequality with the [`!=` operator](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Inequality). `NaN` is - * specially handled and treated as being identical if both sides are `NaN`. - * - * ```js - * import assert from 'assert'; - * - * assert.notEqual(1, 2); - * // OK - * - * assert.notEqual(1, 1); - * // AssertionError: 1 != 1 - * - * assert.notEqual(1, '1'); - * // AssertionError: 1 != '1' - * ``` - * - * If the values are equal, an `AssertionError` is thrown with a `message`property set equal to the value of the `message` parameter. If the `message`parameter is undefined, a default error - * message is assigned. If the `message`parameter is an instance of an `Error` then it will be thrown instead of the`AssertionError`. - * @since v0.1.21 - */ - function notEqual(actual: unknown, expected: unknown, message?: string | Error): void; - /** - * **Strict assertion mode** - * - * An alias of {@link deepStrictEqual}. - * - * **Legacy assertion mode** - * - * > Stability: 3 - Legacy: Use {@link deepStrictEqual} instead. - * - * Tests for deep equality between the `actual` and `expected` parameters. Consider - * using {@link deepStrictEqual} instead. {@link deepEqual} can have - * surprising results. - * - * _Deep equality_ means that the enumerable "own" properties of child objects - * are also recursively evaluated by the following rules. - * @since v0.1.21 - */ - function deepEqual(actual: unknown, expected: unknown, message?: string | Error): void; - /** - * **Strict assertion mode** - * - * An alias of {@link notDeepStrictEqual}. - * - * **Legacy assertion mode** - * - * > Stability: 3 - Legacy: Use {@link notDeepStrictEqual} instead. - * - * Tests for any deep inequality. Opposite of {@link deepEqual}. - * - * ```js - * import assert from 'assert'; - * - * const obj1 = { - * a: { - * b: 1 - * } - * }; - * const obj2 = { - * a: { - * b: 2 - * } - * }; - * const obj3 = { - * a: { - * b: 1 - * } - * }; - * const obj4 = Object.create(obj1); - * - * assert.notDeepEqual(obj1, obj1); - * // AssertionError: { a: { b: 1 } } notDeepEqual { a: { b: 1 } } - * - * assert.notDeepEqual(obj1, obj2); - * // OK - * - * assert.notDeepEqual(obj1, obj3); - * // AssertionError: { a: { b: 1 } } notDeepEqual { a: { b: 1 } } - * - * assert.notDeepEqual(obj1, obj4); - * // OK - * ``` - * - * If the values are deeply equal, an `AssertionError` is thrown with a`message` property set equal to the value of the `message` parameter. 
If the`message` parameter is undefined, a default - * error message is assigned. If the`message` parameter is an instance of an `Error` then it will be thrown - * instead of the `AssertionError`. - * @since v0.1.21 - */ - function notDeepEqual(actual: unknown, expected: unknown, message?: string | Error): void; - /** - * Tests strict equality between the `actual` and `expected` parameters as - * determined by [`Object.is()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/is). - * - * ```js - * import assert from 'assert/strict'; - * - * assert.strictEqual(1, 2); - * // AssertionError [ERR_ASSERTION]: Expected inputs to be strictly equal: - * // - * // 1 !== 2 - * - * assert.strictEqual(1, 1); - * // OK - * - * assert.strictEqual('Hello foobar', 'Hello World!'); - * // AssertionError [ERR_ASSERTION]: Expected inputs to be strictly equal: - * // + actual - expected - * // - * // + 'Hello foobar' - * // - 'Hello World!' - * // ^ - * - * const apples = 1; - * const oranges = 2; - * assert.strictEqual(apples, oranges, `apples ${apples} !== oranges ${oranges}`); - * // AssertionError [ERR_ASSERTION]: apples 1 !== oranges 2 - * - * assert.strictEqual(1, '1', new TypeError('Inputs are not identical')); - * // TypeError: Inputs are not identical - * ``` - * - * If the values are not strictly equal, an `AssertionError` is thrown with a`message` property set equal to the value of the `message` parameter. If the`message` parameter is undefined, a - * default error message is assigned. If the`message` parameter is an instance of an `Error` then it will be thrown - * instead of the `AssertionError`. - * @since v0.1.21 - */ - function strictEqual<T>(actual: unknown, expected: T, message?: string | Error): asserts actual is T; - /** - * Tests strict inequality between the `actual` and `expected` parameters as - * determined by [`Object.is()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/is). - * - * ```js - * import assert from 'assert/strict'; - * - * assert.notStrictEqual(1, 2); - * // OK - * - * assert.notStrictEqual(1, 1); - * // AssertionError [ERR_ASSERTION]: Expected "actual" to be strictly unequal to: - * // - * // 1 - * - * assert.notStrictEqual(1, '1'); - * // OK - * ``` - * - * If the values are strictly equal, an `AssertionError` is thrown with a`message` property set equal to the value of the `message` parameter. If the`message` parameter is undefined, a - * default error message is assigned. If the`message` parameter is an instance of an `Error` then it will be thrown - * instead of the `AssertionError`. - * @since v0.1.21 - */ - function notStrictEqual(actual: unknown, expected: unknown, message?: string | Error): void; - /** - * Tests for deep equality between the `actual` and `expected` parameters. - * "Deep" equality means that the enumerable "own" properties of child objects - * are recursively evaluated also by the following rules. - * @since v1.2.0 - */ - function deepStrictEqual<T>(actual: unknown, expected: T, message?: string | Error): asserts actual is T; - /** - * Tests for deep strict inequality. Opposite of {@link deepStrictEqual}. - * - * ```js - * import assert from 'assert/strict'; - * - * assert.notDeepStrictEqual({ a: 1 }, { a: '1' }); - * // OK - * ``` - * - * If the values are deeply and strictly equal, an `AssertionError` is thrown - * with a `message` property set equal to the value of the `message` parameter. 
If - * the `message` parameter is undefined, a default error message is assigned. If - * the `message` parameter is an instance of an `Error` then it will be thrown - * instead of the `AssertionError`. - * @since v1.2.0 - */ - function notDeepStrictEqual(actual: unknown, expected: unknown, message?: string | Error): void; - /** - * Expects the function `fn` to throw an error. - * - * If specified, `error` can be a [`Class`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Classes), - * [`RegExp`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions), a validation function, - * a validation object where each property will be tested for strict deep equality, - * or an instance of error where each property will be tested for strict deep - * equality including the non-enumerable `message` and `name` properties. When - * using an object, it is also possible to use a regular expression, when - * validating against a string property. See below for examples. - * - * If specified, `message` will be appended to the message provided by the`AssertionError` if the `fn` call fails to throw or in case the error validation - * fails. - * - * Custom validation object/error instance: - * - * ```js - * import assert from 'assert/strict'; - * - * const err = new TypeError('Wrong value'); - * err.code = 404; - * err.foo = 'bar'; - * err.info = { - * nested: true, - * baz: 'text' - * }; - * err.reg = /abc/i; - * - * assert.throws( - * () => { - * throw err; - * }, - * { - * name: 'TypeError', - * message: 'Wrong value', - * info: { - * nested: true, - * baz: 'text' - * } - * // Only properties on the validation object will be tested for. - * // Using nested objects requires all properties to be present. Otherwise - * // the validation is going to fail. - * } - * ); - * - * // Using regular expressions to validate error properties: - * throws( - * () => { - * throw err; - * }, - * { - * // The `name` and `message` properties are strings and using regular - * // expressions on those will match against the string. If they fail, an - * // error is thrown. - * name: /^TypeError$/, - * message: /Wrong/, - * foo: 'bar', - * info: { - * nested: true, - * // It is not possible to use regular expressions for nested properties! - * baz: 'text' - * }, - * // The `reg` property contains a regular expression and only if the - * // validation object contains an identical regular expression, it is going - * // to pass. - * reg: /abc/i - * } - * ); - * - * // Fails due to the different `message` and `name` properties: - * throws( - * () => { - * const otherErr = new Error('Not found'); - * // Copy all enumerable properties from `err` to `otherErr`. - * for (const [key, value] of Object.entries(err)) { - * otherErr[key] = value; - * } - * throw otherErr; - * }, - * // The error's `message` and `name` properties will also be checked when using - * // an error as validation object. - * err - * ); - * ``` - * - * Validate instanceof using constructor: - * - * ```js - * import assert from 'assert/strict'; - * - * assert.throws( - * () => { - * throw new Error('Wrong value'); - * }, - * Error - * ); - * ``` - * - * Validate error message using [`RegExp`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions): - * - * Using a regular expression runs `.toString` on the error object, and will - * therefore also include the error name. 
- * - * ```js - * import assert from 'assert/strict'; - * - * assert.throws( - * () => { - * throw new Error('Wrong value'); - * }, - * /^Error: Wrong value$/ - * ); - * ``` - * - * Custom error validation: - * - * The function must return `true` to indicate all internal validations passed. - * It will otherwise fail with an `AssertionError`. - * - * ```js - * import assert from 'assert/strict'; - * - * assert.throws( - * () => { - * throw new Error('Wrong value'); - * }, - * (err) => { - * assert(err instanceof Error); - * assert(/value/.test(err)); - * // Avoid returning anything from validation functions besides `true`. - * // Otherwise, it's not clear what part of the validation failed. Instead, - * // throw an error about the specific validation that failed (as done in this - * // example) and add as much helpful debugging information to that error as - * // possible. - * return true; - * }, - * 'unexpected error' - * ); - * ``` - * - * `error` cannot be a string. If a string is provided as the second - * argument, then `error` is assumed to be omitted and the string will be used for`message` instead. This can lead to easy-to-miss mistakes. Using the same - * message as the thrown error message is going to result in an`ERR_AMBIGUOUS_ARGUMENT` error. Please read the example below carefully if using - * a string as the second argument gets considered: - * - * ```js - * import assert from 'assert/strict'; - * - * function throwingFirst() { - * throw new Error('First'); - * } - * - * function throwingSecond() { - * throw new Error('Second'); - * } - * - * function notThrowing() {} - * - * // The second argument is a string and the input function threw an Error. - * // The first case will not throw as it does not match for the error message - * // thrown by the input function! - * assert.throws(throwingFirst, 'Second'); - * // In the next example the message has no benefit over the message from the - * // error and since it is not clear if the user intended to actually match - * // against the error message, Node.js throws an `ERR_AMBIGUOUS_ARGUMENT` error. - * assert.throws(throwingSecond, 'Second'); - * // TypeError [ERR_AMBIGUOUS_ARGUMENT] - * - * // The string is only used (as message) in case the function does not throw: - * assert.throws(notThrowing, 'Second'); - * // AssertionError [ERR_ASSERTION]: Missing expected exception: Second - * - * // If it was intended to match for the error message do this instead: - * // It does not throw because the error messages match. - * assert.throws(throwingSecond, /Second$/); - * - * // If the error message does not match, an AssertionError is thrown. - * assert.throws(throwingFirst, /Second$/); - * // AssertionError [ERR_ASSERTION] - * ``` - * - * Due to the confusing error-prone notation, avoid a string as the second - * argument. - * @since v0.1.21 - */ - function throws(block: () => unknown, message?: string | Error): void; - function throws(block: () => unknown, error: AssertPredicate, message?: string | Error): void; - /** - * Asserts that the function `fn` does not throw an error. - * - * Using `assert.doesNotThrow()` is actually not useful because there - * is no benefit in catching an error and then rethrowing it. Instead, consider - * adding a comment next to the specific code path that should not throw and keep - * error messages as expressive as possible. - * - * When `assert.doesNotThrow()` is called, it will immediately call the `fn`function. 
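- *
- * For instance, the following passes because the function completes without
- * throwing (a minimal sketch, not from the original docs):
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.doesNotThrow(() => {
- *   JSON.parse('{"ok": true}');
- * });
- * // OK
- * ```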
- * - * If an error is thrown and it is the same type as that specified by the `error`parameter, then an `AssertionError` is thrown. If the error is of a - * different type, or if the `error` parameter is undefined, the error is - * propagated back to the caller. - * - * If specified, `error` can be a [`Class`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Classes), - * [`RegExp`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions) or a validation - * function. See {@link throws} for more details. - * - * The following, for instance, will throw the `TypeError` because there is no - * matching error type in the assertion: - * - * ```js - * import assert from 'assert/strict'; - * - * assert.doesNotThrow( - * () => { - * throw new TypeError('Wrong value'); - * }, - * SyntaxError - * ); - * ``` - * - * However, the following will result in an `AssertionError` with the message - * 'Got unwanted exception...': - * - * ```js - * import assert from 'assert/strict'; - * - * assert.doesNotThrow( - * () => { - * throw new TypeError('Wrong value'); - * }, - * TypeError - * ); - * ``` - * - * If an `AssertionError` is thrown and a value is provided for the `message`parameter, the value of `message` will be appended to the `AssertionError` message: - * - * ```js - * import assert from 'assert/strict'; - * - * assert.doesNotThrow( - * () => { - * throw new TypeError('Wrong value'); - * }, - * /Wrong value/, - * 'Whoops' - * ); - * // Throws: AssertionError: Got unwanted exception: Whoops - * ``` - * @since v0.1.21 - */ - function doesNotThrow(block: () => unknown, message?: string | Error): void; - function doesNotThrow(block: () => unknown, error: AssertPredicate, message?: string | Error): void; - /** - * Throws `value` if `value` is not `undefined` or `null`. This is useful when - * testing the `error` argument in callbacks. The stack trace contains all frames - * from the error passed to `ifError()` including the potential new frames for`ifError()` itself. - * - * ```js - * import assert from 'assert/strict'; - * - * assert.ifError(null); - * // OK - * assert.ifError(0); - * // AssertionError [ERR_ASSERTION]: ifError got unwanted exception: 0 - * assert.ifError('error'); - * // AssertionError [ERR_ASSERTION]: ifError got unwanted exception: 'error' - * assert.ifError(new Error()); - * // AssertionError [ERR_ASSERTION]: ifError got unwanted exception: Error - * - * // Create some random error frames. - * let err; - * (function errorFrame() { - * err = new Error('test error'); - * })(); - * - * (function ifErrorFrame() { - * assert.ifError(err); - * })(); - * // AssertionError [ERR_ASSERTION]: ifError got unwanted exception: test error - * // at ifErrorFrame - * // at errorFrame - * ``` - * @since v0.1.97 - */ - function ifError(value: unknown): asserts value is null | undefined; - /** - * Awaits the `asyncFn` promise or, if `asyncFn` is a function, immediately - * calls the function and awaits the returned promise to complete. It will then - * check that the promise is rejected. - * - * If `asyncFn` is a function and it throws an error synchronously,`assert.rejects()` will return a rejected `Promise` with that error. If the - * function does not return a promise, `assert.rejects()` will return a rejected`Promise` with an `ERR_INVALID_RETURN_VALUE` error. In both cases the error - * handler is skipped. - * - * Besides the async nature to await the completion behaves identically to {@link throws}. 
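- *
- * As a minimal sketch of the synchronous-throw behavior described above, the
- * error surfaces through the returned promise rather than being thrown at the
- * call site:
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.rejects(() => {
- *   throw new TypeError('Wrong value');
- * }).catch((err) => {
- *   console.log(err.name); // 'TypeError'
- * });
- * ```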
- * - * If specified, `error` can be a [`Class`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Classes), - * [`RegExp`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions), a validation function, - * an object where each property will be tested for, or an instance of error where - * each property will be tested for including the non-enumerable `message` and`name` properties. - * - * If specified, `message` will be the message provided by the `AssertionError` if the `asyncFn` fails to reject. - * - * ```js - * import assert from 'assert/strict'; - * - * await assert.rejects( - * async () => { - * throw new TypeError('Wrong value'); - * }, - * { - * name: 'TypeError', - * message: 'Wrong value' - * } - * ); - * ``` - * - * ```js - * import assert from 'assert/strict'; - * - * await assert.rejects( - * async () => { - * throw new TypeError('Wrong value'); - * }, - * (err) => { - * assert.strictEqual(err.name, 'TypeError'); - * assert.strictEqual(err.message, 'Wrong value'); - * return true; - * } - * ); - * ``` - * - * ```js - * import assert from 'assert/strict'; - * - * assert.rejects( - * Promise.reject(new Error('Wrong value')), - * Error - * ).then(() => { - * // ... - * }); - * ``` - * - * `error` cannot be a string. If a string is provided as the second - * argument, then `error` is assumed to be omitted and the string will be used for`message` instead. This can lead to easy-to-miss mistakes. Please read the - * example in {@link throws} carefully if using a string as the second - * argument gets considered. - * @since v10.0.0 - */ - function rejects(block: (() => Promise<unknown>) | Promise<unknown>, message?: string | Error): Promise<void>; - function rejects(block: (() => Promise<unknown>) | Promise<unknown>, error: AssertPredicate, message?: string | Error): Promise<void>; - /** - * Awaits the `asyncFn` promise or, if `asyncFn` is a function, immediately - * calls the function and awaits the returned promise to complete. It will then - * check that the promise is not rejected. - * - * If `asyncFn` is a function and it throws an error synchronously,`assert.doesNotReject()` will return a rejected `Promise` with that error. If - * the function does not return a promise, `assert.doesNotReject()` will return a - * rejected `Promise` with an `ERR_INVALID_RETURN_VALUE` error. In both cases - * the error handler is skipped. - * - * Using `assert.doesNotReject()` is actually not useful because there is little - * benefit in catching a rejection and then rejecting it again. Instead, consider - * adding a comment next to the specific code path that should not reject and keep - * error messages as expressive as possible. - * - * If specified, `error` can be a [`Class`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Classes), - * [`RegExp`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions) or a validation - * function. See {@link throws} for more details. - * - * Besides the async nature to await the completion behaves identically to {@link doesNotThrow}. - * - * ```js - * import assert from 'assert/strict'; - * - * await assert.doesNotReject( - * async () => { - * throw new TypeError('Wrong value'); - * }, - * SyntaxError - * ); - * ``` - * - * ```js - * import assert from 'assert/strict'; - * - * assert.doesNotReject(Promise.reject(new TypeError('Wrong value'))) - * .then(() => { - * // ... 
- * }); - * ``` - * @since v10.0.0 - */ - function doesNotReject(block: (() => Promise<unknown>) | Promise<unknown>, message?: string | Error): Promise<void>; - function doesNotReject(block: (() => Promise<unknown>) | Promise<unknown>, error: AssertPredicate, message?: string | Error): Promise<void>; - /** - * Expects the `string` input to match the regular expression. - * - * ```js - * import assert from 'assert/strict'; - * - * assert.match('I will fail', /pass/); - * // AssertionError [ERR_ASSERTION]: The input did not match the regular ... - * - * assert.match(123, /pass/); - * // AssertionError [ERR_ASSERTION]: The "string" argument must be of type string. - * - * assert.match('I will pass', /pass/); - * // OK - * ``` - * - * If the values do not match, or if the `string` argument is of another type than`string`, an `AssertionError` is thrown with a `message` property set equal - * to the value of the `message` parameter. If the `message` parameter is - * undefined, a default error message is assigned. If the `message` parameter is an - * instance of an `Error` then it will be thrown instead of the `AssertionError`. - * @since v13.6.0, v12.16.0 - */ - function match(value: string, regExp: RegExp, message?: string | Error): void; - /** - * Expects the `string` input not to match the regular expression. - * - * ```js - * import assert from 'assert/strict'; - * - * assert.doesNotMatch('I will fail', /fail/); - * // AssertionError [ERR_ASSERTION]: The input was expected to not match the ... - * - * assert.doesNotMatch(123, /pass/); - * // AssertionError [ERR_ASSERTION]: The "string" argument must be of type string. - * - * assert.doesNotMatch('I will pass', /different/); - * // OK - * ``` - * - * If the values do match, or if the `string` argument is of another type than`string`, an `AssertionError` is thrown with a `message` property set equal - * to the value of the `message` parameter. If the `message` parameter is - * undefined, a default error message is assigned. If the `message` parameter is an - * instance of an `Error` then it will be thrown instead of the `AssertionError`. - * @since v13.6.0, v12.16.0 - */ - function doesNotMatch(value: string, regExp: RegExp, message?: string | Error): void; - const strict: Omit<typeof assert, 'equal' | 'notEqual' | 'deepEqual' | 'notDeepEqual' | 'ok' | 'strictEqual' | 'deepStrictEqual' | 'ifError' | 'strict'> & { - (value: unknown, message?: string | Error): asserts value; - equal: typeof strictEqual; - notEqual: typeof notStrictEqual; - deepEqual: typeof deepStrictEqual; - notDeepEqual: typeof notDeepStrictEqual; - // Mapped types and assertion functions are incompatible? - // TS2775: Assertions require every name in the call target - // to be declared with an explicit type annotation. 
- ok: typeof ok; - strictEqual: typeof strictEqual; - deepStrictEqual: typeof deepStrictEqual; - ifError: typeof ifError; - strict: typeof strict; - }; - } - export = assert; -} -declare module 'node:assert' { - import assert = require('assert'); - export = assert; -} diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/ts4.8/stream.d.ts b/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/ts4.8/stream.d.ts deleted file mode 100644 index a0df689e1d9625be848a093c22e862ac7b43380c..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/ts4.8/stream.d.ts +++ /dev/null @@ -1,1340 +0,0 @@ -/** - * A stream is an abstract interface for working with streaming data in Node.js. - * The `stream` module provides an API for implementing the stream interface. - * - * There are many stream objects provided by Node.js. For instance, a `request to an HTTP server` and `process.stdout` are both stream instances. - * - * Streams can be readable, writable, or both. All streams are instances of `EventEmitter`. - * - * To access the `stream` module: - * - * ```js - * const stream = require('stream'); - * ``` - * - * The `stream` module is useful for creating new types of stream instances. It is - * usually not necessary to use the `stream` module to consume streams. - * @see [source](https://github.com/nodejs/node/blob/v18.0.0/lib/stream.js) - */ -declare module 'stream' { - import { EventEmitter, Abortable } from 'node:events'; - import { Blob as NodeBlob } from "node:buffer"; - import * as streamPromises from 'node:stream/promises'; - import * as streamConsumers from 'node:stream/consumers'; - import * as streamWeb from 'node:stream/web'; - class internal extends EventEmitter { - pipe<T extends NodeJS.WritableStream>( - destination: T, - options?: { - end?: boolean | undefined; - } - ): T; - } - namespace internal { - class Stream extends internal { - constructor(opts?: ReadableOptions); - } - interface StreamOptions<T extends Stream> extends Abortable { - emitClose?: boolean | undefined; - highWaterMark?: number | undefined; - objectMode?: boolean | undefined; - construct?(this: T, callback: (error?: Error | null) => void): void; - destroy?(this: T, error: Error | null, callback: (error: Error | null) => void): void; - autoDestroy?: boolean | undefined; - } - interface ReadableOptions extends StreamOptions<Readable> { - encoding?: BufferEncoding | undefined; - read?(this: Readable, size: number): void; - } - /** - * @since v0.9.4 - */ - class Readable extends Stream implements NodeJS.ReadableStream { - /** - * A utility method for creating Readable Streams out of iterators. - */ - static from(iterable: Iterable<any> | AsyncIterable<any>, options?: ReadableOptions): Readable; - /** - * A utility method for creating a `Readable` from a web `ReadableStream`. - * @since v17.0.0 - * @experimental - */ - static fromWeb(readableStream: streamWeb.ReadableStream, options?: Pick<ReadableOptions, 'encoding' | 'highWaterMark' | 'objectMode' | 'signal'>): Readable; - /** - * Returns whether the stream has been read from or cancelled. - * @since v16.8.0 - */ - static isDisturbed(stream: Readable | NodeJS.ReadableStream): boolean; - /** - * A utility method for creating a web `ReadableStream` from a `Readable`. - * @since v17.0.0 - * @experimental - */ - static toWeb(streamReadable: Readable): streamWeb.ReadableStream; - /** - * Returns whether the stream was destroyed or errored before emitting `'end'`. 
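- *
- * A minimal sketch (an illustration, not from the original docs): destroying
- * a stream before it emits `'end'` marks it as aborted.
- *
- * ```js
- * const readable = Readable.from(['some', 'data']);
- * readable.destroy();
- * console.log(readable.readableAborted); // true
- * ```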
- * @since v16.8.0 - * @experimental - */ - readonly readableAborted: boolean; - /** - * Is `true` if it is safe to call `readable.read()`, which means - * the stream has not been destroyed or emitted `'error'` or `'end'`. - * @since v11.4.0 - */ - readable: boolean; - /** - * Returns whether `'data'` has been emitted. - * @since v16.7.0, v14.18.0 - * @experimental - */ - readonly readableDidRead: boolean; - /** - * Getter for the property `encoding` of a given `Readable` stream. The `encoding`property can be set using the `readable.setEncoding()` method. - * @since v12.7.0 - */ - readonly readableEncoding: BufferEncoding | null; - /** - * Becomes `true` when `'end'` event is emitted. - * @since v12.9.0 - */ - readonly readableEnded: boolean; - /** - * This property reflects the current state of a `Readable` stream as described - * in the `Three states` section. - * @since v9.4.0 - */ - readonly readableFlowing: boolean | null; - /** - * Returns the value of `highWaterMark` passed when creating this `Readable`. - * @since v9.3.0 - */ - readonly readableHighWaterMark: number; - /** - * This property contains the number of bytes (or objects) in the queue - * ready to be read. The value provides introspection data regarding - * the status of the `highWaterMark`. - * @since v9.4.0 - */ - readonly readableLength: number; - /** - * Getter for the property `objectMode` of a given `Readable` stream. - * @since v12.3.0 - */ - readonly readableObjectMode: boolean; - /** - * Is `true` after `readable.destroy()` has been called. - * @since v18.0.0 - */ - destroyed: boolean; - /** - * Is true after 'close' has been emitted. - * @since v8.0.0 - */ - readonly closed: boolean; - /** - * Returns error if the stream has been destroyed with an error. - * @since v18.0.0 - */ - readonly errored: Error | null; - constructor(opts?: ReadableOptions); - _construct?(callback: (error?: Error | null) => void): void; - _read(size: number): void; - /** - * The `readable.read()` method reads data out of the internal buffer and - * returns it. If no data is available to be read, `null` is returned. By default, - * the data is returned as a `Buffer` object unless an encoding has been - * specified using the `readable.setEncoding()` method or the stream is operating - * in object mode. - * - * The optional `size` argument specifies a specific number of bytes to read. If`size` bytes are not available to be read, `null` will be returned _unless_the stream has ended, in which - * case all of the data remaining in the internal - * buffer will be returned. - * - * If the `size` argument is not specified, all of the data contained in the - * internal buffer will be returned. - * - * The `size` argument must be less than or equal to 1 GiB. - * - * The `readable.read()` method should only be called on `Readable` streams - * operating in paused mode. In flowing mode, `readable.read()` is called - * automatically until the internal buffer is fully drained. 
- * - * ```js - * const readable = getReadableStreamSomehow(); - * - * // 'readable' may be triggered multiple times as data is buffered in - * readable.on('readable', () => { - * let chunk; - * console.log('Stream is readable (new data received in buffer)'); - * // Use a loop to make sure we read all currently available data - * while (null !== (chunk = readable.read())) { - * console.log(`Read ${chunk.length} bytes of data...`); - * } - * }); - * - * // 'end' will be triggered once when there is no more data available - * readable.on('end', () => { - * console.log('Reached end of stream.'); - * }); - * ``` - * - * Each call to `readable.read()` returns a chunk of data, or `null`. The chunks - * are not concatenated. A `while` loop is necessary to consume all data - * currently in the buffer. When reading a large file `.read()` may return `null`, - * having consumed all buffered content so far, but there is still more data to - * come not yet buffered. In this case a new `'readable'` event will be emitted - * when there is more data in the buffer. Finally the `'end'` event will be - * emitted when there is no more data to come. - * - * Therefore to read a file's whole contents from a `readable`, it is necessary - * to collect chunks across multiple `'readable'` events: - * - * ```js - * const chunks = []; - * - * readable.on('readable', () => { - * let chunk; - * while (null !== (chunk = readable.read())) { - * chunks.push(chunk); - * } - * }); - * - * readable.on('end', () => { - * const content = chunks.join(''); - * }); - * ``` - * - * A `Readable` stream in object mode will always return a single item from - * a call to `readable.read(size)`, regardless of the value of the`size` argument. - * - * If the `readable.read()` method returns a chunk of data, a `'data'` event will - * also be emitted. - * - * Calling {@link read} after the `'end'` event has - * been emitted will return `null`. No runtime error will be raised. - * @since v0.9.4 - * @param size Optional argument to specify how much data to read. - */ - read(size?: number): any; - /** - * The `readable.setEncoding()` method sets the character encoding for - * data read from the `Readable` stream. - * - * By default, no encoding is assigned and stream data will be returned as`Buffer` objects. Setting an encoding causes the stream data - * to be returned as strings of the specified encoding rather than as `Buffer`objects. For instance, calling `readable.setEncoding('utf8')` will cause the - * output data to be interpreted as UTF-8 data, and passed as strings. Calling`readable.setEncoding('hex')` will cause the data to be encoded in hexadecimal - * string format. - * - * The `Readable` stream will properly handle multi-byte characters delivered - * through the stream that would otherwise become improperly decoded if simply - * pulled from the stream as `Buffer` objects. - * - * ```js - * const readable = getReadableStreamSomehow(); - * readable.setEncoding('utf8'); - * readable.on('data', (chunk) => { - * assert.equal(typeof chunk, 'string'); - * console.log('Got %d characters of string data:', chunk.length); - * }); - * ``` - * @since v0.9.4 - * @param encoding The encoding to use. - */ - setEncoding(encoding: BufferEncoding): this; - /** - * The `readable.pause()` method will cause a stream in flowing mode to stop - * emitting `'data'` events, switching out of flowing mode. Any data that - * becomes available will remain in the internal buffer. 
- * - * ```js - * const readable = getReadableStreamSomehow(); - * readable.on('data', (chunk) => { - * console.log(`Received ${chunk.length} bytes of data.`); - * readable.pause(); - * console.log('There will be no additional data for 1 second.'); - * setTimeout(() => { - * console.log('Now data will start flowing again.'); - * readable.resume(); - * }, 1000); - * }); - * ``` - * - * The `readable.pause()` method has no effect if there is a `'readable'`event listener. - * @since v0.9.4 - */ - pause(): this; - /** - * The `readable.resume()` method causes an explicitly paused `Readable` stream to - * resume emitting `'data'` events, switching the stream into flowing mode. - * - * The `readable.resume()` method can be used to fully consume the data from a - * stream without actually processing any of that data: - * - * ```js - * getReadableStreamSomehow() - * .resume() - * .on('end', () => { - * console.log('Reached the end, but did not read anything.'); - * }); - * ``` - * - * The `readable.resume()` method has no effect if there is a `'readable'`event listener. - * @since v0.9.4 - */ - resume(): this; - /** - * The `readable.isPaused()` method returns the current operating state of the`Readable`. This is used primarily by the mechanism that underlies the`readable.pipe()` method. In most - * typical cases, there will be no reason to - * use this method directly. - * - * ```js - * const readable = new stream.Readable(); - * - * readable.isPaused(); // === false - * readable.pause(); - * readable.isPaused(); // === true - * readable.resume(); - * readable.isPaused(); // === false - * ``` - * @since v0.11.14 - */ - isPaused(): boolean; - /** - * The `readable.unpipe()` method detaches a `Writable` stream previously attached - * using the {@link pipe} method. - * - * If the `destination` is not specified, then _all_ pipes are detached. - * - * If the `destination` is specified, but no pipe is set up for it, then - * the method does nothing. - * - * ```js - * const fs = require('fs'); - * const readable = getReadableStreamSomehow(); - * const writable = fs.createWriteStream('file.txt'); - * // All the data from readable goes into 'file.txt', - * // but only for the first second. - * readable.pipe(writable); - * setTimeout(() => { - * console.log('Stop writing to file.txt.'); - * readable.unpipe(writable); - * console.log('Manually close the file stream.'); - * writable.end(); - * }, 1000); - * ``` - * @since v0.9.4 - * @param destination Optional specific stream to unpipe - */ - unpipe(destination?: NodeJS.WritableStream): this; - /** - * Passing `chunk` as `null` signals the end of the stream (EOF) and behaves the - * same as `readable.push(null)`, after which no more data can be written. The EOF - * signal is put at the end of the buffer and any buffered data will still be - * flushed. - * - * The `readable.unshift()` method pushes a chunk of data back into the internal - * buffer. This is useful in certain situations where a stream is being consumed by - * code that needs to "un-consume" some amount of data that it has optimistically - * pulled out of the source, so that the data can be passed on to some other party. - * - * The `stream.unshift(chunk)` method cannot be called after the `'end'` event - * has been emitted or a runtime error will be thrown. - * - * Developers using `stream.unshift()` often should consider switching to - * use of a `Transform` stream instead. See the `API for stream implementers` section for more information. 
- * - * ```js - * // Pull off a header delimited by \n\n. - * // Use unshift() if we get too much. - * // Call the callback with (error, header, stream). - * const { StringDecoder } = require('string_decoder'); - * function parseHeader(stream, callback) { - * stream.on('error', callback); - * stream.on('readable', onReadable); - * const decoder = new StringDecoder('utf8'); - * let header = ''; - * function onReadable() { - * let chunk; - * while (null !== (chunk = stream.read())) { - * const str = decoder.write(chunk); - * if (str.includes('\n\n')) { - * // Found the header boundary. - * const split = str.split(/\n\n/); - * header += split.shift(); - * const remaining = split.join('\n\n'); - * const buf = Buffer.from(remaining, 'utf8'); - * stream.removeListener('error', callback); - * // Remove the 'readable' listener before unshifting. - * stream.removeListener('readable', onReadable); - * if (buf.length) - * stream.unshift(buf); - * // Now the body of the message can be read from the stream. - * callback(null, header, stream); - * return; - * } - * // Still reading the header. - * header += str; - * } - * } - * } - * ``` - * - * Unlike {@link push}, `stream.unshift(chunk)` will not - * end the reading process by resetting the internal reading state of the stream. - * This can cause unexpected results if `readable.unshift()` is called during a - * read (i.e. from within a {@link _read} implementation on a - * custom stream). Following the call to `readable.unshift()` with an immediate {@link push} will reset the reading state appropriately, - * however it is best to simply avoid calling `readable.unshift()` while in the - * process of performing a read. - * @since v0.9.11 - * @param chunk Chunk of data to unshift onto the read queue. For streams not operating in object mode, `chunk` must be a string, `Buffer`, `Uint8Array` or `null`. For object mode - * streams, `chunk` may be any JavaScript value. - * @param encoding Encoding of string chunks. Must be a valid `Buffer` encoding, such as `'utf8'` or `'ascii'`. - */ - unshift(chunk: any, encoding?: BufferEncoding): void; - /** - * Prior to Node.js 0.10, streams did not implement the entire `stream` module API - * as it is currently defined. (See `Compatibility` for more information.) - * - * When using an older Node.js library that emits `'data'` events and has a {@link pause} method that is advisory only, the`readable.wrap()` method can be used to create a `Readable` - * stream that uses - * the old stream as its data source. - * - * It will rarely be necessary to use `readable.wrap()` but the method has been - * provided as a convenience for interacting with older Node.js applications and - * libraries. - * - * ```js - * const { OldReader } = require('./old-api-module.js'); - * const { Readable } = require('stream'); - * const oreader = new OldReader(); - * const myReader = new Readable().wrap(oreader); - * - * myReader.on('readable', () => { - * myReader.read(); // etc. - * }); - * ``` - * @since v0.9.4 - * @param stream An "old style" readable stream - */ - wrap(stream: NodeJS.ReadableStream): this; - push(chunk: any, encoding?: BufferEncoding): boolean; - _destroy(error: Error | null, callback: (error?: Error | null) => void): void; - /** - * Destroy the stream. Optionally emit an `'error'` event, and emit a `'close'`event (unless `emitClose` is set to `false`). After this call, the readable - * stream will release any internal resources and subsequent calls to `push()`will be ignored. 
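- *
- * A minimal sketch (reusing the `getReadableStreamSomehow()` placeholder from
- * the examples above): passing an error emits `'error'` followed by `'close'`.
- *
- * ```js
- * const readable = getReadableStreamSomehow();
- * readable.on('error', (err) => console.error(err.message));
- * readable.on('close', () => console.log('closed'));
- * readable.destroy(new Error('boom'));
- * ```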
- * - * Once `destroy()` has been called any further calls will be a no-op and no - * further errors except from `_destroy()` may be emitted as `'error'`. - * - * Implementors should not override this method, but instead implement `readable._destroy()`. - * @since v8.0.0 - * @param error Error which will be passed as payload in `'error'` event - */ - destroy(error?: Error): this; - /** - * Event emitter - * The defined events on documents including: - * 1. close - * 2. data - * 3. end - * 4. error - * 5. pause - * 6. readable - * 7. resume - */ - addListener(event: 'close', listener: () => void): this; - addListener(event: 'data', listener: (chunk: any) => void): this; - addListener(event: 'end', listener: () => void): this; - addListener(event: 'error', listener: (err: Error) => void): this; - addListener(event: 'pause', listener: () => void): this; - addListener(event: 'readable', listener: () => void): this; - addListener(event: 'resume', listener: () => void): this; - addListener(event: string | symbol, listener: (...args: any[]) => void): this; - emit(event: 'close'): boolean; - emit(event: 'data', chunk: any): boolean; - emit(event: 'end'): boolean; - emit(event: 'error', err: Error): boolean; - emit(event: 'pause'): boolean; - emit(event: 'readable'): boolean; - emit(event: 'resume'): boolean; - emit(event: string | symbol, ...args: any[]): boolean; - on(event: 'close', listener: () => void): this; - on(event: 'data', listener: (chunk: any) => void): this; - on(event: 'end', listener: () => void): this; - on(event: 'error', listener: (err: Error) => void): this; - on(event: 'pause', listener: () => void): this; - on(event: 'readable', listener: () => void): this; - on(event: 'resume', listener: () => void): this; - on(event: string | symbol, listener: (...args: any[]) => void): this; - once(event: 'close', listener: () => void): this; - once(event: 'data', listener: (chunk: any) => void): this; - once(event: 'end', listener: () => void): this; - once(event: 'error', listener: (err: Error) => void): this; - once(event: 'pause', listener: () => void): this; - once(event: 'readable', listener: () => void): this; - once(event: 'resume', listener: () => void): this; - once(event: string | symbol, listener: (...args: any[]) => void): this; - prependListener(event: 'close', listener: () => void): this; - prependListener(event: 'data', listener: (chunk: any) => void): this; - prependListener(event: 'end', listener: () => void): this; - prependListener(event: 'error', listener: (err: Error) => void): this; - prependListener(event: 'pause', listener: () => void): this; - prependListener(event: 'readable', listener: () => void): this; - prependListener(event: 'resume', listener: () => void): this; - prependListener(event: string | symbol, listener: (...args: any[]) => void): this; - prependOnceListener(event: 'close', listener: () => void): this; - prependOnceListener(event: 'data', listener: (chunk: any) => void): this; - prependOnceListener(event: 'end', listener: () => void): this; - prependOnceListener(event: 'error', listener: (err: Error) => void): this; - prependOnceListener(event: 'pause', listener: () => void): this; - prependOnceListener(event: 'readable', listener: () => void): this; - prependOnceListener(event: 'resume', listener: () => void): this; - prependOnceListener(event: string | symbol, listener: (...args: any[]) => void): this; - removeListener(event: 'close', listener: () => void): this; - removeListener(event: 'data', listener: (chunk: any) => void): this; - 
removeListener(event: 'end', listener: () => void): this; - removeListener(event: 'error', listener: (err: Error) => void): this; - removeListener(event: 'pause', listener: () => void): this; - removeListener(event: 'readable', listener: () => void): this; - removeListener(event: 'resume', listener: () => void): this; - removeListener(event: string | symbol, listener: (...args: any[]) => void): this; - [Symbol.asyncIterator](): AsyncIterableIterator<any>; - } - interface WritableOptions extends StreamOptions<Writable> { - decodeStrings?: boolean | undefined; - defaultEncoding?: BufferEncoding | undefined; - write?(this: Writable, chunk: any, encoding: BufferEncoding, callback: (error?: Error | null) => void): void; - writev?( - this: Writable, - chunks: Array<{ - chunk: any; - encoding: BufferEncoding; - }>, - callback: (error?: Error | null) => void - ): void; - final?(this: Writable, callback: (error?: Error | null) => void): void; - } - /** - * @since v0.9.4 - */ - class Writable extends Stream implements NodeJS.WritableStream { - /** - * A utility method for creating a `Writable` from a web `WritableStream`. - * @since v17.0.0 - * @experimental - */ - static fromWeb(writableStream: streamWeb.WritableStream, options?: Pick<WritableOptions, 'decodeStrings' | 'highWaterMark' | 'objectMode' | 'signal'>): Writable; - /** - * A utility method for creating a web `WritableStream` from a `Writable`. - * @since v17.0.0 - * @experimental - */ - static toWeb(streamWritable: Writable): streamWeb.WritableStream; - /** - * Is `true` if it is safe to call `writable.write()`, which means - * the stream has not been destroyed, errored or ended. - * @since v11.4.0 - */ - readonly writable: boolean; - /** - * Is `true` after `writable.end()` has been called. This property - * does not indicate whether the data has been flushed, for this use `writable.writableFinished` instead. - * @since v12.9.0 - */ - readonly writableEnded: boolean; - /** - * Is set to `true` immediately before the `'finish'` event is emitted. - * @since v12.6.0 - */ - readonly writableFinished: boolean; - /** - * Return the value of `highWaterMark` passed when creating this `Writable`. - * @since v9.3.0 - */ - readonly writableHighWaterMark: number; - /** - * This property contains the number of bytes (or objects) in the queue - * ready to be written. The value provides introspection data regarding - * the status of the `highWaterMark`. - * @since v9.4.0 - */ - readonly writableLength: number; - /** - * Getter for the property `objectMode` of a given `Writable` stream. - * @since v12.3.0 - */ - readonly writableObjectMode: boolean; - /** - * Number of times `writable.uncork()` needs to be - * called in order to fully uncork the stream. - * @since v13.2.0, v12.16.0 - */ - readonly writableCorked: number; - /** - * Is `true` after `writable.destroy()` has been called. - * @since v8.0.0 - */ - destroyed: boolean; - /** - * Is true after 'close' has been emitted. - * @since v8.0.0 - */ - readonly closed: boolean; - /** - * Returns error if the stream has been destroyed with an error. - * @since v18.0.0 - */ - readonly errored: Error | null; - /** - * Is `true` if the stream's buffer has been full and stream will emit 'drain'. 
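- *
- * A minimal sketch (assuming a `writable` stream and a `chunk` to write, both
- * hypothetical):
- *
- * ```js
- * if (!writable.write(chunk)) {
- *   console.log(writable.writableNeedDrain); // true
- *   writable.once('drain', () => {
- *     console.log(writable.writableNeedDrain); // false
- *   });
- * }
- * ```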
- * @since v15.2.0, v14.17.0 - */ - readonly writableNeedDrain: boolean; - constructor(opts?: WritableOptions); - _write(chunk: any, encoding: BufferEncoding, callback: (error?: Error | null) => void): void; - _writev?( - chunks: Array<{ - chunk: any; - encoding: BufferEncoding; - }>, - callback: (error?: Error | null) => void - ): void; - _construct?(callback: (error?: Error | null) => void): void; - _destroy(error: Error | null, callback: (error?: Error | null) => void): void; - _final(callback: (error?: Error | null) => void): void; - /** - * The `writable.write()` method writes some data to the stream, and calls the - * supplied `callback` once the data has been fully handled. If an error - * occurs, the `callback` will be called with the error as its - * first argument. The `callback` is called asynchronously and before `'error'` is - * emitted. - * - * The return value is `true` if the internal buffer is less than the`highWaterMark` configured when the stream was created after admitting `chunk`. - * If `false` is returned, further attempts to write data to the stream should - * stop until the `'drain'` event is emitted. - * - * While a stream is not draining, calls to `write()` will buffer `chunk`, and - * return false. Once all currently buffered chunks are drained (accepted for - * delivery by the operating system), the `'drain'` event will be emitted. - * Once `write()` returns false, do not write more chunks - * until the `'drain'` event is emitted. While calling `write()` on a stream that - * is not draining is allowed, Node.js will buffer all written chunks until - * maximum memory usage occurs, at which point it will abort unconditionally. - * Even before it aborts, high memory usage will cause poor garbage collector - * performance and high RSS (which is not typically released back to the system, - * even after the memory is no longer required). Since TCP sockets may never - * drain if the remote peer does not read the data, writing a socket that is - * not draining may lead to a remotely exploitable vulnerability. - * - * Writing data while the stream is not draining is particularly - * problematic for a `Transform`, because the `Transform` streams are paused - * by default until they are piped or a `'data'` or `'readable'` event handler - * is added. - * - * If the data to be written can be generated or fetched on demand, it is - * recommended to encapsulate the logic into a `Readable` and use {@link pipe}. However, if calling `write()` is preferred, it is - * possible to respect backpressure and avoid memory issues using the `'drain'` event: - * - * ```js - * function write(data, cb) { - * if (!stream.write(data)) { - * stream.once('drain', cb); - * } else { - * process.nextTick(cb); - * } - * } - * - * // Wait for cb to be called before doing any other write. - * write('hello', () => { - * console.log('Write completed, do more writes now.'); - * }); - * ``` - * - * A `Writable` stream in object mode will always ignore the `encoding` argument. - * @since v0.9.4 - * @param chunk Optional data to write. For streams not operating in object mode, `chunk` must be a string, `Buffer` or `Uint8Array`. For object mode streams, `chunk` may be any - * JavaScript value other than `null`. - * @param [encoding='utf8'] The encoding, if `chunk` is a string. - * @param callback Callback for when this chunk of data is flushed. 
- * @return `false` if the stream wishes for the calling code to wait for the `'drain'` event to be emitted before continuing to write additional data; otherwise `true`. - */ - write(chunk: any, callback?: (error: Error | null | undefined) => void): boolean; - write(chunk: any, encoding: BufferEncoding, callback?: (error: Error | null | undefined) => void): boolean; - /** - * The `writable.setDefaultEncoding()` method sets the default `encoding` for a `Writable` stream. - * @since v0.11.15 - * @param encoding The new default encoding - */ - setDefaultEncoding(encoding: BufferEncoding): this; - /** - * Calling the `writable.end()` method signals that no more data will be written - * to the `Writable`. The optional `chunk` and `encoding` arguments allow one - * final additional chunk of data to be written immediately before closing the - * stream. - * - * Calling the {@link write} method after calling {@link end} will raise an error. - * - * ```js - * // Write 'hello, ' and then end with 'world!'. - * const fs = require('fs'); - * const file = fs.createWriteStream('example.txt'); - * file.write('hello, '); - * file.end('world!'); - * // Writing more now is not allowed! - * ``` - * @since v0.9.4 - * @param chunk Optional data to write. For streams not operating in object mode, `chunk` must be a string, `Buffer` or `Uint8Array`. For object mode streams, `chunk` may be any - * JavaScript value other than `null`. - * @param encoding The encoding if `chunk` is a string - * @param callback Callback for when the stream is finished. - */ - end(cb?: () => void): this; - end(chunk: any, cb?: () => void): this; - end(chunk: any, encoding: BufferEncoding, cb?: () => void): this; - /** - * The `writable.cork()` method forces all written data to be buffered in memory. - * The buffered data will be flushed when either the {@link uncork} or {@link end} methods are called. - * - * The primary intent of `writable.cork()` is to accommodate a situation in which - * several small chunks are written to the stream in rapid succession. Instead of - * immediately forwarding them to the underlying destination, `writable.cork()`buffers all the chunks until `writable.uncork()` is called, which will pass them - * all to `writable._writev()`, if present. This prevents a head-of-line blocking - * situation where data is being buffered while waiting for the first small chunk - * to be processed. However, use of `writable.cork()` without implementing`writable._writev()` may have an adverse effect on throughput. - * - * See also: `writable.uncork()`, `writable._writev()`. - * @since v0.11.2 - */ - cork(): void; - /** - * The `writable.uncork()` method flushes all data buffered since {@link cork} was called. - * - * When using `writable.cork()` and `writable.uncork()` to manage the buffering - * of writes to a stream, defer calls to `writable.uncork()` using`process.nextTick()`. Doing so allows batching of all`writable.write()` calls that occur within a given Node.js event - * loop phase. - * - * ```js - * stream.cork(); - * stream.write('some '); - * stream.write('data '); - * process.nextTick(() => stream.uncork()); - * ``` - * - * If the `writable.cork()` method is called multiple times on a stream, the - * same number of calls to `writable.uncork()` must be called to flush the buffered - * data. 
- * - * ```js - * stream.cork(); - * stream.write('some '); - * stream.cork(); - * stream.write('data '); - * process.nextTick(() => { - * stream.uncork(); - * // The data will not be flushed until uncork() is called a second time. - * stream.uncork(); - * }); - * ``` - * - * See also: `writable.cork()`. - * @since v0.11.2 - */ - uncork(): void; - /** - * Destroy the stream. Optionally emit an `'error'` event, and emit a `'close'`event (unless `emitClose` is set to `false`). After this call, the writable - * stream has ended and subsequent calls to `write()` or `end()` will result in - * an `ERR_STREAM_DESTROYED` error. - * This is a destructive and immediate way to destroy a stream. Previous calls to`write()` may not have drained, and may trigger an `ERR_STREAM_DESTROYED` error. - * Use `end()` instead of destroy if data should flush before close, or wait for - * the `'drain'` event before destroying the stream. - * - * Once `destroy()` has been called any further calls will be a no-op and no - * further errors except from `_destroy()` may be emitted as `'error'`. - * - * Implementors should not override this method, - * but instead implement `writable._destroy()`. - * @since v8.0.0 - * @param error Optional, an error to emit with `'error'` event. - */ - destroy(error?: Error): this; - /** - * Event emitter - * The defined events on documents including: - * 1. close - * 2. drain - * 3. error - * 4. finish - * 5. pipe - * 6. unpipe - */ - addListener(event: 'close', listener: () => void): this; - addListener(event: 'drain', listener: () => void): this; - addListener(event: 'error', listener: (err: Error) => void): this; - addListener(event: 'finish', listener: () => void): this; - addListener(event: 'pipe', listener: (src: Readable) => void): this; - addListener(event: 'unpipe', listener: (src: Readable) => void): this; - addListener(event: string | symbol, listener: (...args: any[]) => void): this; - emit(event: 'close'): boolean; - emit(event: 'drain'): boolean; - emit(event: 'error', err: Error): boolean; - emit(event: 'finish'): boolean; - emit(event: 'pipe', src: Readable): boolean; - emit(event: 'unpipe', src: Readable): boolean; - emit(event: string | symbol, ...args: any[]): boolean; - on(event: 'close', listener: () => void): this; - on(event: 'drain', listener: () => void): this; - on(event: 'error', listener: (err: Error) => void): this; - on(event: 'finish', listener: () => void): this; - on(event: 'pipe', listener: (src: Readable) => void): this; - on(event: 'unpipe', listener: (src: Readable) => void): this; - on(event: string | symbol, listener: (...args: any[]) => void): this; - once(event: 'close', listener: () => void): this; - once(event: 'drain', listener: () => void): this; - once(event: 'error', listener: (err: Error) => void): this; - once(event: 'finish', listener: () => void): this; - once(event: 'pipe', listener: (src: Readable) => void): this; - once(event: 'unpipe', listener: (src: Readable) => void): this; - once(event: string | symbol, listener: (...args: any[]) => void): this; - prependListener(event: 'close', listener: () => void): this; - prependListener(event: 'drain', listener: () => void): this; - prependListener(event: 'error', listener: (err: Error) => void): this; - prependListener(event: 'finish', listener: () => void): this; - prependListener(event: 'pipe', listener: (src: Readable) => void): this; - prependListener(event: 'unpipe', listener: (src: Readable) => void): this; - prependListener(event: string | symbol, listener: (...args: any[]) => 
void): this; - prependOnceListener(event: 'close', listener: () => void): this; - prependOnceListener(event: 'drain', listener: () => void): this; - prependOnceListener(event: 'error', listener: (err: Error) => void): this; - prependOnceListener(event: 'finish', listener: () => void): this; - prependOnceListener(event: 'pipe', listener: (src: Readable) => void): this; - prependOnceListener(event: 'unpipe', listener: (src: Readable) => void): this; - prependOnceListener(event: string | symbol, listener: (...args: any[]) => void): this; - removeListener(event: 'close', listener: () => void): this; - removeListener(event: 'drain', listener: () => void): this; - removeListener(event: 'error', listener: (err: Error) => void): this; - removeListener(event: 'finish', listener: () => void): this; - removeListener(event: 'pipe', listener: (src: Readable) => void): this; - removeListener(event: 'unpipe', listener: (src: Readable) => void): this; - removeListener(event: string | symbol, listener: (...args: any[]) => void): this; - } - interface DuplexOptions extends ReadableOptions, WritableOptions { - allowHalfOpen?: boolean | undefined; - readableObjectMode?: boolean | undefined; - writableObjectMode?: boolean | undefined; - readableHighWaterMark?: number | undefined; - writableHighWaterMark?: number | undefined; - writableCorked?: number | undefined; - construct?(this: Duplex, callback: (error?: Error | null) => void): void; - read?(this: Duplex, size: number): void; - write?(this: Duplex, chunk: any, encoding: BufferEncoding, callback: (error?: Error | null) => void): void; - writev?( - this: Duplex, - chunks: Array<{ - chunk: any; - encoding: BufferEncoding; - }>, - callback: (error?: Error | null) => void - ): void; - final?(this: Duplex, callback: (error?: Error | null) => void): void; - destroy?(this: Duplex, error: Error | null, callback: (error: Error | null) => void): void; - } - /** - * Duplex streams are streams that implement both the `Readable` and `Writable` interfaces. - * - * Examples of `Duplex` streams include: - * - * * `TCP sockets` - * * `zlib streams` - * * `crypto streams` - * @since v0.9.4 - */ - class Duplex extends Readable implements Writable { - readonly writable: boolean; - readonly writableEnded: boolean; - readonly writableFinished: boolean; - readonly writableHighWaterMark: number; - readonly writableLength: number; - readonly writableObjectMode: boolean; - readonly writableCorked: number; - readonly writableNeedDrain: boolean; - readonly closed: boolean; - readonly errored: Error | null; - /** - * If `false` then the stream will automatically end the writable side when the - * readable side ends. Set initially by the `allowHalfOpen` constructor option, - * which defaults to `false`. - * - * This can be changed manually to change the half-open behavior of an existing`Duplex` stream instance, but must be changed before the `'end'` event is - * emitted. - * @since v0.9.4 - */ - allowHalfOpen: boolean; - constructor(opts?: DuplexOptions); - /** - * A utility method for creating duplex streams. - * - * - `Stream` converts writable stream into writable `Duplex` and readable stream - * to `Duplex`. - * - `Blob` converts into readable `Duplex`. - * - `string` converts into readable `Duplex`. - * - `ArrayBuffer` converts into readable `Duplex`. - * - `AsyncIterable` converts into a readable `Duplex`. Cannot yield `null`. - * - `AsyncGeneratorFunction` converts into a readable/writable transform - * `Duplex`. Must take a source `AsyncIterable` as first parameter. 
Cannot yield - * `null`. - * - `AsyncFunction` converts into a writable `Duplex`. Must return - * either `null` or `undefined` - * - `Object ({ writable, readable })` converts `readable` and - * `writable` into `Stream` and then combines them into `Duplex` where the - * `Duplex` will write to the `writable` and read from the `readable`. - * - `Promise` converts into readable `Duplex`. Value `null` is ignored. - * - * @since v16.8.0 - */ - static from(src: Stream | NodeBlob | ArrayBuffer | string | Iterable<any> | AsyncIterable<any> | AsyncGeneratorFunction | Promise<any> | Object): Duplex; - _write(chunk: any, encoding: BufferEncoding, callback: (error?: Error | null) => void): void; - _writev?( - chunks: Array<{ - chunk: any; - encoding: BufferEncoding; - }>, - callback: (error?: Error | null) => void - ): void; - _destroy(error: Error | null, callback: (error: Error | null) => void): void; - _final(callback: (error?: Error | null) => void): void; - write(chunk: any, encoding?: BufferEncoding, cb?: (error: Error | null | undefined) => void): boolean; - write(chunk: any, cb?: (error: Error | null | undefined) => void): boolean; - setDefaultEncoding(encoding: BufferEncoding): this; - end(cb?: () => void): this; - end(chunk: any, cb?: () => void): this; - end(chunk: any, encoding?: BufferEncoding, cb?: () => void): this; - cork(): void; - uncork(): void; - } - type TransformCallback = (error?: Error | null, data?: any) => void; - interface TransformOptions extends DuplexOptions { - construct?(this: Transform, callback: (error?: Error | null) => void): void; - read?(this: Transform, size: number): void; - write?(this: Transform, chunk: any, encoding: BufferEncoding, callback: (error?: Error | null) => void): void; - writev?( - this: Transform, - chunks: Array<{ - chunk: any; - encoding: BufferEncoding; - }>, - callback: (error?: Error | null) => void - ): void; - final?(this: Transform, callback: (error?: Error | null) => void): void; - destroy?(this: Transform, error: Error | null, callback: (error: Error | null) => void): void; - transform?(this: Transform, chunk: any, encoding: BufferEncoding, callback: TransformCallback): void; - flush?(this: Transform, callback: TransformCallback): void; - } - /** - * Transform streams are `Duplex` streams where the output is in some way - * related to the input. Like all `Duplex` streams, `Transform` streams - * implement both the `Readable` and `Writable` interfaces. - * - * Examples of `Transform` streams include: - * - * * `zlib streams` - * * `crypto streams` - * @since v0.9.4 - */ - class Transform extends Duplex { - constructor(opts?: TransformOptions); - _transform(chunk: any, encoding: BufferEncoding, callback: TransformCallback): void; - _flush(callback: TransformCallback): void; - } - /** - * The `stream.PassThrough` class is a trivial implementation of a `Transform` stream that simply passes the input bytes across to the output. Its purpose is - * primarily for examples and testing, but there are some use cases where`stream.PassThrough` is useful as a building block for novel sorts of streams. - */ - class PassThrough extends Transform {} - /** - * Attaches an AbortSignal to a readable or writeable stream. This lets code - * control stream destruction using an `AbortController`. - * - * Calling `abort` on the `AbortController` corresponding to the passed`AbortSignal` will behave the same way as calling `.destroy(new AbortError())`on the stream. 
- * - * ```js - * const fs = require('fs'); - * - * const controller = new AbortController(); - * const read = addAbortSignal( - * controller.signal, - * fs.createReadStream(('object.json')) - * ); - * // Later, abort the operation closing the stream - * controller.abort(); - * ``` - * - * Or using an `AbortSignal` with a readable stream as an async iterable: - * - * ```js - * const controller = new AbortController(); - * setTimeout(() => controller.abort(), 10_000); // set a timeout - * const stream = addAbortSignal( - * controller.signal, - * fs.createReadStream(('object.json')) - * ); - * (async () => { - * try { - * for await (const chunk of stream) { - * await process(chunk); - * } - * } catch (e) { - * if (e.name === 'AbortError') { - * // The operation was cancelled - * } else { - * throw e; - * } - * } - * })(); - * ``` - * @since v15.4.0 - * @param signal A signal representing possible cancellation - * @param stream a stream to attach a signal to - */ - function addAbortSignal<T extends Stream>(signal: AbortSignal, stream: T): T; - interface FinishedOptions extends Abortable { - error?: boolean | undefined; - readable?: boolean | undefined; - writable?: boolean | undefined; - } - /** - * A function to get notified when a stream is no longer readable, writable - * or has experienced an error or a premature close event. - * - * ```js - * const { finished } = require('stream'); - * - * const rs = fs.createReadStream('archive.tar'); - * - * finished(rs, (err) => { - * if (err) { - * console.error('Stream failed.', err); - * } else { - * console.log('Stream is done reading.'); - * } - * }); - * - * rs.resume(); // Drain the stream. - * ``` - * - * Especially useful in error handling scenarios where a stream is destroyed - * prematurely (like an aborted HTTP request), and will not emit `'end'`or `'finish'`. - * - * The `finished` API provides promise version: - * - * ```js - * const { finished } = require('stream/promises'); - * - * const rs = fs.createReadStream('archive.tar'); - * - * async function run() { - * await finished(rs); - * console.log('Stream is done reading.'); - * } - * - * run().catch(console.error); - * rs.resume(); // Drain the stream. - * ``` - * - * `stream.finished()` leaves dangling event listeners (in particular`'error'`, `'end'`, `'finish'` and `'close'`) after `callback` has been - * invoked. The reason for this is so that unexpected `'error'` events (due to - * incorrect stream implementations) do not cause unexpected crashes. - * If this is unwanted behavior then the returned cleanup function needs to be - * invoked in the callback: - * - * ```js - * const cleanup = finished(rs, (err) => { - * cleanup(); - * // ... - * }); - * ``` - * @since v10.0.0 - * @param stream A readable and/or writable stream. - * @param callback A callback function that takes an optional error argument. - * @return A cleanup function which removes all registered listeners. 
- */ - function finished(stream: NodeJS.ReadableStream | NodeJS.WritableStream | NodeJS.ReadWriteStream, options: FinishedOptions, callback: (err?: NodeJS.ErrnoException | null) => void): () => void; - function finished(stream: NodeJS.ReadableStream | NodeJS.WritableStream | NodeJS.ReadWriteStream, callback: (err?: NodeJS.ErrnoException | null) => void): () => void; - namespace finished { - function __promisify__(stream: NodeJS.ReadableStream | NodeJS.WritableStream | NodeJS.ReadWriteStream, options?: FinishedOptions): Promise<void>; - } - type PipelineSourceFunction<T> = () => Iterable<T> | AsyncIterable<T>; - type PipelineSource<T> = Iterable<T> | AsyncIterable<T> | NodeJS.ReadableStream | PipelineSourceFunction<T>; - type PipelineTransform<S extends PipelineTransformSource<any>, U> = - | NodeJS.ReadWriteStream - | ((source: S extends (...args: any[]) => Iterable<infer ST> | AsyncIterable<infer ST> ? AsyncIterable<ST> : S) => AsyncIterable<U>); - type PipelineTransformSource<T> = PipelineSource<T> | PipelineTransform<any, T>; - type PipelineDestinationIterableFunction<T> = (source: AsyncIterable<T>) => AsyncIterable<any>; - type PipelineDestinationPromiseFunction<T, P> = (source: AsyncIterable<T>) => Promise<P>; - type PipelineDestination<S extends PipelineTransformSource<any>, P> = S extends PipelineTransformSource<infer ST> - ? NodeJS.WritableStream | PipelineDestinationIterableFunction<ST> | PipelineDestinationPromiseFunction<ST, P> - : never; - type PipelineCallback<S extends PipelineDestination<any, any>> = S extends PipelineDestinationPromiseFunction<any, infer P> - ? (err: NodeJS.ErrnoException | null, value: P) => void - : (err: NodeJS.ErrnoException | null) => void; - type PipelinePromise<S extends PipelineDestination<any, any>> = S extends PipelineDestinationPromiseFunction<any, infer P> ? Promise<P> : Promise<void>; - interface PipelineOptions { - signal: AbortSignal; - } - /** - * A module method to pipe between streams and generators forwarding errors and - * properly cleaning up and provide a callback when the pipeline is complete. - * - * ```js - * const { pipeline } = require('stream'); - * const fs = require('fs'); - * const zlib = require('zlib'); - * - * // Use the pipeline API to easily pipe a series of streams - * // together and get notified when the pipeline is fully done. - * - * // A pipeline to gzip a potentially huge tar file efficiently: - * - * pipeline( - * fs.createReadStream('archive.tar'), - * zlib.createGzip(), - * fs.createWriteStream('archive.tar.gz'), - * (err) => { - * if (err) { - * console.error('Pipeline failed.', err); - * } else { - * console.log('Pipeline succeeded.'); - * } - * } - * ); - * ``` - * - * The `pipeline` API provides a promise version, which can also - * receive an options argument as the last parameter with a`signal` `AbortSignal` property. When the signal is aborted,`destroy` will be called on the underlying pipeline, with - * an`AbortError`. 
- * - * ```js - * const { pipeline } = require('stream/promises'); - * - * async function run() { - * await pipeline( - * fs.createReadStream('archive.tar'), - * zlib.createGzip(), - * fs.createWriteStream('archive.tar.gz') - * ); - * console.log('Pipeline succeeded.'); - * } - * - * run().catch(console.error); - * ``` - * - * To use an `AbortSignal`, pass it inside an options object, - * as the last argument: - * - * ```js - * const { pipeline } = require('stream/promises'); - * - * async function run() { - * const ac = new AbortController(); - * const signal = ac.signal; - * - * setTimeout(() => ac.abort(), 1); - * await pipeline( - * fs.createReadStream('archive.tar'), - * zlib.createGzip(), - * fs.createWriteStream('archive.tar.gz'), - * { signal }, - * ); - * } - * - * run().catch(console.error); // AbortError - * ``` - * - * The `pipeline` API also supports async generators: - * - * ```js - * const { pipeline } = require('stream/promises'); - * const fs = require('fs'); - * - * async function run() { - * await pipeline( - * fs.createReadStream('lowercase.txt'), - * async function* (source, { signal }) { - * source.setEncoding('utf8'); // Work with strings rather than `Buffer`s. - * for await (const chunk of source) { - * yield await processChunk(chunk, { signal }); - * } - * }, - * fs.createWriteStream('uppercase.txt') - * ); - * console.log('Pipeline succeeded.'); - * } - * - * run().catch(console.error); - * ``` - * - * Remember to handle the `signal` argument passed into the async generator. - * Especially in the case where the async generator is the source for the - * pipeline (i.e. first argument) or the pipeline will never complete. - * - * ```js - * const { pipeline } = require('stream/promises'); - * const fs = require('fs'); - * - * async function run() { - * await pipeline( - * async function* ({ signal }) { - * await someLongRunningfn({ signal }); - * yield 'asd'; - * }, - * fs.createWriteStream('uppercase.txt') - * ); - * console.log('Pipeline succeeded.'); - * } - * - * run().catch(console.error); - * ``` - * - * `stream.pipeline()` will call `stream.destroy(err)` on all streams except: - * - * * `Readable` streams which have emitted `'end'` or `'close'`. - * * `Writable` streams which have emitted `'finish'` or `'close'`. - * - * `stream.pipeline()` leaves dangling event listeners on the streams - * after the `callback` has been invoked. In the case of reuse of streams after - * failure, this can cause event listener leaks and swallowed errors. If the last - * stream is readable, dangling event listeners will be removed so that the last - * stream can be consumed later. - * - * `stream.pipeline()` closes all the streams when an error is raised. - * The `IncomingRequest` usage with `pipeline` could lead to an unexpected behavior - * once it would destroy the socket without sending the expected response. - * See the example below: - * - * ```js - * const fs = require('fs'); - * const http = require('http'); - * const { pipeline } = require('stream'); - * - * const server = http.createServer((req, res) => { - * const fileStream = fs.createReadStream('./fileNotExist.txt'); - * pipeline(fileStream, res, (err) => { - * if (err) { - * console.log(err); // No such file - * // this message can't be sent once `pipeline` already destroyed the socket - * return res.end('error!!!'); - * } - * }); - * }); - * ``` - * @since v10.0.0 - * @param callback Called when the pipeline is fully done. 
- */ - function pipeline<A extends PipelineSource<any>, B extends PipelineDestination<A, any>>( - source: A, - destination: B, - callback?: PipelineCallback<B> - ): B extends NodeJS.WritableStream ? B : NodeJS.WritableStream; - function pipeline<A extends PipelineSource<any>, T1 extends PipelineTransform<A, any>, B extends PipelineDestination<T1, any>>( - source: A, - transform1: T1, - destination: B, - callback?: PipelineCallback<B> - ): B extends NodeJS.WritableStream ? B : NodeJS.WritableStream; - function pipeline<A extends PipelineSource<any>, T1 extends PipelineTransform<A, any>, T2 extends PipelineTransform<T1, any>, B extends PipelineDestination<T2, any>>( - source: A, - transform1: T1, - transform2: T2, - destination: B, - callback?: PipelineCallback<B> - ): B extends NodeJS.WritableStream ? B : NodeJS.WritableStream; - function pipeline< - A extends PipelineSource<any>, - T1 extends PipelineTransform<A, any>, - T2 extends PipelineTransform<T1, any>, - T3 extends PipelineTransform<T2, any>, - B extends PipelineDestination<T3, any> - >(source: A, transform1: T1, transform2: T2, transform3: T3, destination: B, callback?: PipelineCallback<B>): B extends NodeJS.WritableStream ? B : NodeJS.WritableStream; - function pipeline< - A extends PipelineSource<any>, - T1 extends PipelineTransform<A, any>, - T2 extends PipelineTransform<T1, any>, - T3 extends PipelineTransform<T2, any>, - T4 extends PipelineTransform<T3, any>, - B extends PipelineDestination<T4, any> - >(source: A, transform1: T1, transform2: T2, transform3: T3, transform4: T4, destination: B, callback?: PipelineCallback<B>): B extends NodeJS.WritableStream ? B : NodeJS.WritableStream; - function pipeline( - streams: ReadonlyArray<NodeJS.ReadableStream | NodeJS.WritableStream | NodeJS.ReadWriteStream>, - callback?: (err: NodeJS.ErrnoException | null) => void - ): NodeJS.WritableStream; - function pipeline( - stream1: NodeJS.ReadableStream, - stream2: NodeJS.ReadWriteStream | NodeJS.WritableStream, - ...streams: Array<NodeJS.ReadWriteStream | NodeJS.WritableStream | ((err: NodeJS.ErrnoException | null) => void)> - ): NodeJS.WritableStream; - namespace pipeline { - function __promisify__<A extends PipelineSource<any>, B extends PipelineDestination<A, any>>(source: A, destination: B, options?: PipelineOptions): PipelinePromise<B>; - function __promisify__<A extends PipelineSource<any>, T1 extends PipelineTransform<A, any>, B extends PipelineDestination<T1, any>>( - source: A, - transform1: T1, - destination: B, - options?: PipelineOptions - ): PipelinePromise<B>; - function __promisify__<A extends PipelineSource<any>, T1 extends PipelineTransform<A, any>, T2 extends PipelineTransform<T1, any>, B extends PipelineDestination<T2, any>>( - source: A, - transform1: T1, - transform2: T2, - destination: B, - options?: PipelineOptions - ): PipelinePromise<B>; - function __promisify__< - A extends PipelineSource<any>, - T1 extends PipelineTransform<A, any>, - T2 extends PipelineTransform<T1, any>, - T3 extends PipelineTransform<T2, any>, - B extends PipelineDestination<T3, any> - >(source: A, transform1: T1, transform2: T2, transform3: T3, destination: B, options?: PipelineOptions): PipelinePromise<B>; - function __promisify__< - A extends PipelineSource<any>, - T1 extends PipelineTransform<A, any>, - T2 extends PipelineTransform<T1, any>, - T3 extends PipelineTransform<T2, any>, - T4 extends PipelineTransform<T3, any>, - B extends PipelineDestination<T4, any> - >(source: A, transform1: T1, transform2: T2, transform3: T3, transform4: T4, 
destination: B, options?: PipelineOptions): PipelinePromise<B>; - function __promisify__(streams: ReadonlyArray<NodeJS.ReadableStream | NodeJS.WritableStream | NodeJS.ReadWriteStream>, options?: PipelineOptions): Promise<void>; - function __promisify__( - stream1: NodeJS.ReadableStream, - stream2: NodeJS.ReadWriteStream | NodeJS.WritableStream, - ...streams: Array<NodeJS.ReadWriteStream | NodeJS.WritableStream | PipelineOptions> - ): Promise<void>; - } - interface Pipe { - close(): void; - hasRef(): boolean; - ref(): void; - unref(): void; - } - - /** - * Returns whether the stream has encountered an error. - * @since v17.3.0 - */ - function isErrored(stream: Readable | Writable | NodeJS.ReadableStream | NodeJS.WritableStream): boolean; - - /** - * Returns whether the stream is readable. - * @since v17.4.0 - */ - function isReadable(stream: Readable | NodeJS.ReadableStream): boolean; - - const promises: typeof streamPromises; - const consumers: typeof streamConsumers; - } - export = internal; -} -declare module 'node:stream' { - import stream = require('stream'); - export = stream; -} diff --git a/spaces/firsk/ai_otto/text/english_bert_mock.py b/spaces/firsk/ai_otto/text/english_bert_mock.py deleted file mode 100644 index 3b894ced5b6d619a18d6bdd7d7606ba9e6532050..0000000000000000000000000000000000000000 --- a/spaces/firsk/ai_otto/text/english_bert_mock.py +++ /dev/null @@ -1,5 +0,0 @@ -import torch - - -def get_bert_feature(norm_text, word2ph): - return torch.zeros(1024, sum(word2ph)) diff --git a/spaces/firsk/ai_otto/text/japanese.py b/spaces/firsk/ai_otto/text/japanese.py deleted file mode 100644 index 53db38b7349af5a117f81314304d69796c0daf81..0000000000000000000000000000000000000000 --- a/spaces/firsk/ai_otto/text/japanese.py +++ /dev/null @@ -1,586 +0,0 @@ -# Convert Japanese text to phonemes which is -# compatible with Julius https://github.com/julius-speech/segmentation-kit -import re -import unicodedata - -from transformers import AutoTokenizer - -from text import punctuation, symbols - -try: - import MeCab -except ImportError as e: - raise ImportError("Japanese requires mecab-python3 and unidic-lite.") from e -from num2words import num2words - -_CONVRULES = [ - # Conversion of 2 letters - "アァ/ a a", - "イィ/ i i", - "イェ/ i e", - "イャ/ y a", - "ウゥ/ u:", - "エェ/ e e", - "オォ/ o:", - "カァ/ k a:", - "キィ/ k i:", - "クゥ/ k u:", - "クャ/ ky a", - "クュ/ ky u", - "クョ/ ky o", - "ケェ/ k e:", - "コォ/ k o:", - "ガァ/ g a:", - "ギィ/ g i:", - "グゥ/ g u:", - "グャ/ gy a", - "グュ/ gy u", - "グョ/ gy o", - "ゲェ/ g e:", - "ゴォ/ g o:", - "サァ/ s a:", - "シィ/ sh i:", - "スゥ/ s u:", - "スャ/ sh a", - "スュ/ sh u", - "スョ/ sh o", - "セェ/ s e:", - "ソォ/ s o:", - "ザァ/ z a:", - "ジィ/ j i:", - "ズゥ/ z u:", - "ズャ/ zy a", - "ズュ/ zy u", - "ズョ/ zy o", - "ゼェ/ z e:", - "ゾォ/ z o:", - "タァ/ t a:", - "チィ/ ch i:", - "ツァ/ ts a", - "ツィ/ ts i", - "ツゥ/ ts u:", - "ツャ/ ch a", - "ツュ/ ch u", - "ツョ/ ch o", - "ツェ/ ts e", - "ツォ/ ts o", - "テェ/ t e:", - "トォ/ t o:", - "ダァ/ d a:", - "ヂィ/ j i:", - "ヅゥ/ d u:", - "ヅャ/ zy a", - "ヅュ/ zy u", - "ヅョ/ zy o", - "デェ/ d e:", - "ドォ/ d o:", - "ナァ/ n a:", - "ニィ/ n i:", - "ヌゥ/ n u:", - "ヌャ/ ny a", - "ヌュ/ ny u", - "ヌョ/ ny o", - "ネェ/ n e:", - "ノォ/ n o:", - "ハァ/ h a:", - "ヒィ/ h i:", - "フゥ/ f u:", - "フャ/ hy a", - "フュ/ hy u", - "フョ/ hy o", - "ヘェ/ h e:", - "ホォ/ h o:", - "バァ/ b a:", - "ビィ/ b i:", - "ブゥ/ b u:", - "フャ/ hy a", - "ブュ/ by u", - "フョ/ hy o", - "ベェ/ b e:", - "ボォ/ b o:", - "パァ/ p a:", - "ピィ/ p i:", - "プゥ/ p u:", - "プャ/ py a", - "プュ/ py u", - "プョ/ py o", - "ペェ/ p e:", - "ポォ/ p o:", - "マァ/ m a:", - "ミィ/ m i:", - "ムゥ/ m u:", - "ムャ/ my 
a", - "ムュ/ my u", - "ムョ/ my o", - "メェ/ m e:", - "モォ/ m o:", - "ヤァ/ y a:", - "ユゥ/ y u:", - "ユャ/ y a:", - "ユュ/ y u:", - "ユョ/ y o:", - "ヨォ/ y o:", - "ラァ/ r a:", - "リィ/ r i:", - "ルゥ/ r u:", - "ルャ/ ry a", - "ルュ/ ry u", - "ルョ/ ry o", - "レェ/ r e:", - "ロォ/ r o:", - "ワァ/ w a:", - "ヲォ/ o:", - "ディ/ d i", - "デェ/ d e:", - "デャ/ dy a", - "デュ/ dy u", - "デョ/ dy o", - "ティ/ t i", - "テェ/ t e:", - "テャ/ ty a", - "テュ/ ty u", - "テョ/ ty o", - "スィ/ s i", - "ズァ/ z u a", - "ズィ/ z i", - "ズゥ/ z u", - "ズャ/ zy a", - "ズュ/ zy u", - "ズョ/ zy o", - "ズェ/ z e", - "ズォ/ z o", - "キャ/ ky a", - "キュ/ ky u", - "キョ/ ky o", - "シャ/ sh a", - "シュ/ sh u", - "シェ/ sh e", - "ショ/ sh o", - "チャ/ ch a", - "チュ/ ch u", - "チェ/ ch e", - "チョ/ ch o", - "トゥ/ t u", - "トャ/ ty a", - "トュ/ ty u", - "トョ/ ty o", - "ドァ/ d o a", - "ドゥ/ d u", - "ドャ/ dy a", - "ドュ/ dy u", - "ドョ/ dy o", - "ドォ/ d o:", - "ニャ/ ny a", - "ニュ/ ny u", - "ニョ/ ny o", - "ヒャ/ hy a", - "ヒュ/ hy u", - "ヒョ/ hy o", - "ミャ/ my a", - "ミュ/ my u", - "ミョ/ my o", - "リャ/ ry a", - "リュ/ ry u", - "リョ/ ry o", - "ギャ/ gy a", - "ギュ/ gy u", - "ギョ/ gy o", - "ヂェ/ j e", - "ヂャ/ j a", - "ヂュ/ j u", - "ヂョ/ j o", - "ジェ/ j e", - "ジャ/ j a", - "ジュ/ j u", - "ジョ/ j o", - "ビャ/ by a", - "ビュ/ by u", - "ビョ/ by o", - "ピャ/ py a", - "ピュ/ py u", - "ピョ/ py o", - "ウァ/ u a", - "ウィ/ w i", - "ウェ/ w e", - "ウォ/ w o", - "ファ/ f a", - "フィ/ f i", - "フゥ/ f u", - "フャ/ hy a", - "フュ/ hy u", - "フョ/ hy o", - "フェ/ f e", - "フォ/ f o", - "ヴァ/ b a", - "ヴィ/ b i", - "ヴェ/ b e", - "ヴォ/ b o", - "ヴュ/ by u", - # Conversion of 1 letter - "ア/ a", - "イ/ i", - "ウ/ u", - "エ/ e", - "オ/ o", - "カ/ k a", - "キ/ k i", - "ク/ k u", - "ケ/ k e", - "コ/ k o", - "サ/ s a", - "シ/ sh i", - "ス/ s u", - "セ/ s e", - "ソ/ s o", - "タ/ t a", - "チ/ ch i", - "ツ/ ts u", - "テ/ t e", - "ト/ t o", - "ナ/ n a", - "ニ/ n i", - "ヌ/ n u", - "ネ/ n e", - "ノ/ n o", - "ハ/ h a", - "ヒ/ h i", - "フ/ f u", - "ヘ/ h e", - "ホ/ h o", - "マ/ m a", - "ミ/ m i", - "ム/ m u", - "メ/ m e", - "モ/ m o", - "ラ/ r a", - "リ/ r i", - "ル/ r u", - "レ/ r e", - "ロ/ r o", - "ガ/ g a", - "ギ/ g i", - "グ/ g u", - "ゲ/ g e", - "ゴ/ g o", - "ザ/ z a", - "ジ/ j i", - "ズ/ z u", - "ゼ/ z e", - "ゾ/ z o", - "ダ/ d a", - "ヂ/ j i", - "ヅ/ z u", - "デ/ d e", - "ド/ d o", - "バ/ b a", - "ビ/ b i", - "ブ/ b u", - "ベ/ b e", - "ボ/ b o", - "パ/ p a", - "ピ/ p i", - "プ/ p u", - "ペ/ p e", - "ポ/ p o", - "ヤ/ y a", - "ユ/ y u", - "ヨ/ y o", - "ワ/ w a", - "ヰ/ i", - "ヱ/ e", - "ヲ/ o", - "ン/ N", - "ッ/ q", - "ヴ/ b u", - "ー/:", - # Try converting broken text - "ァ/ a", - "ィ/ i", - "ゥ/ u", - "ェ/ e", - "ォ/ o", - "ヮ/ w a", - "ォ/ o", - # Symbols - "、/ ,", - "。/ .", - "!/ !", - "?/ ?", - "・/ ,", -] - -_COLON_RX = re.compile(":+") -_REJECT_RX = re.compile("[^ a-zA-Z:,.?]") - - -def _makerulemap(): - l = [tuple(x.split("/")) for x in _CONVRULES] - return tuple({k: v for k, v in l if len(k) == i} for i in (1, 2)) - - -_RULEMAP1, _RULEMAP2 = _makerulemap() - - -def kata2phoneme(text: str) -> str: - """Convert katakana text to phonemes.""" - text = text.strip() - res = [] - while text: - if len(text) >= 2: - x = _RULEMAP2.get(text[:2]) - if x is not None: - text = text[2:] - res += x.split(" ")[1:] - continue - x = _RULEMAP1.get(text[0]) - if x is not None: - text = text[1:] - res += x.split(" ")[1:] - continue - res.append(text[0]) - text = text[1:] - # res = _COLON_RX.sub(":", res) - return res - - -_KATAKANA = "".join(chr(ch) for ch in range(ord("ァ"), ord("ン") + 1)) -_HIRAGANA = "".join(chr(ch) for ch in range(ord("ぁ"), ord("ん") + 1)) -_HIRA2KATATRANS = str.maketrans(_HIRAGANA, _KATAKANA) - - -def hira2kata(text: str) -> str: - text = text.translate(_HIRA2KATATRANS) - return 
text.replace("う゛", "ヴ") - - -_SYMBOL_TOKENS = set(list("・、。?!")) -_NO_YOMI_TOKENS = set(list("「」『』―()[][]")) -_TAGGER = MeCab.Tagger() - - -def text2kata(text: str) -> str: - parsed = _TAGGER.parse(text) - res = [] - for line in parsed.split("\n"): - if line == "EOS": - break - parts = line.split("\t") - - word, yomi = parts[0], parts[1] - if yomi: - res.append(yomi) - else: - if word in _SYMBOL_TOKENS: - res.append(word) - elif word in ("っ", "ッ"): - res.append("ッ") - elif word in _NO_YOMI_TOKENS: - pass - else: - res.append(word) - return hira2kata("".join(res)) - - -_ALPHASYMBOL_YOMI = { - "#": "シャープ", - "%": "パーセント", - "&": "アンド", - "+": "プラス", - "-": "マイナス", - ":": "コロン", - ";": "セミコロン", - "<": "小なり", - "=": "イコール", - ">": "大なり", - "@": "アット", - "a": "エー", - "b": "ビー", - "c": "シー", - "d": "ディー", - "e": "イー", - "f": "エフ", - "g": "ジー", - "h": "エイチ", - "i": "アイ", - "j": "ジェー", - "k": "ケー", - "l": "エル", - "m": "エム", - "n": "エヌ", - "o": "オー", - "p": "ピー", - "q": "キュー", - "r": "アール", - "s": "エス", - "t": "ティー", - "u": "ユー", - "v": "ブイ", - "w": "ダブリュー", - "x": "エックス", - "y": "ワイ", - "z": "ゼット", - "α": "アルファ", - "β": "ベータ", - "γ": "ガンマ", - "δ": "デルタ", - "ε": "イプシロン", - "ζ": "ゼータ", - "η": "イータ", - "θ": "シータ", - "ι": "イオタ", - "κ": "カッパ", - "λ": "ラムダ", - "μ": "ミュー", - "ν": "ニュー", - "ξ": "クサイ", - "ο": "オミクロン", - "π": "パイ", - "ρ": "ロー", - "σ": "シグマ", - "τ": "タウ", - "υ": "ウプシロン", - "φ": "ファイ", - "χ": "カイ", - "ψ": "プサイ", - "ω": "オメガ", -} - - -_NUMBER_WITH_SEPARATOR_RX = re.compile("[0-9]{1,3}(,[0-9]{3})+") -_CURRENCY_MAP = {"$": "ドル", "¥": "円", "£": "ポンド", "€": "ユーロ"} -_CURRENCY_RX = re.compile(r"([$¥£€])([0-9.]*[0-9])") -_NUMBER_RX = re.compile(r"[0-9]+(\.[0-9]+)?") - - -def japanese_convert_numbers_to_words(text: str) -> str: - res = _NUMBER_WITH_SEPARATOR_RX.sub(lambda m: m[0].replace(",", ""), text) - res = _CURRENCY_RX.sub(lambda m: m[2] + _CURRENCY_MAP.get(m[1], m[1]), res) - res = _NUMBER_RX.sub(lambda m: num2words(m[0], lang="ja"), res) - return res - - -def japanese_convert_alpha_symbols_to_words(text: str) -> str: - return "".join([_ALPHASYMBOL_YOMI.get(ch, ch) for ch in text.lower()]) - - -def japanese_text_to_phonemes(text: str) -> str: - """Convert Japanese text to phonemes.""" - res = unicodedata.normalize("NFKC", text) - res = japanese_convert_numbers_to_words(res) - # res = japanese_convert_alpha_symbols_to_words(res) - res = text2kata(res) - res = kata2phoneme(res) - return res - - -def is_japanese_character(char): - # 定义日语文字系统的 Unicode 范围 - japanese_ranges = [ - (0x3040, 0x309F), # 平假名 - (0x30A0, 0x30FF), # 片假名 - (0x4E00, 0x9FFF), # 汉字 (CJK Unified Ideographs) - (0x3400, 0x4DBF), # 汉字扩展 A - (0x20000, 0x2A6DF), # 汉字扩展 B - # 可以根据需要添加其他汉字扩展范围 - ] - - # 将字符的 Unicode 编码转换为整数 - char_code = ord(char) - - # 检查字符是否在任何一个日语范围内 - for start, end in japanese_ranges: - if start <= char_code <= end: - return True - - return False - - -rep_map = { - ":": ",", - ";": ",", - ",": ",", - "。": ".", - "!": "!", - "?": "?", - "\n": ".", - "·": ",", - "、": ",", - "...": "…", -} - - -def replace_punctuation(text): - pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys())) - - replaced_text = pattern.sub(lambda x: rep_map[x.group()], text) - - replaced_text = re.sub( - r"[^\u3040-\u309F\u30A0-\u30FF\u4E00-\u9FFF\u3400-\u4DBF" - + "".join(punctuation) - + r"]+", - "", - replaced_text, - ) - - return replaced_text - - -def text_normalize(text): - res = unicodedata.normalize("NFKC", text) - res = japanese_convert_numbers_to_words(res) - # res = "".join([i for i in res if is_japanese_character(i)]) - 
 - -def distribute_phone(n_phone, n_word): - phones_per_word = [0] * n_word - for task in range(n_phone): - min_tasks = min(phones_per_word) - min_index = phones_per_word.index(min_tasks) - phones_per_word[min_index] += 1 - return phones_per_word - - -tokenizer = AutoTokenizer.from_pretrained("./bert/bert-base-japanese-v3") - - -def g2p(norm_text): - tokenized = tokenizer.tokenize(norm_text) - phs = [] - ph_groups = [] - for t in tokenized: - if not t.startswith("#"): - ph_groups.append([t]) - else: - ph_groups[-1].append(t.replace("#", "")) - word2ph = [] - for group in ph_groups: - phonemes = kata2phoneme(text2kata("".join(group))) - # phonemes = [i for i in phonemes if i in symbols] - for i in phonemes: - assert i in symbols, (group, norm_text, tokenized) - phone_len = len(phonemes) - word_len = len(group) - - distribution = distribute_phone(phone_len, word_len) - word2ph += distribution - - phs += phonemes - phones = ["_"] + phs + ["_"] - tones = [0 for i in phones] - word2ph = [1] + word2ph + [1] - return phones, tones, word2ph - - -if __name__ == "__main__": - tokenizer = AutoTokenizer.from_pretrained("./bert/bert-base-japanese-v3") - text = "hello,こんにちは、世界!……" - from text.japanese_bert import get_bert_feature - - text = text_normalize(text) - print(text) - phones, tones, word2ph = g2p(text) - bert = get_bert_feature(text, word2ph) - - print(phones, tones, word2ph, bert.shape) 
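 `g2p` relies on `distribute_phone` to spread a token group's phonemes as evenly as possible across its word pieces when building `word2ph`. A quick, dependency-free check of that balancing behaviour: ```python # Each phone is assigned to the word piece with the fewest phones so far, # so the per-piece counts differ by at most one and always sum to n_phone. assert distribute_phone(5, 2) == [3, 2] assert distribute_phone(3, 3) == [1, 1, 1] assert sum(distribute_phone(7, 3)) == 7 and max(distribute_phone(7, 3)) == 3 ``` 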
 diff --git a/spaces/flax-community/Multilingual-VQA/sections/conclusion_future_work/future_work.md b/spaces/flax-community/Multilingual-VQA/sections/conclusion_future_work/future_work.md deleted file mode 100644 index bd009c394c9ee2151e6e98f332b12bff5a07b84a..0000000000000000000000000000000000000000 --- a/spaces/flax-community/Multilingual-VQA/sections/conclusion_future_work/future_work.md +++ /dev/null @@ -1,9 +0,0 @@ -We hope to improve this project in the future by using: -- Superior translation model: Translation has a huge impact on how the end model performs. Better translators (e.g., the Google Translate API) and language-specific seq2seq translation models can generate better data, for both high-resource and low-resource languages. -- Checking translation quality: Inspecting the quality of translated data is as important as the translation model itself. For this, we'll either require native speakers to manually inspect a sample of translated data or devise some unsupervised translation-quality metrics for the same. -- More data: Currently we are using only 2.5M images of Conceptual 12M for image captioning. We plan to include other datasets such as Conceptual Captions 3M, a subset of the YFCC100M dataset, etc. -- Low-resource languages: With better translation tools we also wish to train our model on low-resource languages, which would further democratize the image captioning solution and help people realise the potential of language systems. -- Better training: We can improve our training by experimenting with hyperparameters, optimizers, and learning rate schedulers, which we didn't get the time for during the sprint. -- More models: Currently we stick to CLIP-ViT and mBART-50. However, there are many multilingual models which can be used in place of mBART-50. The ViT transformer officially has many checkpoints which can be combined. We can use any other autoregressive model instead of a seq2seq model trained on multilingual data in order to create a diverse set of models specifically for this task. -- Better deployability: We intend to make several different versions of the model in order to help make it available for mobile-phone deployments. -- More domains: We want to go beyond the domain of natural images and cover medical, artistic, and satellite images, which have several downstream applications, and such a model would be very much in demand. \ No newline at end of file diff --git a/spaces/florim/MedGPT/autogpt/memory/redismem.py b/spaces/florim/MedGPT/autogpt/memory/redismem.py deleted file mode 100644 index 082a812c5362cc9f19e35bf1bb10269b558f7724..0000000000000000000000000000000000000000 --- a/spaces/florim/MedGPT/autogpt/memory/redismem.py +++ /dev/null @@ -1,156 +0,0 @@ -"""Redis memory provider.""" -from __future__ import annotations - -from typing import Any - -import numpy as np -import redis -from colorama import Fore, Style -from redis.commands.search.field import TextField, VectorField -from redis.commands.search.indexDefinition import IndexDefinition, IndexType -from redis.commands.search.query import Query - -from autogpt.llm_utils import create_embedding_with_ada -from autogpt.logs import logger -from autogpt.memory.base import MemoryProviderSingleton - -SCHEMA = [ - TextField("data"), - VectorField( - "embedding", - "HNSW", - {"TYPE": "FLOAT32", "DIM": 1536, "DISTANCE_METRIC": "COSINE"}, - ), -] 
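 The `VectorField` in this schema stores each embedding as raw FLOAT32 bytes, which is also how `add()` and `get_relevant()` below serialize vectors. A small sketch of that round-trip (NumPy only; the 1536 dimension matches `DIM` in the schema): ```python import numpy as np # RediSearch expects the vector as a raw little-endian float32 buffer. vec = np.random.rand(1536).astype(np.float32) blob = vec.tobytes() # what gets HSET into the hash / passed as $vector restored = np.frombuffer(blob, dtype=np.float32) assert restored.shape == (1536,) and np.allclose(vec, restored) ``` 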
- """ - if "Command Error:" in data: - return "" - vector = create_embedding_with_ada(data) - vector = np.array(vector).astype(np.float32).tobytes() - data_dict = {b"data": data, "embedding": vector} - pipe = self.redis.pipeline() - pipe.hset(f"{self.cfg.memory_index}:{self.vec_num}", mapping=data_dict) - _text = ( - f"Inserting data into memory at index: {self.vec_num}:\n" f"data: {data}" - ) - self.vec_num += 1 - pipe.set(f"{self.cfg.memory_index}-vec_num", self.vec_num) - pipe.execute() - return _text - - def get(self, data: str) -> list[Any] | None: - """ - Gets the data from the memory that is most relevant to the given data. - - Args: - data: The data to compare to. - - Returns: The most relevant data. - """ - return self.get_relevant(data, 1) - - def clear(self) -> str: - """ - Clears the redis server. - - Returns: A message indicating that the memory has been cleared. - """ - self.redis.flushall() - return "Obliviated" - - def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None: - """ - Returns all the data in the memory that is relevant to the given data. - Args: - data: The data to compare to. - num_relevant: The number of relevant data to return. - - Returns: A list of the most relevant data. - """ - query_embedding = create_embedding_with_ada(data) - base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]" - query = ( - Query(base_query) - .return_fields("data", "vector_score") - .sort_by("vector_score") - .dialect(2) - ) - query_vector = np.array(query_embedding).astype(np.float32).tobytes() - - try: - results = self.redis.ft(f"{self.cfg.memory_index}").search( - query, query_params={"vector": query_vector} - ) - except Exception as e: - print("Error calling Redis search: ", e) - return None - return [result.data for result in results.docs] - - def get_stats(self): - """ - Returns: The stats of the memory index. 
- """ - return self.redis.ft(f"{self.cfg.memory_index}").info() diff --git a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/curriculums/expertcurriculumsocialaiparamenv.py b/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/curriculums/expertcurriculumsocialaiparamenv.py deleted file mode 100644 index 1196189dc51d2584097f6e596c5fe1d631ad82d9..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/curriculums/expertcurriculumsocialaiparamenv.py +++ /dev/null @@ -1,143 +0,0 @@ -import warnings - -import numpy as np -import random - -class ScaffoldingExpertCurriculum: - - def __init__(self, type, minimum_episodes=1000, average_interval=500, phase_thresholds=(0.75, 0.75)): - self.phase = 1 - self.performance_history = [] - self.phase_two_current_type = None - self.minimum_episodes = minimum_episodes - self.phase_thresholds = phase_thresholds # how many episodes to wait for before starting to compute the estimate - self.average_interval = average_interval # number of episodes to use to estimate current performance (100 ~ 10 updated) - self.mean_perf = 0 - self.max_mean_perf = 0 - self.type = type - - def get_status_dict(self): - return { - "curriculum_phase": self.phase, - "curriculum_performance_history": self.performance_history, - } - - def load_status_dict(self, status): - self.phase = status["curriculum_phase"] - self.performance_history = status["curriculum_performance_history"] - - @staticmethod - def select(children, label): - ch = list(filter(lambda c: c.label == label, children)) - - if len(ch) == 0: - raise ValueError(f"Label {label} not found in children {children}.") - elif len(ch) > 1: - raise ValueError(f"Multiple labels {label} found in children {children}.") - - selected = ch[0] - assert selected is not None - return selected - - def choose(self, node, chosen_parameters): - """ - Choose a child of the parameter node. - All the parameters used here should be updated by set_curriculum_parameters. 
- """ - assert node.type == 'param' - - # E + scaf - # E + full - # AE + full - - # N cs -> N full -> A/E/N/AE full -> AE full - - # A/E/N/AE scaf/full -> AE full - if len(self.phase_thresholds) < 2: - warnings.WarningMessage(f"Num of thresholds ({len(self.phase_thresholds)}) is less than the num of phases.") - - if node.label == "Scaffolding": - - if self.type == "intro_seq": - return ScaffoldingExpertCurriculum.select(node.children, "N") - - elif self.type == "intro_seq_scaf": - if self.phase in [1]: - return random.choice(node.children) - - elif self.phase in [2]: - return ScaffoldingExpertCurriculum.select(node.children, "N") - - else: - raise ValueError(f"Undefined phase {self.phase}.") - - else: - raise ValueError(f"Curriculum type {self.type} unknown.") - - elif node.label == "Pragmatic_frame_complexity": - - if self.type not in ["intro_seq", "intro_seq_scaf"]: - raise ValueError(f"Undefined type {self.type}.") - - if self.phase in [1]: - # return random.choice(node.children) - return random.choice([ - ScaffoldingExpertCurriculum.select(node.children, "No"), - ScaffoldingExpertCurriculum.select(node.children, "Ask"), - ScaffoldingExpertCurriculum.select(node.children, "Eye_contact"), - ScaffoldingExpertCurriculum.select(node.children, "Ask_Eye_contact"), - ]) - - elif self.phase in [2]: - return ScaffoldingExpertCurriculum.select(node.children, "Ask_Eye_contact") - - else: - raise ValueError(f"Undefined phase {self.phase}") - - else: - return random.choice(node.children) - - def set_parameters(self, params): - """ - Set ALL the parameters used in choose. - This is important for parallel environments. This function is called by broadcast_curriculum_parameters() - """ - self.phase = params["phase"] - self.mean_perf = params["mean_perf"] - self.max_mean_perf = params["max_mean_perf"] - - def get_parameters(self): - """ - Get ALL the parameters used in choose. Used when restoring the curriculum. - """ - return { - "phase": self.phase, - "mean_perf": self.mean_perf, - "max_mean_perf": self.max_mean_perf, - } - - def update_parameters(self, data): - """ - Updates the parameters of the ACL used in choose(). 
 - If using parallel processes, these parameters should be broadcast with broadcast_curriculum_parameters() - """ - for obs, reward, done, info in zip(data["obs"], data["reward"], data["done"], data["info"]): - if not done: - continue - - self.performance_history.append(info["success"]) - self.mean_perf = np.mean(self.performance_history[-self.average_interval:]) - self.max_mean_perf = max(self.mean_perf, self.max_mean_perf) - - if self.phase in [1]: - if len(self.performance_history) > self.minimum_episodes and self.mean_perf >= self.phase_thresholds[self.phase-1]: - # next phase - self.phase = self.phase + 1 - self.performance_history = [] - self.max_mean_perf = 0 - - return self.get_parameters() - - def get_info(self): - return {"param": self.phase, "mean_perf": self.mean_perf, "max_mean_perf": self.max_mean_perf} - 
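 The transition out of phase 1 above requires both that `minimum_episodes` have been seen and that the rolling success rate clears the first threshold. A small sketch that drives `update_parameters` with synthetic episode data (the dict mirrors the shapes the trainer passes in): ```python curriculum = ScaffoldingExpertCurriculum(type="intro_seq_scaf", minimum_episodes=10) # 20 fake successful episodes; obs/reward are ignored by the phase logic. data = { "obs": [None] * 20, "reward": [1.0] * 20, "done": [True] * 20, "info": [{"success": 1.0}] * 20, } params = curriculum.update_parameters(data) print(params["phase"]) # 2: mean success 1.0 >= 0.75 once 10+ episodes are seen ``` 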
 diff --git a/spaces/foghuang/ChatGLM2-6B/README.md b/spaces/foghuang/ChatGLM2-6B/README.md deleted file mode 100644 index dc238d8886efe0dacbc306ae4db9427143e764ce..0000000000000000000000000000000000000000 --- a/spaces/foghuang/ChatGLM2-6B/README.md +++ /dev/null @@ -1,341 +0,0 @@ ---- -title: ChatGLM2-6B -app_file: web_demo.py -sdk: gradio -sdk_version: 3.35.2 ---- -# ChatGLM2-6B - -<p align="center"> -🤗 <a href="https://huggingface.co/THUDM/chatglm2-6b" target="_blank">HF Repo</a> • 🐦 <a href="https://twitter.com/thukeg" target="_blank">Twitter</a> • 📃 <a href="https://arxiv.org/abs/2103.10360" target="_blank">[GLM@ACL 22]</a> <a href="https://github.com/THUDM/GLM" target="_blank">[GitHub]</a> • 📃 <a href="https://arxiv.org/abs/2210.02414" target="_blank">[GLM-130B@ICLR 23]</a> <a href="https://github.com/THUDM/GLM-130B" target="_blank">[GitHub]</a> <br> -</p> -<p align="center"> - 👋 Join our <a href="https://join.slack.com/t/chatglm/shared_invite/zt-1udqapmrr-ocT1DS_mxWe6dDY8ahRWzg" target="_blank">Slack</a> and <a href="resources/WECHAT.md" target="_blank">WeChat</a> -</p> - -*Read this in [English](README_EN.md)* - -## Introduction - -ChatGLM**2**-6B is the second-generation version of the open-source bilingual (Chinese-English) chat model [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B). While retaining many excellent features of the first-generation model, such as smooth conversation and a low deployment threshold, ChatGLM**2**-6B introduces the following new features: - -1. **Stronger performance**: Building on the development experience of the first-generation ChatGLM model, we fully upgraded the base model of ChatGLM2-6B. ChatGLM2-6B uses the hybrid objective function of [GLM](https://github.com/THUDM/GLM) and has gone through pre-training on 1.4T Chinese and English tokens as well as human-preference alignment. The evaluation results below show that, compared with the first-generation model, ChatGLM2-6B achieves large improvements on datasets such as MMLU (+23%), CEval (+33%), GSM8K (+571%), and BBH (+60%), making it highly competitive among open-source models of the same size. -2. **Longer context**: Based on [FlashAttention](https://github.com/HazyResearch/flash-attention), we extended the context length of the base model from 2K in ChatGLM-6B to 32K, and trained with a context length of 8K during the dialogue stage to allow more rounds of conversation. However, the current version of ChatGLM2-6B has limited ability to understand single-turn ultra-long documents, which we will focus on optimizing in future upgrades. -3. **More efficient inference**: Based on [Multi-Query Attention](http://arxiv.org/abs/1911.02150), ChatGLM2-6B has faster inference and lower GPU memory usage: under the official implementation, inference is 42% faster than the first generation, and under INT4 quantization the dialogue length supported by 6 GB of GPU memory grows from 1K to 8K. -4. **A more open license**: The ChatGLM2-6B weights are **fully open** for academic research, and **commercial use is also permitted** after obtaining official written permission. If you find our open-source model useful for your business, we welcome donations toward the development of the next-generation model, ChatGLM3. - ------ - -The ChatGLM2-6B open-source model aims to advance large-model technology together with the open-source community. We sincerely ask developers and users to comply with the [open-source license](MODEL_LICENSE) and not to use the open-source model, code, or derivatives of this open-source project for any purpose that may harm the country or society, or for any service that has not undergone safety evaluation and registration. **Currently, this project team has not developed any application based on ChatGLM2-6B, including web, Android, Apple iOS, or Windows apps.** - -Although the model strives to ensure data compliance and accuracy at every stage of training, the accuracy of the output cannot be guaranteed given the relatively small size of the ChatGLM2-6B model and its susceptibility to probabilistic randomness, and the model can easily be misled. **This project assumes no responsibility for data-security or public-opinion risks caused by the open-source model and code, or for any risk arising from the model being misled, misused, spread, or improperly exploited.** - -## Evaluation Results -We selected some typical Chinese and English datasets for evaluation. Below are the results of the ChatGLM2-6B model on [MMLU](https://github.com/hendrycks/test) (English), [C-Eval](https://cevalbenchmark.com/static/leaderboard.html) (Chinese), [GSM8K](https://github.com/openai/grade-school-math) (math), and [BBH](https://github.com/suzgunmirac/BIG-Bench-Hard) (English). A script for evaluating on C-Eval is provided in [evaluation](./evaluation/README.md). - -### MMLU - -| Model | Average | STEM | Social Sciences | Humanities | Others | -| ----- | ----- | ---- | ----- | ----- | ----- | -| ChatGLM-6B | 40.63 | 33.89 | 44.84 | 39.02 | 45.71 | -| ChatGLM2-6B (base) | 47.86 | 41.20 | 54.44 | 43.66 | 54.46 | -| ChatGLM2-6B | 45.46 | 40.06 | 51.61 | 41.23 | 51.24 | - -> Chat models are tested with zero-shot CoT (Chain-of-Thought); base models are tested with few-shot answer-only - -### C-Eval - -| Model | Average | STEM | Social Sciences | Humanities | Others | -| ----- | ---- | ---- | ----- | ----- | ----- | -| ChatGLM-6B | 38.9 | 33.3 | 48.3 | 41.3 | 38.0 | -| ChatGLM2-6B (base) | 51.7 | 48.6 | 60.5 | 51.3 | 49.8 | -| ChatGLM2-6B | 50.1 | 46.4 | 60.4 | 50.6 | 46.9 | - -> Chat models are tested with zero-shot CoT; base models are tested with few-shot answer-only - -### GSM8K - -| Model | Accuracy | Accuracy (Chinese)* | -| ----- | ----- | ----- | -| ChatGLM-6B | 4.82 | 5.85 | -| ChatGLM2-6B (base) | 32.37 | 28.95 | -| ChatGLM2-6B | 28.05 | 20.45 | - -> All models are tested with few-shot CoT; the CoT prompt comes from http://arxiv.org/abs/2201.11903 -> -> \* We translated 500 GSM8K questions and the CoT prompt with a translation API and proofread them manually - - -### BBH - -| Model | Accuracy | -| ----- | ----- | -| ChatGLM-6B | 18.73 | -| ChatGLM2-6B (base) | 33.68 | -| ChatGLM2-6B | 30.00 | - -> All models are tested with few-shot CoT; the CoT prompt comes from https://github.com/suzgunmirac/BIG-Bench-Hard/tree/main/cot-prompts - -## Inference Performance -ChatGLM2-6B uses [Multi-Query Attention](http://arxiv.org/abs/1911.02150), which improves generation speed. The average speed for generating 2000 characters is compared below - -| Model | Inference speed (chars/s) | -| ---- | ----- | -| ChatGLM-6B | 31.49 | -| ChatGLM2-6B | 44.62 | - -> Measured with the official implementation, batch size = 1, max length = 2048, bf16 precision, on A100-SXM4-80G hardware with PyTorch 2.0.1 - -Multi-Query Attention also reduces the GPU memory usage of the KV cache during generation. In addition, ChatGLM2-6B uses a causal mask for dialogue training, which lets consecutive dialogues reuse the KV cache of previous turns, further reducing memory usage. As a result, when running INT4-quantized inference on a GPU with 6 GB of memory, the first-generation ChatGLM-6B can generate at most 1119 characters before reporting out-of-memory, while ChatGLM2-6B can generate at least 8192 characters. - -| **Quantization level** | **Minimum GPU memory to encode 2048 tokens** | **Minimum GPU memory to generate 8192 tokens** | -| -------------- |---------------------|---------------------| -| FP16 / BF16 | 13.1 GB | 12.8 GB | -| INT8 | 8.2 GB | 8.1 GB | -| INT4 | 5.5 GB | 5.1 GB | - -> ChatGLM2-6B uses `torch.nn.functional.scaled_dot_product_attention`, introduced in PyTorch 2.0, for efficient attention computation. With an older PyTorch version it falls back to a naive attention implementation, and memory usage can be higher than in the table above. - -We also tested the impact of quantization on model performance. The results show that the impact is within an acceptable range. - -| Quantization level | Accuracy (MMLU) | Accuracy (C-Eval dev) | -| ----- | ----- |-----------------------| -| BF16 | 45.47 | 53.57 | -| INT4 | 43.13 | 50.30 | - - - -## ChatGLM2-6B Examples - -Compared with the first-generation model, ChatGLM2-6B improves across multiple dimensions. Below are some comparison examples. More possibilities of ChatGLM2-6B await your exploration! 
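 The characters-per-second figures in the inference table above can be approximated with a simple timing loop; a rough sketch, not the official benchmark script, assuming a CUDA machine with the `THUDM/chatglm2-6b` checkpoint available: ```python import time from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True) model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True, device='cuda').eval() start = time.perf_counter() # Ask for a long answer so generation dominates the measured time. response, _ = model.chat(tokenizer, "请写一篇两千字的文章,介绍人工智能的历史。", history=[], max_length=2048) elapsed = time.perf_counter() - start print(f"{len(response) / elapsed:.1f} chars/s") # the table above reports ~44.6 on an A100 ``` 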
 <details><summary><b>Mathematical reasoning</b></summary> - -![](resources/math.png) - -</details> - -<details><summary><b>Knowledge reasoning</b></summary> - -![](resources/knowledge.png) - -</details> - -<details><summary><b>Long-document understanding</b></summary> - -![](resources/long-context.png) - -</details> - -## Usage -### Environment Setup -First clone this repository: -```shell -git clone https://github.com/THUDM/ChatGLM2-6B -cd ChatGLM2-6B -``` - -Then install the dependencies with pip: `pip install -r requirements.txt`. The recommended version of the `transformers` library is `4.30.2`, and `torch` 2.0 or above is recommended for the best inference performance. - -### Calling the Model from Code - -You can generate a conversation with the ChatGLM2-6B model using the following code: - -```python ->>> from transformers import AutoTokenizer, AutoModel ->>> tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True) ->>> model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True, device='cuda') ->>> model = model.eval() ->>> response, history = model.chat(tokenizer, "你好", history=[]) ->>> print(response) -你好👋!我是人工智能助手 ChatGLM2-6B,很高兴见到你,欢迎问我任何问题。 ->>> response, history = model.chat(tokenizer, "晚上睡不着应该怎么办", history=history) ->>> print(response) -晚上睡不着可能会让你感到焦虑或不舒服,但以下是一些可以帮助你入睡的方法: - -1. 制定规律的睡眠时间表:保持规律的睡眠时间表可以帮助你建立健康的睡眠习惯,使你更容易入睡。尽量在每天的相同时间上床,并在同一时间起床。 -2. 创造一个舒适的睡眠环境:确保睡眠环境舒适,安静,黑暗且温度适宜。可以使用舒适的床上用品,并保持房间通风。 -3. 放松身心:在睡前做些放松的活动,例如泡个热水澡,听些轻柔的音乐,阅读一些有趣的书籍等,有助于缓解紧张和焦虑,使你更容易入睡。 -4. 避免饮用含有咖啡因的饮料:咖啡因是一种刺激性物质,会影响你的睡眠质量。尽量避免在睡前饮用含有咖啡因的饮料,例如咖啡,茶和可乐。 -5. 避免在床上做与睡眠无关的事情:在床上做些与睡眠无关的事情,例如看电影,玩游戏或工作等,可能会干扰你的睡眠。 -6. 尝试呼吸技巧:深呼吸是一种放松技巧,可以帮助你缓解紧张和焦虑,使你更容易入睡。试着慢慢吸气,保持几秒钟,然后缓慢呼气。 - -如果这些方法无法帮助你入睡,你可以考虑咨询医生或睡眠专家,寻求进一步的建议。 -``` - -#### Loading the Model Locally -The code above automatically downloads the model implementation and parameters via `transformers`. The complete model implementation is on the [Hugging Face Hub](https://huggingface.co/THUDM/chatglm2-6b). If your network environment is poor, downloading the model parameters may take a long time or even fail. In that case, you can download the model to your local machine first and then load it locally. - -To download the model from the Hugging Face Hub, first [install Git LFS](https://docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage), then run -```Shell -git clone https://huggingface.co/THUDM/chatglm2-6b -``` - -If downloading the checkpoint from the Hugging Face Hub is slow, you can download only the model implementation -```Shell -GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/THUDM/chatglm2-6b -``` -Then manually download the model parameter files from [here](https://cloud.tsinghua.edu.cn/d/674208019e314311ab5c/) and replace the downloaded files in the local `chatglm2-6b` directory. - - -After downloading the model locally, replace `THUDM/chatglm2-6b` in the code above with the path of your local `chatglm2-6b` folder to load the model locally. - -The model implementation is still subject to change. If you want to pin the model implementation for compatibility, add the `revision="v1.0"` parameter to the `from_pretrained` call. `v1.0` is the latest version number; see the [Change Log](https://huggingface.co/THUDM/chatglm2-6b#change-log) for the full list of versions. - -### Web Demo - -![web-demo](resources/web-demo.gif) - -First install Gradio: `pip install gradio`, then run [web_demo.py](web_demo.py) from the repo: - -```shell -python web_demo.py -``` - -The program runs a web server and prints the address. Open the printed address in a browser to use it. -> It launches with `share=False` by default, which does not generate a public link. If public access is needed, change it to `share=True`. -> - -Thanks to [@AdamBear](https://github.com/AdamBear) for implementing the Streamlit-based web demo `web_demo2.py`. To use it, first install the additional dependencies: -```shell -pip install streamlit streamlit-chat -``` -Then run it with: -```shell -streamlit run web_demo2.py -``` -In our tests, the Streamlit-based web demo is smoother when the input prompt is long. - -### CLI Demo - -![cli-demo](resources/cli-demo.png) - -Run [cli_demo.py](cli_demo.py) from the repo: - -```shell -python cli_demo.py -``` - -The program holds an interactive conversation on the command line; type an instruction and press Enter to generate a reply, type `clear` to clear the dialogue history, and type `stop` to terminate the program. - -### API Deployment -First install the additional dependencies `pip install fastapi uvicorn`, then run [api.py](api.py) from the repo: -```shell -python api.py -``` -By default it is deployed on local port 8000 and called via POST -```shell -curl -X POST "http://127.0.0.1:8000" \ - -H 'Content-Type: application/json' \ - -d '{"prompt": "你好", "history": []}' -``` 
 The returned value is -```shell -{ - "response":"你好👋!我是人工智能助手 ChatGLM2-6B,很高兴见到你,欢迎问我任何问题。", - "history":[["你好","你好👋!我是人工智能助手 ChatGLM2-6B,很高兴见到你,欢迎问我任何问题。"]], - "status":200, - "time":"2023-03-23 21:38:40" -} -``` -Thanks to [@hiyouga]() for implementing an OpenAI-format streaming API deployment, which can serve as the backend for any ChatGPT-based application, such as [ChatGPT-Next-Web](https://github.com/Yidadaa/ChatGPT-Next-Web). Deploy it by running [openai_api.py](openai_api.py) from the repo: -```shell -python openai_api.py -``` -Example code for calling the API: -```python -import openai -if __name__ == "__main__": - openai.api_base = "http://localhost:8000/v1" - openai.api_key = "none" - for chunk in openai.ChatCompletion.create( - model="chatglm2-6b", - messages=[ - {"role": "user", "content": "你好"} - ], - stream=True - ): - if hasattr(chunk.choices[0].delta, "content"): - print(chunk.choices[0].delta.content, end="", flush=True) -``` - - -## Low-Cost Deployment - -### Model Quantization - -By default, the model is loaded in FP16 precision, and running the code above requires about 13 GB of GPU memory. If your GPU memory is limited, you can try loading the model in quantized mode as follows: - -```python -# Modify as needed; currently only 4/8-bit quantization is supported -model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).quantize(8).cuda() -``` - -Model quantization brings some performance loss. In our tests, ChatGLM2-6B can still generate naturally and fluently under 4-bit quantization. - -If your memory is insufficient, you can load the quantized model directly: -```python -model = AutoModel.from_pretrained("THUDM/chatglm2-6b-int4",trust_remote_code=True).cuda() -``` - -<!-- The parameter files of the quantized model can also be downloaded manually from [here](https://cloud.tsinghua.edu.cn/d/674208019e314311ab5c/). --> - -### CPU Deployment - -If you have no GPU hardware, you can also run inference on the CPU, but inference will be much slower. Usage is as follows (about 32 GB of RAM required) -```python -model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).float() -``` -If your RAM is insufficient, you can also use the quantized model -```python -model = AutoModel.from_pretrained("THUDM/chatglm2-6b-int4",trust_remote_code=True).float() -``` -Running the quantized model on the CPU requires `gcc` and `openmp`. Most Linux distributions have them installed by default. For Windows, check `openmp` when installing [TDM-GCC](https://jmeubank.github.io/tdm-gcc/). The Windows test environment uses `gcc` version `TDM-GCC 10.3.0`, and Linux uses `gcc 11.3.0`. On MacOS, see [Q1](FAQ.md#q1). - -### Mac Deployment - -For Macs with Apple Silicon or an AMD GPU, you can use the MPS backend to run ChatGLM2-6B on the GPU. Follow Apple's [official instructions](https://developer.apple.com/metal/pytorch) to install PyTorch-Nightly (the correct version number should be 2.x.x.dev2023xxxx, not 2.x.x). - -Currently, only [loading the model locally](README.md#从本地加载模型) is supported on MacOS. Change the model loading in the code to load locally, and use the mps backend: -```python -model = AutoModel.from_pretrained("your local path", trust_remote_code=True).to('mps') -``` - -Loading the half-precision ChatGLM2-6B model requires about 13 GB of RAM. Machines with less RAM (such as a MacBook Pro with 16 GB of RAM) will use virtual memory on disk when free RAM is insufficient, severely slowing down inference. -In that case, you can use the quantized model chatglm2-6b-int4. Because the GPU quantization kernels are written in CUDA, they cannot be used on MacOS, and inference can only run on the CPU. -To make full use of CPU parallelism, you also need to [install OpenMP separately](FAQ.md#q1). - -### Multi-GPU Deployment -If you have multiple GPUs but each GPU's memory is too small to hold the complete model, you can split the model across multiple GPUs. First install accelerate: `pip install accelerate`, then load the model as follows: -```python -from utils import load_model_on_gpus -model = load_model_on_gpus("THUDM/chatglm2-6b", num_gpus=2) -``` -This deploys the model on two GPUs for inference. You can change `num_gpus` to the number of GPUs you want to use. The model is split evenly by default, but you can also pass a `device_map` argument to specify the mapping yourself. - -## License - -The code in this repository is open-sourced under the [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) license. Use of the ChatGLM2-6B model weights must follow the [Model License](MODEL_LICENSE). The ChatGLM2-6B weights are **fully open** for academic research, and **commercial use is also permitted** after obtaining official written permission. If you find our open-source model useful for your business, we welcome donations toward the development of the next-generation model, ChatGLM3. To apply for a commercial license or to donate, contact [yiwen.xu@zhipuai.cn](mailto:yiwen.xu@zhipuai.cn). - - -## Citation - -If you find our work helpful, please consider citing the following papers. The ChatGLM2-6B paper will be published soon, stay tuned! - -``` -@article{zeng2022glm, - title={Glm-130b: An open bilingual pre-trained model}, - author={Zeng, Aohan and Liu, Xiao and Du, Zhengxiao and Wang, Zihan and Lai, Hanyu and Ding, Ming and Yang, Zhuoyi and Xu, Yifan and Zheng, Wendi and Xia, Xiao and others}, - journal={arXiv preprint arXiv:2210.02414}, - year={2022} -} -``` 
 ``` -@inproceedings{du2022glm, - title={GLM: General Language Model Pretraining with Autoregressive Blank Infilling}, - author={Du, Zhengxiao and Qian, Yujie and Liu, Xiao and Ding, Ming and Qiu, Jiezhong and Yang, Zhilin and Tang, Jie}, - booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, - pages={320--335}, - year={2022} -} -``` diff --git a/spaces/fornaxai/RNet/index.html b/spaces/fornaxai/RNet/index.html deleted file mode 100644 index 12cd456576890b2e9f01b6a6fc22411dea9092c7..0000000000000000000000000000000000000000 --- a/spaces/fornaxai/RNet/index.html +++ /dev/null @@ -1,17 +0,0 @@ -<!DOCTYPE html> -<html> - <head> - <meta charset="utf-8" /> - <meta name="viewport" content="width=device-width" /> - <title>My static Space</title> - </head> 

 - <body> - Loading may take some time. Please wait. - <!-- remaining body markup (script/embed tags) was stripped during extraction and is not recoverable --> - </body> -</html> 
 diff --git a/spaces/fuckyoudeki/AutoGPT/tests/integration/memory_tests.py b/spaces/fuckyoudeki/AutoGPT/tests/integration/memory_tests.py deleted file mode 100644 index eead2da1cfa9b8a99592939623955808fc430068..0000000000000000000000000000000000000000 --- a/spaces/fuckyoudeki/AutoGPT/tests/integration/memory_tests.py +++ /dev/null @@ -1,49 +0,0 @@ -import random -import string -import sys -import unittest -from pathlib import Path - -from autogpt.config import Config -from autogpt.memory.local import LocalCache - - -class TestLocalCache(unittest.TestCase): - def random_string(self, length): - return "".join(random.choice(string.ascii_letters) for _ in range(length)) - - def setUp(self): - cfg = Config() - self.cache = LocalCache(cfg) - self.cache.clear() - - # Add example texts to the cache - self.example_texts = [ - "The quick brown fox jumps over the lazy dog", - "I love machine learning and natural language processing", - "The cake is a lie, but the pie is always true", - "ChatGPT is an advanced AI model for conversation", - ] - - for text in self.example_texts: - self.cache.add(text) - - # Add some random strings to test noise - for _ in range(5): - self.cache.add(self.random_string(10)) - - def test_get_relevant(self): - query = "I'm interested in artificial intelligence and NLP" - k = 3 - relevant_texts = self.cache.get_relevant(query, k) - - print(f"Top {k} relevant texts for the query '{query}':") - for i, text in enumerate(relevant_texts, start=1): - print(f"{i}. {text}") - - self.assertEqual(len(relevant_texts), k) - self.assertIn(self.example_texts[1], relevant_texts) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/fun-research/FC-CLIP/fcclip/data/datasets/register_coco_instance.py b/spaces/fun-research/FC-CLIP/fcclip/data/datasets/register_coco_instance.py deleted file mode 100644 index d8c1c7ac9314a7f8ebf1b006a0384e8a79d2e9d8..0000000000000000000000000000000000000000 --- a/spaces/fun-research/FC-CLIP/fcclip/data/datasets/register_coco_instance.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import json -import logging -import numpy as np -import os -from PIL import Image - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.data.datasets.coco import load_coco_json, register_coco_instances -from detectron2.utils.file_io import PathManager - -from . import openseg_classes -import copy -COCO_CATEGORIES = openseg_classes.get_coco_categories_with_prompt_eng() -COCO_CATEGORIES = [x for x in COCO_CATEGORIES if x["isthing"] == 1] - -_PREDEFINED_SPLITS = { - # point annotations without masks - "openvocab_coco_2017_train": ( - "coco/train2017", - "coco/annotations/instances_train2017.json", - ), - "openvocab_coco_2017_val": ( - "coco/val2017", - "coco/annotations/instances_val2017.json", - ), -} - - -def _get_coco_instances_meta(): - thing_ids = [k["id"] for k in COCO_CATEGORIES] - assert len(thing_ids) == 80, len(thing_ids) - # Mapping from the incontiguous COCO category id to a contiguous id in [0, 79] - thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)} - thing_classes = [k["name"] for k in COCO_CATEGORIES] - ret = { - "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id, - "thing_classes": thing_classes, - } - return ret - - -def register_all_coco_instance(root): - for key, (image_root, json_file) in _PREDEFINED_SPLITS.items(): - # Assume pre-defined datasets live in `./datasets`. 
- register_coco_instances( - key, - _get_coco_instances_meta(), - os.path.join(root, json_file) if "://" not in json_file else json_file, - os.path.join(root, image_root), - ) - - -_root = os.getenv("DETECTRON2_DATASETS", "datasets") -register_all_coco_instance(_root) diff --git a/spaces/fuxin123zz/ChuanhuChatGPT/assets/custom.js b/spaces/fuxin123zz/ChuanhuChatGPT/assets/custom.js deleted file mode 100644 index 7b1761043149ff97ca498501c87a0d15db5258ee..0000000000000000000000000000000000000000 --- a/spaces/fuxin123zz/ChuanhuChatGPT/assets/custom.js +++ /dev/null @@ -1 +0,0 @@ -// custom javascript here \ No newline at end of file diff --git a/spaces/g4f/freegpt-webui/client/js/highlight.min.js b/spaces/g4f/freegpt-webui/client/js/highlight.min.js deleted file mode 100644 index d410b45b38119606525a0a7c0c60c428c5ee6eb7..0000000000000000000000000000000000000000 --- a/spaces/g4f/freegpt-webui/client/js/highlight.min.js +++ /dev/null @@ -1 +0,0 @@ -var hljs=function(){"use strict";var e={exports:{}};function n(e){return e instanceof Map?e.clear=e.delete=e.set=()=>{throw Error("map is read-only")}:e instanceof Set&&(e.add=e.clear=e.delete=()=>{throw Error("set is read-only")}),Object.freeze(e),Object.getOwnPropertyNames(e).forEach(t=>{var a=e[t];"object"!=typeof a||Object.isFrozen(a)||n(a)}),e}e.exports=n,e.exports.default=n;class t{constructor(e){void 0===e.data&&(e.data={}),this.data=e.data,this.isMatchIgnored=!1}ignoreMatch(){this.isMatchIgnored=!0}}function a(e){return e.replace(/&/g,"&").replace(//g,">").replace(/"/g,""").replace(/'/g,"'")}function i(e,...n){let t=Object.create(null);for(let a in e)t[a]=e[a];return n.forEach(e=>{for(let n in e)t[n]=e[n]}),t}let r=e=>!!e.scope||e.sublanguage&&e.language;class s{constructor(e,n){this.buffer="",this.classPrefix=n.classPrefix,e.walk(this)}addText(e){this.buffer+=a(e)}openNode(e){if(!r(e))return;let n="";n=e.sublanguage?"language-"+e.language:((e,{prefix:n})=>{if(e.includes(".")){let t=e.split(".");return[`${n}${t.shift()}`,...t.map((e,n)=>`${e}${"_".repeat(n+1)}`),].join(" ")}return`${n}${e}`})(e.scope,{prefix:this.classPrefix}),this.span(n)}closeNode(e){r(e)&&(this.buffer+="")}value(){return this.buffer}span(e){this.buffer+=``}}let l=(e={})=>{let n={children:[]};return Object.assign(n,e),n};class o{constructor(){this.rootNode=l(),this.stack=[this.rootNode]}get top(){return this.stack[this.stack.length-1]}get root(){return this.rootNode}add(e){this.top.children.push(e)}openNode(e){let n=l({scope:e});this.add(n),this.stack.push(n)}closeNode(){if(this.stack.length>1)return this.stack.pop()}closeAllNodes(){for(;this.closeNode(););}toJSON(){return JSON.stringify(this.rootNode,null,4)}walk(e){return this.constructor._walk(e,this.rootNode)}static _walk(e,n){return"string"==typeof n?e.addText(n):n.children&&(e.openNode(n),n.children.forEach(n=>this._walk(e,n)),e.closeNode(n)),e}static _collapse(e){"string"!=typeof e&&e.children&&(e.children.every(e=>"string"==typeof e)?e.children=[e.children.join("")]:e.children.forEach(e=>{o._collapse(e)}))}}class c extends o{constructor(e){super(),this.options=e}addKeyword(e,n){""!==e&&(this.openNode(n),this.addText(e),this.closeNode())}addText(e){""!==e&&this.add(e)}addSublanguage(e,n){let t=e.root;t.sublanguage=!0,t.language=n,this.add(t)}toHTML(){return new s(this,this.options).value()}finalize(){return!0}}function d(e){return e?"string"==typeof e?e:e.source:null}function g(e){return m("(?=",e,")")}function u(e){return m("(?:",e,")*")}function b(e){return m("(?:",e,")?")}function m(...e){return 
e.map(e=>d(e)).join("")}function p(...e){let n=(e=>{let n=e[e.length-1];return"object"==typeof n&&n.constructor===Object?(e.splice(e.length-1,1),n):{}})(e);return"("+(n.capture?"":"?:")+e.map(e=>d(e)).join("|")+")"}function h(e){return RegExp(e.toString()+"|").exec("").length-1}let f=/\[(?:[^\\\]]|\\.)*\]|\(\??|\\([1-9][0-9]*)|\\./;function E(e,{joinWith:n}){let t=0;return e.map(e=>{t+=1;let n=t,a=d(e),i="";for(;a.length>0;){let r=f.exec(a);if(!r){i+=a;break}i+=a.substring(0,r.index),a=a.substring(r.index+r[0].length),"\\"===r[0][0]&&r[1]?i+="\\"+(Number(r[1])+n):(i+=r[0],"("===r[0]&&t++)}return i}).map(e=>`(${e})`).join(n)}let $="[a-zA-Z]\\w*",y="[a-zA-Z_]\\w*",N="\\b\\d+(\\.\\d+)?",w="(-?)(\\b0[xX][a-fA-F0-9]+|(\\b\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?)",v="\\b(0b[01]+)",x={begin:"\\\\[\\s\\S]",relevance:0},k=(e,n,t={})=>{let a=i({scope:"comment",begin:e,end:n,contains:[]},t);a.contains.push({scope:"doctag",begin:"[ ]*(?=(TODO|FIXME|NOTE|BUG|OPTIMIZE|HACK|XXX):)",end:/(TODO|FIXME|NOTE|BUG|OPTIMIZE|HACK|XXX):/,excludeBegin:!0,relevance:0});let r=p("I","a","is","so","us","to","at","if","in","it","on",/[A-Za-z]+['](d|ve|re|ll|t|s|n)/,/[A-Za-z]+[-][a-z]+/,/[A-Za-z][a-z]{2,}/);return a.contains.push({begin:m(/[ ]+/,"(",r,/[.]?[:]?([.][ ]|[ ])/,"){3}")}),a},M=k("//","$"),O=k("/\\*","\\*/"),S=k("#","$");var A=Object.freeze({__proto__:null,MATCH_NOTHING_RE:/\b\B/,IDENT_RE:$,UNDERSCORE_IDENT_RE:y,NUMBER_RE:N,C_NUMBER_RE:w,BINARY_NUMBER_RE:v,RE_STARTERS_RE:"!|!=|!==|%|%=|&|&&|&=|\\*|\\*=|\\+|\\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\\?|\\[|\\{|\\(|\\^|\\^=|\\||\\|=|\\|\\||~",SHEBANG(e={}){let n=/^#![ ]*\//;return e.binary&&(e.begin=m(n,/.*\b/,e.binary,/\b.*/)),i({scope:"meta",begin:n,end:/$/,relevance:0,"on:begin"(e,n){0!==e.index&&n.ignoreMatch()}},e)},BACKSLASH_ESCAPE:x,APOS_STRING_MODE:{scope:"string",begin:"'",end:"'",illegal:"\\n",contains:[x]},QUOTE_STRING_MODE:{scope:"string",begin:'"',end:'"',illegal:"\\n",contains:[x]},PHRASAL_WORDS_MODE:{begin:/\b(a|an|the|are|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such|will|you|your|they|like|more)\b/},COMMENT:k,C_LINE_COMMENT_MODE:M,C_BLOCK_COMMENT_MODE:O,HASH_COMMENT_MODE:S,NUMBER_MODE:{scope:"number",begin:N,relevance:0},C_NUMBER_MODE:{scope:"number",begin:w,relevance:0},BINARY_NUMBER_MODE:{scope:"number",begin:v,relevance:0},REGEXP_MODE:{begin:/(?=\/[^/\n]*\/)/,contains:[{scope:"regexp",begin:/\//,end:/\/[gimuy]*/,illegal:/\n/,contains:[x,{begin:/\[/,end:/\]/,relevance:0,contains:[x]},]},]},TITLE_MODE:{scope:"title",begin:$,relevance:0},UNDERSCORE_TITLE_MODE:{scope:"title",begin:y,relevance:0},METHOD_GUARD:{begin:"\\.\\s*[a-zA-Z_]\\w*",relevance:0},END_SAME_AS_BEGIN:e=>Object.assign(e,{"on:begin"(e,n){n.data._beginMatch=e[1]},"on:end"(e,n){n.data._beginMatch!==e[1]&&n.ignoreMatch()}})});function C(e,n){"."===e.input[e.index-1]&&n.ignoreMatch()}function T(e,n){void 0!==e.className&&(e.scope=e.className,delete e.className)}function R(e,n){n&&e.beginKeywords&&(e.begin="\\b("+e.beginKeywords.split(" ").join("|")+")(?!\\.)(?=\\b|\\s)",e.__beforeBegin=C,e.keywords=e.keywords||e.beginKeywords,delete e.beginKeywords,void 0===e.relevance&&(e.relevance=0))}function D(e,n){Array.isArray(e.illegal)&&(e.illegal=p(...e.illegal))}function I(e,n){if(e.match){if(e.begin||e.end)throw Error("begin & end are not supported with match");e.begin=e.match,delete e.match}}function L(e,n){void 0===e.relevance&&(e.relevance=1)}let B=(e,n)=>{if(!e.beforeMatch)return;if(e.starts)throw Error("beforeMatch cannot 
be used with starts");let t=Object.assign({},e);Object.keys(e).forEach(n=>{delete e[n]}),e.keywords=t.keywords,e.begin=m(t.beforeMatch,g(t.begin)),e.starts={relevance:0,contains:[Object.assign(t,{endsParent:!0})]},e.relevance=0,delete t.beforeMatch},_=["of","and","for","in","not","or","if","then","parent","list","value",],z={},F=e=>{console.error(e)},U=(e,...n)=>{},P=(e,n)=>{z[`${e}/${n}`]||(console.log(`Deprecated as of ${e}. ${n}`),z[`${e}/${n}`]=!0)},j=Error();function K(e,n,{key:t}){let a=0,i=e[t],r={},s={};for(let l=1;l<=n.length;l++)s[l+a]=i[l],r[l+a]=!0,a+=h(n[l-1]);e[t]=s,e[t]._emit=r,e[t]._multi=!0}function q(e){var n;(n=e).scope&&"object"==typeof n.scope&&null!==n.scope&&(n.beginScope=n.scope,delete n.scope),"string"==typeof e.beginScope&&(e.beginScope={_wrap:e.beginScope}),"string"==typeof e.endScope&&(e.endScope={_wrap:e.endScope}),(e=>{if(Array.isArray(e.begin)){if(e.skip||e.excludeBegin||e.returnBegin)throw F("skip, excludeBegin, returnBegin not compatible with beginScope: {}"),j;if("object"!=typeof e.beginScope||null===e.beginScope)throw F("beginScope must be object"),j;K(e,e.begin,{key:"beginScope"}),e.begin=E(e.begin,{joinWith:""})}})(e),(e=>{if(Array.isArray(e.end)){if(e.skip||e.excludeEnd||e.returnEnd)throw F("skip, excludeEnd, returnEnd not compatible with endScope: {}"),j;if("object"!=typeof e.endScope||null===e.endScope)throw F("endScope must be object"),j;K(e,e.end,{key:"endScope"}),e.end=E(e.end,{joinWith:""})}})(e)}class H extends Error{constructor(e,n){super(e),this.name="HTMLInjectionError",this.html=n}}let Z=a,G=i,W=Symbol("nomatch");var Q=(n=>{let a=Object.create(null),r=Object.create(null),s=[],l=!0,o="Could not find the language '{}', did you forget to load/include a language module?",f={disableAutodetect:!0,name:"Plain text",contains:[]},$={ignoreUnescapedHTML:!1,throwUnescapedHTML:!1,noHighlightRe:/^(no-?highlight)$/i,languageDetectRe:/\blang(?:uage)?-([\w-]+)\b/i,classPrefix:"hljs-",cssSelector:"pre code",languages:null,__emitter:c};function y(e){return $.noHighlightRe.test(e)}function N(e,n,t){let a="",i="";"object"==typeof n?(a=e,t=n.ignoreIllegals,i=n.language):(P("10.7.0","highlight(lang, code, ...args) has been deprecated."),P("10.7.0","Please use highlight(code, options) instead.\nhttps://github.com/highlightjs/highlight.js/issues/2277"),i=e,a=n),void 0===t&&(t=!0);let r={code:a,language:i};z("before:highlight",r);let s=r.result?r.result:w(r.language,r.code,t);return s.code=r.code,z("after:highlight",s),s}function w(e,n,r,s){let c=Object.create(null);function g(){var e;if(!M.keywords)return void A.addText(C);let n=0;M.keywordPatternRe.lastIndex=0;let t=M.keywordPatternRe.exec(C),a="";for(;t;){a+=C.substring(n,t.index);let i=N.case_insensitive?t[0].toLowerCase():t[0],r=(e=i,M.keywords[e]);if(r){let[s,l]=r;if(A.addText(a),a="",c[i]=(c[i]||0)+1,c[i]<=7&&(z+=l),s.startsWith("_"))a+=t[0];else{let o=N.classNameAliases[s]||s;A.addKeyword(t[0],o)}}else a+=t[0];n=M.keywordPatternRe.lastIndex,t=M.keywordPatternRe.exec(C)}a+=C.substring(n),A.addText(a)}function u(){null!=M.subLanguage?(()=>{if(""===C)return;let e=null;if("string"==typeof M.subLanguage){if(!a[M.subLanguage])return void A.addText(C);e=w(M.subLanguage,C,!0,S[M.subLanguage]),S[M.subLanguage]=e._top}else e=v(C,M.subLanguage.length?M.subLanguage:null);M.relevance>0&&(z+=e.relevance),A.addSublanguage(e._emitter,e.language)})():g(),C=""}function b(e,n){let t=1,a=n.length-1;for(;t<=a;){if(!e._emit[t]){t++;continue}let 
i=N.classNameAliases[e[t]]||e[t],r=n[t];i?A.addKeyword(r,i):(C=r,g(),C=""),t++}}function m(e,n){return e.scope&&"string"==typeof e.scope&&A.openNode(N.classNameAliases[e.scope]||e.scope),e.beginScope&&(e.beginScope._wrap?(A.addKeyword(C,N.classNameAliases[e.beginScope._wrap]||e.beginScope._wrap),C=""):e.beginScope._multi&&(b(e.beginScope,n),C="")),M=Object.create(e,{parent:{value:M}})}function p(e){return 0===M.matcher.regexIndex?(C+=e[0],1):(j=!0,0)}let f={};function y(a,i){let s=i&&i[0];if(C+=a,null==s)return u(),0;if("begin"===f.type&&"end"===i.type&&f.index===i.index&&""===s){if(C+=n.slice(i.index,i.index+1),!l){let o=Error(`0 width match regex (${e})`);throw o.languageName=e,o.badRule=f.rule,o}return 1}if(f=i,"begin"===i.type)return(e=>{let n=e[0],a=e.rule,i=new t(a),r=[a.__beforeBegin,a["on:begin"]];for(let s of r)if(s&&(s(e,i),i.isMatchIgnored))return p(n);return a.skip?C+=n:(a.excludeBegin&&(C+=n),u(),a.returnBegin||a.excludeBegin||(C=n)),m(a,e),a.returnBegin?0:n.length})(i);if("illegal"===i.type&&!r){let c=Error('Illegal lexeme "'+s+'" for mode "'+(M.scope||"")+'"');throw c.mode=M,c}if("end"===i.type){let d=function e(a){let i=a[0],r=n.substring(a.index),s=function e(n,a,i){let r=((e,n)=>{let t=e&&e.exec(n);return t&&0===t.index})(n.endRe,i);if(r){if(n["on:end"]){let s=new t(n);n["on:end"](a,s),s.isMatchIgnored&&(r=!1)}if(r){for(;n.endsParent&&n.parent;)n=n.parent;return n}}if(n.endsWithParent)return e(n.parent,a,i)}(M,a,r);if(!s)return W;let l=M;M.endScope&&M.endScope._wrap?(u(),A.addKeyword(i,M.endScope._wrap)):M.endScope&&M.endScope._multi?(u(),b(M.endScope,a)):l.skip?C+=i:(l.returnEnd||l.excludeEnd||(C+=i),u(),l.excludeEnd&&(C=i));do M.scope&&A.closeNode(),M.skip||M.subLanguage||(z+=M.relevance),M=M.parent;while(M!==s.parent);return s.starts&&m(s.starts,a),l.returnEnd?0:i.length}(i);if(d!==W)return d}if("illegal"===i.type&&""===s)return 1;if(P>1e5&&P>3*i.index)throw Error("potential infinite loop, way more iterations than matches");return C+=s,s.length}let N=O(e);if(!N)throw F(o.replace("{}",e)),Error('Unknown language: "'+e+'"');let x=function e(n){function t(e,t){return RegExp(d(e),"m"+(n.case_insensitive?"i":"")+(n.unicodeRegex?"u":"")+(t?"g":""))}class a{constructor(){this.matchIndexes={},this.regexes=[],this.matchAt=1,this.position=0}addRule(e,n){n.position=this.position++,this.matchIndexes[this.matchAt]=n,this.regexes.push([n,e]),this.matchAt+=h(e)+1}compile(){0===this.regexes.length&&(this.exec=()=>null);let e=this.regexes.map(e=>e[1]);this.matcherRe=t(E(e,{joinWith:"|"}),!0),this.lastIndex=0}exec(e){this.matcherRe.lastIndex=this.lastIndex;let n=this.matcherRe.exec(e);if(!n)return null;let t=n.findIndex((e,n)=>n>0&&void 0!==e),a=this.matchIndexes[t];return n.splice(0,t),Object.assign(n,a)}}class r{constructor(){this.rules=[],this.multiRegexes=[],this.count=0,this.lastIndex=0,this.regexIndex=0}getMatcher(e){if(this.multiRegexes[e])return this.multiRegexes[e];let n=new a;return this.rules.slice(e).forEach(([e,t])=>n.addRule(e,t)),n.compile(),this.multiRegexes[e]=n,n}resumingScanAtSamePosition(){return 0!==this.regexIndex}considerAll(){this.regexIndex=0}addRule(e,n){this.rules.push([e,n]),"begin"===n.type&&this.count++}exec(e){let n=this.getMatcher(this.regexIndex);n.lastIndex=this.lastIndex;let t=n.exec(e);if(this.resumingScanAtSamePosition()){if(t&&t.index===this.lastIndex);else{let a=this.getMatcher(0);a.lastIndex=this.lastIndex+1,t=a.exec(e)}}return 
t&&(this.regexIndex+=t.position+1,this.regexIndex===this.count&&this.considerAll()),t}}if(n.compilerExtensions||(n.compilerExtensions=[]),n.contains&&n.contains.includes("self"))throw Error("ERR: contains `self` is not supported at the top-level of a language. See documentation.");return n.classNameAliases=i(n.classNameAliases||{}),function e(a,s){let l=a;if(a.isCompiled)return l;[T,I,q,B].forEach(e=>e(a,s)),n.compilerExtensions.forEach(e=>e(a,s)),a.__beforeBegin=null,[R,D,L].forEach(e=>e(a,s)),a.isCompiled=!0;let o=null;return"object"==typeof a.keywords&&a.keywords.$pattern&&(a.keywords=Object.assign({},a.keywords),o=a.keywords.$pattern,delete a.keywords.$pattern),o=o||/\w+/,a.keywords&&(a.keywords=function e(n,t,a="keyword"){let i=Object.create(null);return"string"==typeof n?r(a,n.split(" ")):Array.isArray(n)?r(a,n):Object.keys(n).forEach(a=>{Object.assign(i,e(n[a],t,a))}),i;function r(e,n){t&&(n=n.map(e=>e.toLowerCase())),n.forEach(n=>{var t,a,r;let s=n.split("|");i[s[0]]=[e,(t=s[0],a=s[1],a?Number(a):(r=t,_.includes(r.toLowerCase()))?0:1)]})}}(a.keywords,n.case_insensitive)),l.keywordPatternRe=t(o,!0),s&&(a.begin||(a.begin=/\B|\b/),l.beginRe=t(l.begin),a.end||a.endsWithParent||(a.end=/\B|\b/),a.end&&(l.endRe=t(l.end)),l.terminatorEnd=d(l.end)||"",a.endsWithParent&&s.terminatorEnd&&(l.terminatorEnd+=(a.end?"|":"")+s.terminatorEnd)),a.illegal&&(l.illegalRe=t(a.illegal)),a.contains||(a.contains=[]),a.contains=[].concat(...a.contains.map(e=>{var n;return(n="self"===e?a:e).variants&&!n.cachedVariants&&(n.cachedVariants=n.variants.map(e=>i(n,{variants:null},e))),n.cachedVariants?n.cachedVariants:!function e(n){return!!n&&(n.endsWithParent||e(n.starts))}(n)?Object.isFrozen(n)?i(n):n:i(n,{starts:n.starts?i(n.starts):null})})),a.contains.forEach(n=>{e(n,l)}),a.starts&&e(a.starts,s),l.matcher=(e=>{let n=new r;return e.contains.forEach(e=>n.addRule(e.begin,{rule:e,type:"begin"})),e.terminatorEnd&&n.addRule(e.terminatorEnd,{type:"end"}),e.illegal&&n.addRule(e.illegal,{type:"illegal"}),n})(l),l}(n)}(N),k="",M=s||x,S={},A=new $.__emitter($);(()=>{let e=[];for(let n=M;n!==N;n=n.parent)n.scope&&e.unshift(n.scope);e.forEach(e=>A.openNode(e))})();let C="",z=0,U=0,P=0,j=!1;try{for(M.matcher.considerAll();;){P++,j?j=!1:M.matcher.considerAll(),M.matcher.lastIndex=U;let K=M.matcher.exec(n);if(!K)break;let H=y(n.substring(U,K.index),K);U=K.index+H}return y(n.substring(U)),A.closeAllNodes(),A.finalize(),k=A.toHTML(),{language:e,value:k,relevance:z,illegal:!1,_emitter:A,_top:M}}catch(G){if(G.message&&G.message.includes("Illegal"))return{language:e,value:Z(n),illegal:!0,relevance:0,_illegalBy:{message:G.message,index:U,context:n.slice(U-100,U+100),mode:G.mode,resultSoFar:k},_emitter:A};if(l)return{language:e,value:Z(n),illegal:!1,relevance:0,errorRaised:G,_emitter:A,_top:M};throw G}}function v(e,n){n=n||$.languages||Object.keys(a);let t=(e=>{let n={value:Z(e),illegal:!1,relevance:0,_top:f,_emitter:new $.__emitter($)};return n._emitter.addText(e),n})(e),i=n.filter(O).filter(C).map(n=>w(n,e,!1));i.unshift(t);let r=i.sort((e,n)=>{if(e.relevance!==n.relevance)return n.relevance-e.relevance;if(e.language&&n.language){if(O(e.language).supersetOf===n.language)return 1;if(O(n.language).supersetOf===e.language)return -1}return 0}),[s,l]=r,o=s;return o.secondBest=l,o}function x(e){let n=null,t=(e=>{let n=e.className+" ";n+=e.parentNode?e.parentNode.className:"";let t=$.languageDetectRe.exec(n);if(t){let a=O(t[1]);return a||(U(o.replace("{}",t[1])),U("Falling back to no-highlight mode for this 
block.",e)),a?t[1]:"no-highlight"}return n.split(/\s+/).find(e=>y(e)||O(e))})(e);if(y(t))return;if(z("before:highlightElement",{el:e,language:t}),e.children.length>0&&($.ignoreUnescapedHTML||$.throwUnescapedHTML))throw new H("One of your code blocks includes unescaped HTML.",e.innerHTML);n=e;let a=n.textContent,i=t?N(a,{language:t,ignoreIllegals:!0}):v(a);e.innerHTML=i.value,((e,n,t)=>{let a=n&&r[n]||t;e.classList.add("hljs"),e.classList.add("language-"+a)})(e,t,i.language),e.result={language:i.language,re:i.relevance,relevance:i.relevance},i.secondBest&&(e.secondBest={language:i.secondBest.language,relevance:i.secondBest.relevance}),z("after:highlightElement",{el:e,result:i,text:a})}let k=!1;function M(){"loading"!==document.readyState?document.querySelectorAll($.cssSelector).forEach(x):k=!0}function O(e){return a[e=(e||"").toLowerCase()]||a[r[e]]}function S(e,{languageName:n}){"string"==typeof e&&(e=[e]),e.forEach(e=>{r[e.toLowerCase()]=n})}function C(e){let n=O(e);return n&&!n.disableAutodetect}function z(e,n){let t=e;s.forEach(e=>{e[t]&&e[t](n)})}for(let j in"undefined"!=typeof window&&window.addEventListener&&window.addEventListener("DOMContentLoaded",()=>{k&&M()},!1),Object.assign(n,{highlight:N,highlightAuto:v,highlightAll:M,highlightElement:x,highlightBlock:e=>(P("10.7.0","highlightBlock will be removed entirely in v12.0"),P("10.7.0","Please use highlightElement now."),x(e)),configure(e){$=G($,e)},initHighlighting(){M(),P("10.6.0","initHighlighting() deprecated. Use highlightAll() now.")},initHighlightingOnLoad(){M(),P("10.6.0","initHighlightingOnLoad() deprecated. Use highlightAll() now.")},registerLanguage(e,t){let i=null;try{i=t(n)}catch(r){if(F("Language definition for '{}' could not be registered.".replace("{}",e)),!l)throw r;F(r),i=f}i.name||(i.name=e),a[e]=i,i.rawDefinition=t.bind(null,n),i.aliases&&S(i.aliases,{languageName:e})},unregisterLanguage(e){for(let n of(delete a[e],Object.keys(r)))r[n]===e&&delete r[n]},listLanguages:()=>Object.keys(a),getLanguage:O,registerAliases:S,autoDetection:C,inherit:G,addPlugin(e){var n;(n=e)["before:highlightBlock"]&&!n["before:highlightElement"]&&(n["before:highlightElement"]=e=>{n["before:highlightBlock"](Object.assign({block:e.el},e))}),n["after:highlightBlock"]&&!n["after:highlightElement"]&&(n["after:highlightElement"]=e=>{n["after:highlightBlock"](Object.assign({block:e.el},e))}),s.push(e)}}),n.debugMode=()=>{l=!1},n.safeMode=()=>{l=!0},n.versionString="11.7.0",n.regex={concat:m,lookahead:g,either:p,optional:b,anyNumberOfTimes:u},A)"object"==typeof A[j]&&e.exports(A[j]);return Object.assign(n,A),n})({});let 
X=e=>({IMPORTANT:{scope:"meta",begin:"!important"},BLOCK_COMMENT:e.C_BLOCK_COMMENT_MODE,HEXCOLOR:{scope:"number",begin:/#(([0-9a-fA-F]{3,4})|(([0-9a-fA-F]{2}){3,4}))\b/},FUNCTION_DISPATCH:{className:"built_in",begin:/[\w-]+(?=\()/},ATTRIBUTE_SELECTOR_MODE:{scope:"selector-attr",begin:/\[/,end:/\]/,illegal:"$",contains:[e.APOS_STRING_MODE,e.QUOTE_STRING_MODE]},CSS_NUMBER_MODE:{scope:"number",begin:e.NUMBER_RE+"(%|em|ex|ch|rem|vw|vh|vmin|vmax|cm|mm|in|pt|pc|px|deg|grad|rad|turn|s|ms|Hz|kHz|dpi|dpcm|dppx)?",relevance:0},CSS_VARIABLE:{className:"attr",begin:/--[A-Za-z][A-Za-z0-9_-]*/}}),V=["a","abbr","address","article","aside","audio","b","blockquote","body","button","canvas","caption","cite","code","dd","del","details","dfn","div","dl","dt","em","fieldset","figcaption","figure","footer","form","h1","h2","h3","h4","h5","h6","header","hgroup","html","i","iframe","img","input","ins","kbd","label","legend","li","main","mark","menu","nav","object","ol","p","q","quote","samp","section","span","strong","summary","sup","table","tbody","td","textarea","tfoot","th","thead","time","tr","ul","var","video",],J=["any-hover","any-pointer","aspect-ratio","color","color-gamut","color-index","device-aspect-ratio","device-height","device-width","display-mode","forced-colors","grid","height","hover","inverted-colors","monochrome","orientation","overflow-block","overflow-inline","pointer","prefers-color-scheme","prefers-contrast","prefers-reduced-motion","prefers-reduced-transparency","resolution","scan","scripting","update","width","min-width","max-width","min-height","max-height",],Y=["active","any-link","blank","checked","current","default","defined","dir","disabled","drop","empty","enabled","first","first-child","first-of-type","fullscreen","future","focus","focus-visible","focus-within","has","host","host-context","hover","indeterminate","in-range","invalid","is","lang","last-child","last-of-type","left","link","local-link","not","nth-child","nth-col","nth-last-child","nth-last-col","nth-last-of-type","nth-of-type","only-child","only-of-type","optional","out-of-range","past","placeholder-shown","read-only","read-write","required","right","root","scope","target","target-within","user-invalid","valid","visited","where",],ee=["after","backdrop","before","cue","cue-region","first-letter","first-line","grammar-error","marker","part","placeholder","selection","slotted","spelling-error",],en=["align-content","align-items","align-self","all","animation","animation-delay","animation-direction","animation-duration","animation-fill-mode","animation-iteration-count","animation-name","animation-play-state","animation-timing-function","backface-visibility","background","background-attachment","background-blend-mode","background-clip","background-color","background-image","background-origin","background-position","background-repeat","background-size","block-size","border","border-block","border-block-color","border-block-end","border-block-end-color","border-block-end-style","border-block-end-width","border-block-start","border-block-start-color","border-block-start-style","border-block-start-width","border-block-style","border-block-width","border-bottom","border-bottom-color","border-bottom-left-radius","border-bottom-right-radius","border-bottom-style","border-bottom-width","border-collapse","border-color","border-image","border-image-outset","border-image-repeat","border-image-slice","border-image-source","border-image-width","border-inline","border-inline-color","border-inline-end","border-inline-end-color","border-in
line-end-style","border-inline-end-width","border-inline-start","border-inline-start-color","border-inline-start-style","border-inline-start-width","border-inline-style","border-inline-width","border-left","border-left-color","border-left-style","border-left-width","border-radius","border-right","border-right-color","border-right-style","border-right-width","border-spacing","border-style","border-top","border-top-color","border-top-left-radius","border-top-right-radius","border-top-style","border-top-width","border-width","bottom","box-decoration-break","box-shadow","box-sizing","break-after","break-before","break-inside","caption-side","caret-color","clear","clip","clip-path","clip-rule","color","column-count","column-fill","column-gap","column-rule","column-rule-color","column-rule-style","column-rule-width","column-span","column-width","columns","contain","content","content-visibility","counter-increment","counter-reset","cue","cue-after","cue-before","cursor","direction","display","empty-cells","filter","flex","flex-basis","flex-direction","flex-flow","flex-grow","flex-shrink","flex-wrap","float","flow","font","font-display","font-family","font-feature-settings","font-kerning","font-language-override","font-size","font-size-adjust","font-smoothing","font-stretch","font-style","font-synthesis","font-variant","font-variant-caps","font-variant-east-asian","font-variant-ligatures","font-variant-numeric","font-variant-position","font-variation-settings","font-weight","gap","glyph-orientation-vertical","grid","grid-area","grid-auto-columns","grid-auto-flow","grid-auto-rows","grid-column","grid-column-end","grid-column-start","grid-gap","grid-row","grid-row-end","grid-row-start","grid-template","grid-template-areas","grid-template-columns","grid-template-rows","hanging-punctuation","height","hyphens","icon","image-orientation","image-rendering","image-resolution","ime-mode","inline-size","isolation","justify-content","left","letter-spacing","line-break","line-height","list-style","list-style-image","list-style-position","list-style-type","margin","margin-block","margin-block-end","margin-block-start","margin-bottom","margin-inline","margin-inline-end","margin-inline-start","margin-left","margin-right","margin-top","marks","mask","mask-border","mask-border-mode","mask-border-outset","mask-border-repeat","mask-border-slice","mask-border-source","mask-border-width","mask-clip","mask-composite","mask-image","mask-mode","mask-origin","mask-position","mask-repeat","mask-size","mask-type","max-block-size","max-height","max-inline-size","max-width","min-block-size","min-height","min-inline-size","min-width","mix-blend-mode","nav-down","nav-index","nav-left","nav-right","nav-up","none","normal","object-fit","object-position","opacity","order","orphans","outline","outline-color","outline-offset","outline-style","outline-width","overflow","overflow-wrap","overflow-x","overflow-y","padding","padding-block","padding-block-end","padding-block-start","padding-bottom","padding-inline","padding-inline-end","padding-inline-start","padding-left","padding-right","padding-top","page-break-after","page-break-before","page-break-inside","pause","pause-after","pause-before","perspective","perspective-origin","pointer-events","position","quotes","resize","rest","rest-after","rest-before","right","row-gap","scroll-margin","scroll-margin-block","scroll-margin-block-end","scroll-margin-block-start","scroll-margin-bottom","scroll-margin-inline","scroll-margin-inline-end","scroll-margin-inline-start","scroll-margin-left","
scroll-margin-right","scroll-margin-top","scroll-padding","scroll-padding-block","scroll-padding-block-end","scroll-padding-block-start","scroll-padding-bottom","scroll-padding-inline","scroll-padding-inline-end","scroll-padding-inline-start","scroll-padding-left","scroll-padding-right","scroll-padding-top","scroll-snap-align","scroll-snap-stop","scroll-snap-type","scrollbar-color","scrollbar-gutter","scrollbar-width","shape-image-threshold","shape-margin","shape-outside","speak","speak-as","src","tab-size","table-layout","text-align","text-align-all","text-align-last","text-combine-upright","text-decoration","text-decoration-color","text-decoration-line","text-decoration-style","text-emphasis","text-emphasis-color","text-emphasis-position","text-emphasis-style","text-indent","text-justify","text-orientation","text-overflow","text-rendering","text-shadow","text-transform","text-underline-position","top","transform","transform-box","transform-origin","transform-style","transition","transition-delay","transition-duration","transition-property","transition-timing-function","unicode-bidi","vertical-align","visibility","voice-balance","voice-duration","voice-family","voice-pitch","voice-range","voice-rate","voice-stress","voice-volume","white-space","widows","width","will-change","word-break","word-spacing","word-wrap","writing-mode","z-index",].reverse(),et=Y.concat(ee);var ea="\\.([0-9](_*[0-9])*)",ei="[0-9a-fA-F](_*[0-9a-fA-F])*",er={className:"number",variants:[{begin:`(\\b([0-9](_*[0-9])*)((${ea})|\\.)?|(${ea}))[eE][+-]?([0-9](_*[0-9])*)[fFdD]?\\b`},{begin:`\\b([0-9](_*[0-9])*)((${ea})[fFdD]?\\b|\\.([fFdD]\\b)?)`},{begin:`(${ea})[fFdD]?\\b`},{begin:"\\b([0-9](_*[0-9])*)[fFdD]\\b"},{begin:`\\b0[xX]((${ei})\\.?|(${ei})?\\.(${ei}))[pP][+-]?([0-9](_*[0-9])*)[fFdD]?\\b`},{begin:"\\b(0|[1-9](_*[0-9])*)[lL]?\\b"},{begin:`\\b0[xX](${ei})[lL]?\\b`},{begin:"\\b0(_*[0-7])*[lL]?\\b"},{begin:"\\b0[bB][01](_*[01])*[lL]?\\b"},],relevance:0};let es="[A-Za-z$_][0-9A-Za-z$_]*",el=["as","in","of","if","for","while","finally","var","new","function","do","return","void","else","break","catch","instanceof","with","throw","case","default","try","switch","continue","typeof","delete","let","yield","const","class","debugger","async","await","static","import","from","export","extends",],eo=["true","false","null","undefined","NaN","Infinity"],ec=["Object","Function","Boolean","Symbol","Math","Date","Number","BigInt","String","RegExp","Array","Float32Array","Float64Array","Int8Array","Uint8Array","Uint8ClampedArray","Int16Array","Int32Array","Uint16Array","Uint32Array","BigInt64Array","BigUint64Array","Set","Map","WeakSet","WeakMap","ArrayBuffer","SharedArrayBuffer","Atomics","DataView","JSON","Promise","Generator","GeneratorFunction","AsyncFunction","Reflect","Proxy","Intl","WebAssembly",],ed=["Error","EvalError","InternalError","RangeError","ReferenceError","SyntaxError","TypeError","URIError",],eg=["setInterval","setTimeout","clearInterval","clearTimeout","require","exports","eval","isFinite","isNaN","parseFloat","parseInt","decodeURI","decodeURIComponent","encodeURI","encodeURIComponent","escape","unescape",],eu=["arguments","this","super","console","window","document","localStorage","module","global",],eb=[].concat(eg,ec,ed);function em(e){var n;let t=e.regex,a=es,i={begin:/<[A-Za-z0-9\\._:-]+/,end:/\/[A-Za-z0-9\\._:-]+>|\/>/,isTrulyOpeningTag(e,n){let t=e[0].length+e.index,a=e.input[t];if("<"===a||","===a)return void n.ignoreMatch();let i;">"===a&&(((e,{after:n})=>{let 
t="",v={match:[/const|var|let/,/\s+/,a,/\s*/,/=\s*/,/(async\s*)?/,t.lookahead(w),],keywords:"async",className:{1:"keyword",3:"title.function"},contains:[f]};return{name:"Javascript",aliases:["js","jsx","mjs","cjs"],keywords:r,exports:{PARAMS_CONTAINS:h,CLASS_REFERENCE:$},illegal:/#(?![$_A-z])/,contains:[e.SHEBANG({label:"shebang",binary:"node",relevance:5}),{label:"use_strict",className:"meta",relevance:10,begin:/^\s*['"]use (strict|asm)['"]/},e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,d,g,u,b,{match:/\$\d+/},o,$,{className:"attr",begin:a+t.lookahead(":"),relevance:0},v,{begin:"("+e.RE_STARTERS_RE+"|\\b(case|return|throw)\\b)\\s*",keywords:"return throw case",relevance:0,contains:[b,e.REGEXP_MODE,{className:"function",begin:w,returnBegin:!0,end:"\\s*=>",contains:[{className:"params",variants:[{begin:e.UNDERSCORE_IDENT_RE,relevance:0},{className:null,begin:/\(\s*\)/,skip:!0},{begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:r,contains:h},]},]},{begin:/,/,relevance:0},{match:/\s+/,relevance:0},{variants:[{begin:"<>",end:""},{match:/<[A-Za-z0-9\\._:-]+\s*\/>/},{begin:i.begin,"on:begin":i.isTrulyOpeningTag,end:i.end},],subLanguage:"xml",contains:[{begin:i.begin,end:i.end,skip:!0,contains:["self"]},]},]},{variants:[{match:[/function/,/\s+/,a,/(?=\s*\()/]},{match:[/function/,/\s*(?=\()/]},],className:{1:"keyword",3:"title.function"},label:"func.def",contains:[f],illegal:/%/},{beginKeywords:"while if switch catch for"},{begin:"\\b(?!function)"+e.UNDERSCORE_IDENT_RE+"\\([^()]*(\\([^()]*(\\([^()]*\\)[^()]*)*\\)[^()]*)*\\)\\s*\\{",returnBegin:!0,label:"func.def",contains:[f,e.inherit(e.TITLE_MODE,{begin:a,className:"title.function"}),]},{match:/\.\.\./,relevance:0},N,{match:"\\$"+a,relevance:0},{match:[/\bconstructor(?=\s*\()/],className:{1:"title.function"},contains:[f]},y,{relevance:0,match:/\b[A-Z][A-Z_0-9]+\b/,className:"variable.constant"},E,{match:[/get|set/,/\s+/,a,/(?=\()/],className:{1:"keyword",3:"title.function"},contains:[{begin:/\(\)/},f]},{match:/\$[(.]/},]}}let 
ep=e=>m(/\b/,e,/\w$/.test(e)?/\b/:/\B/),e8=["Protocol","Type"].map(ep),eh=["init","self"].map(ep),ef=["Any","Self"],eE=["actor","any","associatedtype","async","await",/as\?/,/as!/,"as","break","case","catch","class","continue","convenience","default","defer","deinit","didSet","distributed","do","dynamic","else","enum","extension","fallthrough",/fileprivate\(set\)/,"fileprivate","final","for","func","get","guard","if","import","indirect","infix",/init\?/,/init!/,"inout",/internal\(set\)/,"internal","in","is","isolated","nonisolated","lazy","let","mutating","nonmutating",/open\(set\)/,"open","operator","optional","override","postfix","precedencegroup","prefix",/private\(set\)/,"private","protocol",/public\(set\)/,"public","repeat","required","rethrows","return","set","some","static","struct","subscript","super","switch","throws","throw",/try\?/,/try!/,"try","typealias",/unowned\(safe\)/,/unowned\(unsafe\)/,"unowned","var","weak","where","while","willSet",],e$=["false","nil","true"],ey=["assignment","associativity","higherThan","left","lowerThan","none","right",],eN=["#colorLiteral","#column","#dsohandle","#else","#elseif","#endif","#error","#file","#fileID","#fileLiteral","#filePath","#function","#if","#imageLiteral","#keyPath","#line","#selector","#sourceLocation","#warn_unqualified_access","#warning",],ew=["abs","all","any","assert","assertionFailure","debugPrint","dump","fatalError","getVaList","isKnownUniquelyReferenced","max","min","numericCast","pointwiseMax","pointwiseMin","precondition","preconditionFailure","print","readLine","repeatElement","sequence","stride","swap","swift_unboxFromSwiftValueWithType","transcode","type","unsafeBitCast","unsafeDowncast","withExtendedLifetime","withUnsafeMutablePointer","withUnsafePointer","withVaList","withoutActuallyEscaping","zip",],ev=p(/[/=\-+!*%<>&|^~?]/,/[\u00A1-\u00A7]/,/[\u00A9\u00AB]/,/[\u00AC\u00AE]/,/[\u00B0\u00B1]/,/[\u00B6\u00BB\u00BF\u00D7\u00F7]/,/[\u2016-\u2017]/,/[\u2020-\u2027]/,/[\u2030-\u203E]/,/[\u2041-\u2053]/,/[\u2055-\u205E]/,/[\u2190-\u23FF]/,/[\u2500-\u2775]/,/[\u2794-\u2BFF]/,/[\u2E00-\u2E7F]/,/[\u3001-\u3003]/,/[\u3008-\u3020]/,/[\u3030]/),ex=p(ev,/[\u0300-\u036F]/,/[\u1DC0-\u1DFF]/,/[\u20D0-\u20FF]/,/[\uFE00-\uFE0F]/,/[\uFE20-\uFE2F]/),ek=m(ev,ex,"*"),eM=p(/[a-zA-Z_]/,/[\u00A8\u00AA\u00AD\u00AF\u00B2-\u00B5\u00B7-\u00BA]/,/[\u00BC-\u00BE\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u00FF]/,/[\u0100-\u02FF\u0370-\u167F\u1681-\u180D\u180F-\u1DBF]/,/[\u1E00-\u1FFF]/,/[\u200B-\u200D\u202A-\u202E\u203F-\u2040\u2054\u2060-\u206F]/,/[\u2070-\u20CF\u2100-\u218F\u2460-\u24FF\u2776-\u2793]/,/[\u2C00-\u2DFF\u2E80-\u2FFF]/,/[\u3004-\u3007\u3021-\u302F\u3031-\u303F\u3040-\uD7FF]/,/[\uF900-\uFD3D\uFD40-\uFDCF\uFDF0-\uFE1F\uFE30-\uFE44]/,/[\uFE47-\uFEFE\uFF00-\uFFFD]/),eO=p(eM,/\d/,/[\u0300-\u036F\u1DC0-\u1DFF\u20D0-\u20FF\uFE20-\uFE2F]/),eS=m(eM,eO,"*"),eA=m(/[A-Z]/,eO,"*"),eC=["autoclosure",m(/convention\(/,p("swift","block","c"),/\)/),"discardableResult","dynamicCallable","dynamicMemberLookup","escaping","frozen","GKInspectable","IBAction","IBDesignable","IBInspectable","IBOutlet","IBSegueAction","inlinable","main","nonobjc","NSApplicationMain","NSCopying","NSManaged",m(/objc\(/,eS,/\)/),"objc","objcMembers","propertyWrapper","requires_stored_property_inits","resultBuilder","testable","UIApplicationMain","unknown","usableFromInline",],eT=["iOS","iOSApplicationExtension","macOS","macOSApplicationExtension","macCatalyst","macCatalystApplicationExtension","watchOS","watchOSApplicationExtension","tvOS","tvOSApplicationExtension","swift",];var 
eR=Object.freeze({__proto__:null,grmr_bash(e){let n=e.regex,t={};Object.assign(t,{className:"variable",variants:[{begin:n.concat(/\$[\w\d#@][\w\d_]*/,"(?![\\w\\d])(?![$])")},{begin:/\$\{/,end:/\}/,contains:["self",{begin:/:-/,contains:[t]}]},]});let a={className:"subst",begin:/\$\(/,end:/\)/,contains:[e.BACKSLASH_ESCAPE]},i={begin:/<<-?\s*(?=\w+)/,starts:{contains:[e.END_SAME_AS_BEGIN({begin:/(\w+)/,end:/(\w+)/,className:"string"}),]}},r={className:"string",begin:/"/,end:/"/,contains:[e.BACKSLASH_ESCAPE,t,a]};a.contains.push(r);let s={begin:/\$?\(\(/,end:/\)\)/,contains:[{begin:/\d+#[0-9a-f]+/,className:"number"},e.NUMBER_MODE,t,]},l=e.SHEBANG({binary:"(fish|bash|zsh|sh|csh|ksh|tcsh|dash|scsh)",relevance:10}),o={className:"function",begin:/\w[\w\d_]*\s*\(\s*\)\s*\{/,returnBegin:!0,contains:[e.inherit(e.TITLE_MODE,{begin:/\w[\w\d_]*/})],relevance:0};return{name:"Bash",aliases:["sh"],keywords:{$pattern:/\b[a-z][a-z0-9._-]+\b/,keyword:["if","then","else","elif","fi","for","while","in","do","done","case","esac","function",],literal:["true","false"],built_in:["break","cd","continue","eval","exec","exit","export","getopts","hash","pwd","readonly","return","shift","test","times","trap","umask","unset","alias","bind","builtin","caller","command","declare","echo","enable","help","let","local","logout","mapfile","printf","read","readarray","source","type","typeset","ulimit","unalias","set","shopt","autoload","bg","bindkey","bye","cap","chdir","clone","comparguments","compcall","compctl","compdescribe","compfiles","compgroups","compquote","comptags","comptry","compvalues","dirs","disable","disown","echotc","echoti","emulate","fc","fg","float","functions","getcap","getln","history","integer","jobs","kill","limit","log","noglob","popd","print","pushd","pushln","rehash","sched","setcap","setopt","stat","suspend","ttyctl","unfunction","unhash","unlimit","unsetopt","vared","wait","whence","where","which","zcompile","zformat","zftp","zle","zmodload","zparseopts","zprof","zpty","zregexparse","zsocket","zstyle","ztcp","chcon","chgrp","chown","chmod","cp","dd","df","dir","dircolors","ln","ls","mkdir","mkfifo","mknod","mktemp","mv","realpath","rm","rmdir","shred","sync","touch","truncate","vdir","b2sum","base32","base64","cat","cksum","comm","csplit","cut","expand","fmt","fold","head","join","md5sum","nl","numfmt","od","paste","ptx","pr","sha1sum","sha224sum","sha256sum","sha384sum","sha512sum","shuf","sort","split","sum","tac","tail","tr","tsort","unexpand","uniq","wc","arch","basename","chroot","date","dirname","du","echo","env","expr","factor","groups","hostid","id","link","logname","nice","nohup","nproc","pathchk","pinky","printenv","printf","pwd","readlink","runcon","seq","sleep","stat","stdbuf","stty","tee","test","timeout","tty","uname","unlink","uptime","users","who","whoami","yes",]},contains:[l,e.SHEBANG(),o,s,e.HASH_COMMENT_MODE,i,{match:/(\/[a-z._-]+)+/},r,{className:"",begin:/\\"/},{className:"string",begin:/'/,end:/'/},t,]}},grmr_c(e){let n=e.regex,t=e.COMMENT("//","$",{contains:[{begin:/\\\n/}]}),a="[a-zA-Z_]\\w*::",i="(decltype\\(auto\\)|"+n.optional(a)+"[a-zA-Z_]\\w*"+n.optional("<[^<>]+>")+")",r={className:"type",variants:[{begin:"\\b[a-z\\d_]*_t\\b"},{match:/\batomic_[a-z]{3,6}\b/},]},s={className:"string",variants:[{begin:'(u8?|U|L)?"',end:'"',illegal:"\\n",contains:[e.BACKSLASH_ESCAPE]},{begin:"(u8?|U|L)?'(\\\\(x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4,8}|[0-7]{3}|\\S)|.)",end:"'",illegal:"."},e.END_SAME_AS_BEGIN({begin:/(?:u8?|U|L)?R"([^()\\ ]{0,16})\(/,end:/\)([^()\\ 
]{0,16})"/}),]},l={className:"number",variants:[{begin:"\\b(0b[01']+)"},{begin:"(-?)\\b([\\d']+(\\.[\\d']*)?|\\.[\\d']+)((ll|LL|l|L)(u|U)?|(u|U)(ll|LL|l|L)?|f|F|b|B)"},{begin:"(-?)(\\b0[xX][a-fA-F0-9']+|(\\b[\\d']+(\\.[\\d']*)?|\\.[\\d']+)([eE][-+]?[\\d']+)?)"},],relevance:0},o={className:"meta",begin:/#\s*[a-z]+\b/,end:/$/,keywords:{keyword:"if else elif endif define undef warning error line pragma _Pragma ifdef ifndef include"},contains:[{begin:/\\\n/,relevance:0},e.inherit(s,{className:"string"}),{className:"string",begin:/<.*?>/},t,e.C_BLOCK_COMMENT_MODE,]},c={className:"title",begin:n.optional(a)+e.IDENT_RE,relevance:0},d=n.optional(a)+e.IDENT_RE+"\\s*\\(",g={keyword:["asm","auto","break","case","continue","default","do","else","enum","extern","for","fortran","goto","if","inline","register","restrict","return","sizeof","struct","switch","typedef","union","volatile","while","_Alignas","_Alignof","_Atomic","_Generic","_Noreturn","_Static_assert","_Thread_local","alignas","alignof","noreturn","static_assert","thread_local","_Pragma",],type:["float","double","signed","unsigned","int","short","long","char","void","_Bool","_Complex","_Imaginary","_Decimal32","_Decimal64","_Decimal128","const","static","complex","bool","imaginary",],literal:"true false NULL",built_in:"std string wstring cin cout cerr clog stdin stdout stderr stringstream istringstream ostringstream auto_ptr deque list queue stack vector map set pair bitset multiset multimap unordered_set unordered_map unordered_multiset unordered_multimap priority_queue make_pair array shared_ptr abort terminate abs acos asin atan2 atan calloc ceil cosh cos exit exp fabs floor fmod fprintf fputs free frexp fscanf future isalnum isalpha iscntrl isdigit isgraph islower isprint ispunct isspace isupper isxdigit tolower toupper labs ldexp log10 log malloc realloc memchr memcmp memcpy memset modf pow printf putchar puts scanf sinh sin snprintf sprintf sqrt sscanf strcat strchr strcmp strcpy strcspn strlen strncat strncmp strncpy strpbrk strrchr strspn strstr tanh tan vfprintf vprintf vsprintf endl initializer_list unique_ptr"},u=[o,r,t,e.C_BLOCK_COMMENT_MODE,l,s],b={variants:[{begin:/=/,end:/;/},{begin:/\(/,end:/\)/},{beginKeywords:"new throw return else",end:/;/},],keywords:g,contains:u.concat([{begin:/\(/,end:/\)/,keywords:g,contains:u.concat(["self"]),relevance:0},]),relevance:0},m={begin:"("+i+"[\\*&\\s]+)+"+d,returnBegin:!0,end:/[{;=]/,excludeEnd:!0,keywords:g,illegal:/[^\w\s\*&:<>.]/,contains:[{begin:"decltype\\(auto\\)",keywords:g,relevance:0},{begin:d,returnBegin:!0,contains:[e.inherit(c,{className:"title.function"}),],relevance:0},{relevance:0,match:/,/},{className:"params",begin:/\(/,end:/\)/,keywords:g,relevance:0,contains:[t,e.C_BLOCK_COMMENT_MODE,s,l,r,{begin:/\(/,end:/\)/,keywords:g,relevance:0,contains:["self",t,e.C_BLOCK_COMMENT_MODE,s,l,r]},]},r,t,e.C_BLOCK_COMMENT_MODE,o,]};return{name:"C",aliases:["h"],keywords:g,disableAutodetect:!0,illegal:"=]/,contains:[{beginKeywords:"final class struct"},e.TITLE_MODE,]},]),exports:{preprocessor:o,strings:s,keywords:g}}},grmr_cpp(e){let 
n=e.regex,t=e.COMMENT("//","$",{contains:[{begin:/\\\n/}]}),a="[a-zA-Z_]\\w*::",i="(?!struct)(decltype\\(auto\\)|"+n.optional(a)+"[a-zA-Z_]\\w*"+n.optional("<[^<>]+>")+")",r={className:"type",begin:"\\b[a-z\\d_]*_t\\b"},s={className:"string",variants:[{begin:'(u8?|U|L)?"',end:'"',illegal:"\\n",contains:[e.BACKSLASH_ESCAPE]},{begin:"(u8?|U|L)?'(\\\\(x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4,8}|[0-7]{3}|\\S)|.)",end:"'",illegal:"."},e.END_SAME_AS_BEGIN({begin:/(?:u8?|U|L)?R"([^()\\ ]{0,16})\(/,end:/\)([^()\\ ]{0,16})"/}),]},l={className:"number",variants:[{begin:"\\b(0b[01']+)"},{begin:"(-?)\\b([\\d']+(\\.[\\d']*)?|\\.[\\d']+)((ll|LL|l|L)(u|U)?|(u|U)(ll|LL|l|L)?|f|F|b|B)"},{begin:"(-?)(\\b0[xX][a-fA-F0-9']+|(\\b[\\d']+(\\.[\\d']*)?|\\.[\\d']+)([eE][-+]?[\\d']+)?)"},],relevance:0},o={className:"meta",begin:/#\s*[a-z]+\b/,end:/$/,keywords:{keyword:"if else elif endif define undef warning error line pragma _Pragma ifdef ifndef include"},contains:[{begin:/\\\n/,relevance:0},e.inherit(s,{className:"string"}),{className:"string",begin:/<.*?>/},t,e.C_BLOCK_COMMENT_MODE,]},c={className:"title",begin:n.optional(a)+e.IDENT_RE,relevance:0},d=n.optional(a)+e.IDENT_RE+"\\s*\\(",g={type:["bool","char","char16_t","char32_t","char8_t","double","float","int","long","short","void","wchar_t","unsigned","signed","const","static",],keyword:["alignas","alignof","and","and_eq","asm","atomic_cancel","atomic_commit","atomic_noexcept","auto","bitand","bitor","break","case","catch","class","co_await","co_return","co_yield","compl","concept","const_cast|10","consteval","constexpr","constinit","continue","decltype","default","delete","do","dynamic_cast|10","else","enum","explicit","export","extern","false","final","for","friend","goto","if","import","inline","module","mutable","namespace","new","noexcept","not","not_eq","nullptr","operator","or","or_eq","override","private","protected","public","reflexpr","register","reinterpret_cast|10","requires","return","sizeof","static_assert","static_cast|10","struct","switch","synchronized","template","this","thread_local","throw","transaction_safe","transaction_safe_dynamic","true","try","typedef","typeid","typename","union","using","virtual","volatile","while","xor","xor_eq",],literal:["NULL","false","nullopt","nullptr","true"],built_in:["_Pragma"],_type_hints:["any","auto_ptr","barrier","binary_semaphore","bitset","complex","condition_variable","condition_variable_any","counting_semaphore","deque","false_type","future","imaginary","initializer_list","istringstream","jthread","latch","lock_guard","multimap","multiset","mutex","optional","ostringstream","packaged_task","pair","promise","priority_queue","queue","recursive_mutex","recursive_timed_mutex","scoped_lock","set","shared_future","shared_lock","shared_mutex","shared_timed_mutex","shared_ptr","stack","string_view","stringstream","timed_mutex","thread","true_type","tuple","unique_lock","unique_ptr","unordered_map","unordered_multimap","unordered_multiset","unordered_set","variant","vector","weak_ptr","wstring","wstring_view",]},u={className:"function.dispatch",relevance:0,keywords:{_hint:["abort","abs","acos","apply","as_const","asin","atan","atan2","calloc","ceil","cerr","cin","clog","cos","cosh","cout","declval","endl","exchange","exit","exp","fabs","floor","fmod","forward","fprintf","fputs","free","frexp","fscanf","future","invoke","isalnum","isalpha","iscntrl","isdigit","isgraph","islower","isprint","ispunct","isspace","isupper","isxdigit","labs","launder","ldexp","log","log10","make_pair","make_shared","make_shared_for_overwrite",
"make_tuple","make_unique","malloc","memchr","memcmp","memcpy","memset","modf","move","pow","printf","putchar","puts","realloc","scanf","sin","sinh","snprintf","sprintf","sqrt","sscanf","std","stderr","stdin","stdout","strcat","strchr","strcmp","strcpy","strcspn","strlen","strncat","strncmp","strncpy","strpbrk","strrchr","strspn","strstr","swap","tan","tanh","terminate","to_underlying","tolower","toupper","vfprintf","visit","vprintf","vsprintf",]},begin:n.concat(/\b/,/(?!decltype)/,/(?!if)/,/(?!for)/,/(?!switch)/,/(?!while)/,e.IDENT_RE,n.lookahead(/(<[^<>]+>|)\s*\(/))},b=[u,o,r,t,e.C_BLOCK_COMMENT_MODE,l,s],m={variants:[{begin:/=/,end:/;/},{begin:/\(/,end:/\)/},{beginKeywords:"new throw return else",end:/;/},],keywords:g,contains:b.concat([{begin:/\(/,end:/\)/,keywords:g,contains:b.concat(["self"]),relevance:0},]),relevance:0},p={className:"function",begin:"("+i+"[\\*&\\s]+)+"+d,returnBegin:!0,end:/[{;=]/,excludeEnd:!0,keywords:g,illegal:/[^\w\s\*&:<>.]/,contains:[{begin:"decltype\\(auto\\)",keywords:g,relevance:0},{begin:d,returnBegin:!0,contains:[c],relevance:0},{begin:/::/,relevance:0},{begin:/:/,endsWithParent:!0,contains:[s,l]},{relevance:0,match:/,/},{className:"params",begin:/\(/,end:/\)/,keywords:g,relevance:0,contains:[t,e.C_BLOCK_COMMENT_MODE,s,l,r,{begin:/\(/,end:/\)/,keywords:g,relevance:0,contains:["self",t,e.C_BLOCK_COMMENT_MODE,s,l,r]},]},r,t,e.C_BLOCK_COMMENT_MODE,o,]};return{name:"C++",aliases:["cc","c++","h++","hpp","hh","hxx","cxx"],keywords:g,illegal:"",keywords:g,contains:["self",r]},{begin:e.IDENT_RE+"::",keywords:g},{match:[/\b(?:enum(?:\s+(?:class|struct))?|class|struct|union)/,/\s+/,/\w+/,],className:{1:"keyword",3:"title.class"}},])}},grmr_csharp(e){let n={keyword:["abstract","as","base","break","case","catch","class","const","continue","do","else","event","explicit","extern","finally","fixed","for","foreach","goto","if","implicit","in","interface","internal","is","lock","namespace","new","operator","out","override","params","private","protected","public","readonly","record","ref","return","scoped","sealed","sizeof","stackalloc","static","struct","switch","this","throw","try","typeof","unchecked","unsafe","using","virtual","void","volatile","while",].concat(["add","alias","and","ascending","async","await","by","descending","equals","from","get","global","group","init","into","join","let","nameof","not","notnull","on","or","orderby","partial","remove","select","set","unmanaged","value|0","var","when","where","with","yield",]),built_in:["bool","byte","char","decimal","delegate","double","dynamic","enum","float","int","long","nint","nuint","object","sbyte","short","string","ulong","uint","ushort",],literal:["default","false","null","true"]},t=e.inherit(e.TITLE_MODE,{begin:"[a-zA-Z](\\.?\\w)*"}),a={className:"number",variants:[{begin:"\\b(0b[01']+)"},{begin:"(-?)\\b([\\d']+(\\.[\\d']*)?|\\.[\\d']+)(u|U|l|L|ul|UL|f|F|b|B)"},{begin:"(-?)(\\b0[xX][a-fA-F0-9']+|(\\b[\\d']+(\\.[\\d']*)?|\\.[\\d']+)([eE][-+]?[\\d']+)?)"},],relevance:0},i={className:"string",begin:'@"',end:'"',contains:[{begin:'""'}]},r=e.inherit(i,{illegal:/\n/}),s={className:"subst",begin:/\{/,end:/\}/,keywords:n},l=e.inherit(s,{illegal:/\n/}),o={className:"string",begin:/\$"/,end:'"',illegal:/\n/,contains:[{begin:/\{\{/},{begin:/\}\}/},e.BACKSLASH_ESCAPE,l,]},c={className:"string",begin:/\$@"/,end:'"',contains:[{begin:/\{\{/},{begin:/\}\}/},{begin:'""'},s,]},d=e.inherit(c,{illegal:/\n/,contains:[{begin:/\{\{/},{begin:/\}\}/},{begin:'""'},l]});s.contains=[c,o,i,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,a,e.C_B
LOCK_COMMENT_MODE,],l.contains=[d,o,r,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,a,e.inherit(e.C_BLOCK_COMMENT_MODE,{illegal:/\n/}),];let g={variants:[c,o,i,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE]},u={begin:"<",end:">",contains:[{beginKeywords:"in out"},t]},b=e.IDENT_RE+"(<"+e.IDENT_RE+"(\\s*,\\s*"+e.IDENT_RE+")*>)?(\\[\\])?",m={begin:"@"+e.IDENT_RE,relevance:0};return{name:"C#",aliases:["cs","c#"],keywords:n,illegal:/::/,contains:[e.COMMENT("///","$",{returnBegin:!0,contains:[{className:"doctag",variants:[{begin:"///",relevance:0},{begin:""},{begin:""},]},]}),e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,{className:"meta",begin:"#",end:"$",keywords:{keyword:"if else elif endif define undef warning error line region endregion pragma checksum"}},g,a,{beginKeywords:"class interface",relevance:0,end:/[{;=]/,illegal:/[^\s:,]/,contains:[{beginKeywords:"where class"},t,u,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,]},{beginKeywords:"namespace",relevance:0,end:/[{;=]/,illegal:/[^\s:]/,contains:[t,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{beginKeywords:"record",relevance:0,end:/[{;=]/,illegal:/[^\s:]/,contains:[t,u,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{className:"meta",begin:"^\\s*\\[(?=[\\w])",excludeBegin:!0,end:"\\]",excludeEnd:!0,contains:[{className:"string",begin:/"/,end:/"/},]},{beginKeywords:"new return throw await else",relevance:0},{className:"function",begin:"("+b+"\\s+)+"+e.IDENT_RE+"\\s*(<[^=]+>\\s*)?\\(",returnBegin:!0,end:/\s*[{;=]/,excludeEnd:!0,keywords:n,contains:[{beginKeywords:"public private protected static internal protected abstract async extern override unsafe virtual new sealed partial",relevance:0},{begin:e.IDENT_RE+"\\s*(<[^=]+>\\s*)?\\(",returnBegin:!0,contains:[e.TITLE_MODE,u],relevance:0},{match:/\(\)/},{className:"params",begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:n,relevance:0,contains:[g,a,e.C_BLOCK_COMMENT_MODE]},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,]},m,]}},grmr_css(e){let n=e.regex,t=X(e),a=[e.APOS_STRING_MODE,e.QUOTE_STRING_MODE];return{name:"CSS",case_insensitive:!0,illegal:/[=|'\$]/,keywords:{keyframePosition:"from to"},classNameAliases:{keyframePosition:"selector-tag"},contains:[t.BLOCK_COMMENT,{begin:/-(webkit|moz|ms|o)-(?=[a-z])/},t.CSS_NUMBER_MODE,{className:"selector-id",begin:/#[A-Za-z0-9_-]+/,relevance:0},{className:"selector-class",begin:"\\.[a-zA-Z-][a-zA-Z0-9_-]*",relevance:0},t.ATTRIBUTE_SELECTOR_MODE,{className:"selector-pseudo",variants:[{begin:":("+Y.join("|")+")"},{begin:":(:)?("+ee.join("|")+")"},]},t.CSS_VARIABLE,{className:"attribute",begin:"\\b("+en.join("|")+")\\b"},{begin:/:/,end:/[;}{]/,contains:[t.BLOCK_COMMENT,t.HEXCOLOR,t.IMPORTANT,t.CSS_NUMBER_MODE,...a,{begin:/(url|data-uri)\(/,end:/\)/,relevance:0,keywords:{built_in:"url data-uri"},contains:[...a,{className:"string",begin:/[^)]/,endsWithParent:!0,excludeEnd:!0},]},t.FUNCTION_DISPATCH,]},{begin:n.lookahead(/@/),end:"[{;]",relevance:0,illegal:/:/,contains:[{className:"keyword",begin:/@-?\w[\w]*(-\w+)*/},{begin:/\s/,endsWithParent:!0,excludeEnd:!0,relevance:0,keywords:{$pattern:/[a-z-]+/,keyword:"and or not only",attribute:J.join(" ")},contains:[{begin:/[a-z-]+(?=:)/,className:"attribute"},...a,t.CSS_NUMBER_MODE,]},]},{className:"selector-tag",begin:"\\b("+V.join("|")+")\\b"},]}},grmr_diff(e){let n=e.regex;return{name:"Diff",aliases:["patch"],contains:[{className:"meta",relevance:10,match:n.either(/^@@ +-\d+,\d+ +\+\d+,\d+ +@@/,/^\*\*\* +\d+,\d+ +\*\*\*\*$/,/^--- +\d+,\d+ +----$/)},{className:"comment",variants:[{begin:n.either(/Index: 
/,/^index/,/={3,}/,/^-{3}/,/^\*{3} /,/^\+{3}/,/^diff --git/),end:/$/},{match:/^\*{15}$/},]},{className:"addition",begin:/^\+/,end:/$/},{className:"deletion",begin:/^-/,end:/$/},{className:"addition",begin:/^!/,end:/$/},]}},grmr_go(e){let n={keyword:["break","case","chan","const","continue","default","defer","else","fallthrough","for","func","go","goto","if","import","interface","map","package","range","return","select","struct","switch","type","var",],type:["bool","byte","complex64","complex128","error","float32","float64","int8","int16","int32","int64","string","uint8","uint16","uint32","uint64","int","uint","uintptr","rune",],literal:["true","false","iota","nil"],built_in:["append","cap","close","complex","copy","imag","len","make","new","panic","print","println","real","recover","delete",]};return{name:"Go",aliases:["golang"],keywords:n,illegal:"e(n,t,a-1))}("(?:<"+t+"~~~(?:\\s*,\\s*"+t+"~~~)*>)?",/~~~/g,2),i={keyword:["synchronized","abstract","private","var","static","if","const ","for","while","strictfp","finally","protected","import","native","final","void","enum","else","break","transient","catch","instanceof","volatile","case","assert","package","default","public","try","switch","continue","throws","protected","public","private","module","requires","exports","do","sealed","yield","permits",],literal:["false","true","null"],type:["char","boolean","long","float","int","byte","short","double",],built_in:["super","this"]},r={className:"meta",begin:"@"+t,contains:[{begin:/\(/,end:/\)/,contains:["self"]},]},s={className:"params",begin:/\(/,end:/\)/,keywords:i,relevance:0,contains:[e.C_BLOCK_COMMENT_MODE],endsParent:!0};return{name:"Java",aliases:["jsp"],keywords:i,illegal:/<\/|#/,contains:[e.COMMENT("/\\*\\*","\\*/",{relevance:0,contains:[{begin:/\w+@/,relevance:0},{className:"doctag",begin:"@[A-Za-z]+"},]}),{begin:/import java\.[a-z]+\./,keywords:"import",relevance:2},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,{begin:/"""/,end:/"""/,className:"string",contains:[e.BACKSLASH_ESCAPE]},e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,{match:[/\b(?:class|interface|enum|extends|implements|new)/,/\s+/,t,],className:{1:"keyword",3:"title.class"}},{match:/non-sealed/,scope:"keyword"},{begin:[n.concat(/(?!else)/,t),/\s+/,t,/\s+/,/=(?!=)/],className:{1:"type",3:"variable",5:"operator"}},{begin:[/record/,/\s+/,t],className:{1:"keyword",3:"title.class"},contains:[s,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{beginKeywords:"new throw return else",relevance:0},{begin:["(?:"+a+"\\s+)",e.UNDERSCORE_IDENT_RE,/\s*(?=\()/],className:{2:"title.function"},keywords:i,contains:[{className:"params",begin:/\(/,end:/\)/,keywords:i,relevance:0,contains:[r,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,er,e.C_BLOCK_COMMENT_MODE,]},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,]},er,r,]}},grmr_javascript:em,grmr_json(e){let n=["true","false","null"],t={scope:"literal",beginKeywords:n.join(" ")};return{name:"JSON",keywords:{literal:n},contains:[{className:"attr",begin:/"(\\.|[^\\"\r\n])*"(?=\s*:)/,relevance:1.01},{match:/[{}[\],:]/,className:"punctuation",relevance:0},e.QUOTE_STRING_MODE,t,e.C_NUMBER_MODE,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,],illegal:"\\S"}},grmr_kotlin(e){let n={keyword:"abstract as val var vararg get set class object open private protected public noinline crossinline dynamic final enum if else do while for when throw try catch finally import package is in fun override companion reified inline lateinit init interface annotation data sealed internal infix operator out by constructor super tailrec where 
const inner suspend typealias external expect actual",built_in:"Byte Short Char Int Long Boolean Float Double Void Unit Nothing",literal:"true false null"},t={className:"symbol",begin:e.UNDERSCORE_IDENT_RE+"@"},a={className:"subst",begin:/\$\{/,end:/\}/,contains:[e.C_NUMBER_MODE]},i={className:"variable",begin:"\\$"+e.UNDERSCORE_IDENT_RE},r={className:"string",variants:[{begin:'"""',end:'"""(?=[^"])',contains:[i,a]},{begin:"'",end:"'",illegal:/\n/,contains:[e.BACKSLASH_ESCAPE]},{begin:'"',end:'"',illegal:/\n/,contains:[e.BACKSLASH_ESCAPE,i,a]},]};a.contains.push(r);let s={className:"meta",begin:"@(?:file|property|field|get|set|receiver|param|setparam|delegate)\\s*:(?:\\s*"+e.UNDERSCORE_IDENT_RE+")?"},l={className:"meta",begin:"@"+e.UNDERSCORE_IDENT_RE,contains:[{begin:/\(/,end:/\)/,contains:[e.inherit(r,{className:"string"}),"self"]},]},o=e.COMMENT("/\\*","\\*/",{contains:[e.C_BLOCK_COMMENT_MODE]}),c={variants:[{className:"type",begin:e.UNDERSCORE_IDENT_RE},{begin:/\(/,end:/\)/,contains:[]},]},d=c;return d.variants[1].contains=[c],c.variants[1].contains=[d],{name:"Kotlin",aliases:["kt","kts"],keywords:n,contains:[e.COMMENT("/\\*\\*","\\*/",{relevance:0,contains:[{className:"doctag",begin:"@[A-Za-z]+"}]}),e.C_LINE_COMMENT_MODE,o,{className:"keyword",begin:/\b(break|continue|return|this)\b/,starts:{contains:[{className:"symbol",begin:/@\w+/}]}},t,s,l,{className:"function",beginKeywords:"fun",end:"[(]|$",returnBegin:!0,excludeEnd:!0,keywords:n,relevance:5,contains:[{begin:e.UNDERSCORE_IDENT_RE+"\\s*\\(",returnBegin:!0,relevance:0,contains:[e.UNDERSCORE_TITLE_MODE]},{className:"type",begin://,keywords:"reified",relevance:0},{className:"params",begin:/\(/,end:/\)/,endsParent:!0,keywords:n,relevance:0,contains:[{begin:/:/,end:/[=,\/]/,endsWithParent:!0,contains:[c,e.C_LINE_COMMENT_MODE,o],relevance:0},e.C_LINE_COMMENT_MODE,o,s,l,r,e.C_NUMBER_MODE,]},o,]},{begin:[/class|interface|trait/,/\s+/,e.UNDERSCORE_IDENT_RE],beginScope:{3:"title.class"},keywords:"class interface trait",end:/[:\{(]|$/,excludeEnd:!0,illegal:"extends implements",contains:[{beginKeywords:"public protected internal private constructor"},e.UNDERSCORE_TITLE_MODE,{className:"type",begin://,excludeBegin:!0,excludeEnd:!0,relevance:0},{className:"type",begin:/[,:]\s*/,end:/[<\(,){\s]|$/,excludeBegin:!0,returnEnd:!0},s,l,]},r,{className:"meta",begin:"^#!/usr/bin/env",end:"$",illegal:"\n"},er,]}},grmr_less(e){let n=X(e),t="([\\w-]+|@\\{[\\w-]+\\})",a=[],i=[],r=e=>({className:"string",begin:"~?"+e+".*?"+e}),s=(e,n,t)=>({className:e,begin:n,relevance:t}),l={$pattern:/[a-z-]+/,keyword:"and or not only",attribute:J.join(" ")};i.push(e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,r("'"),r('"'),n.CSS_NUMBER_MODE,{begin:"(url|data-uri)\\(",starts:{className:"string",end:"[\\)\\n]",excludeEnd:!0}},n.HEXCOLOR,{begin:"\\(",end:"\\)",contains:i,keywords:l,relevance:0},s("variable","@@?[\\w-]+",10),s("variable","@\\{[\\w-]+\\}"),s("built_in","~?`[^`]*?`"),{className:"attribute",begin:"[\\w-]+\\s*:",end:":",returnBegin:!0,excludeEnd:!0},n.IMPORTANT,{beginKeywords:"and not"},n.FUNCTION_DISPATCH);let o=i.concat({begin:/\{/,end:/\}/,contains:a}),c={beginKeywords:"when",endsWithParent:!0,contains:[{beginKeywords:"and 
not"}].concat(i)},d={begin:t+"\\s*:",returnBegin:!0,end:/[;}]/,relevance:0,contains:[{begin:/-(webkit|moz|ms|o)-/},n.CSS_VARIABLE,{className:"attribute",begin:"\\b("+en.join("|")+")\\b",end:/(?=:)/,starts:{endsWithParent:!0,illegal:"[<=$]",relevance:0,contains:i}},]},g={variants:[{begin:"[\\.#:&\\[>]",end:"[;{}]"},{begin:t,end:/\{/},],returnBegin:!0,returnEnd:!0,illegal:"[<='$\"]",relevance:0,contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,c,s("keyword","all\\b"),s("variable","@\\{[\\w-]+\\}"),{begin:"\\b("+V.join("|")+")\\b",className:"selector-tag"},n.CSS_NUMBER_MODE,s("selector-tag",t,0),s("selector-id","#"+t),s("selector-class","\\."+t,0),s("selector-tag","&",0),n.ATTRIBUTE_SELECTOR_MODE,{className:"selector-pseudo",begin:":("+Y.join("|")+")"},{className:"selector-pseudo",begin:":(:)?("+ee.join("|")+")"},{begin:/\(/,end:/\)/,relevance:0,contains:o},{begin:"!important"},n.FUNCTION_DISPATCH,]},u={begin:`[\\w-]+:(:)?(${et.join("|")})`,returnBegin:!0,contains:[g]};return a.push(e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,{className:"keyword",begin:"@(import|media|charset|font-face|(-[a-z]+-)?keyframes|supports|document|namespace|page|viewport|host)\\b",starts:{end:"[;{}]",keywords:l,returnEnd:!0,contains:i,relevance:0}},{className:"variable",variants:[{begin:"@[\\w-]+\\s*:",relevance:15},{begin:"@[\\w-]+"},],starts:{end:"[;}]",returnEnd:!0,contains:o}},u,d,g,c,n.FUNCTION_DISPATCH),{name:"Less",case_insensitive:!0,illegal:"[=>'/<($\"]",contains:a}},grmr_lua(e){let n="\\[=*\\[",t="\\]=*\\]",a={begin:n,end:t,contains:["self"]},i=[e.COMMENT("--(?!\\[=*\\[)","$"),e.COMMENT("--\\[=*\\[",t,{contains:[a],relevance:10}),];return{name:"Lua",keywords:{$pattern:e.UNDERSCORE_IDENT_RE,literal:"true false nil",keyword:"and break do else elseif end for goto if in local not or repeat return then until while",built_in:"_G _ENV _VERSION __index __newindex __mode __call __metatable __tostring __len __gc __add __sub __mul __div __mod __pow __concat __unm __eq __lt __le assert collectgarbage dofile error getfenv getmetatable ipairs load loadfile loadstring module next pairs pcall print rawequal rawget rawset require select setfenv setmetatable tonumber tostring type unpack xpcall arg self coroutine resume yield status wrap create running debug getupvalue debug sethook getmetatable gethook setmetatable setlocal traceback setfenv getinfo setupvalue getlocal getregistry getfenv io lines write close flush open output type read stderr stdin input stdout popen tmpfile math log max acos huge ldexp pi cos tanh pow deg tan cosh sinh random randomseed frexp ceil floor rad abs sqrt modf asin min mod fmod log10 atan2 exp sin atan os exit setlocale date getenv difftime remove time clock tmpname rename execute package preload loadlib loaded loaders cpath config path seeall string sub upper len gfind rep find match char dump gmatch reverse byte format gsub lower table setn insert getn foreachi maxn foreach concat sort remove"},contains:i.concat([{className:"function",beginKeywords:"function",end:"\\)",contains:[e.inherit(e.TITLE_MODE,{begin:"([_a-zA-Z]\\w*\\.)*([_a-zA-Z]\\w*:)?[_a-zA-Z]\\w*"}),{className:"params",begin:"\\(",endsWithParent:!0,contains:i},].concat(i)},e.C_NUMBER_MODE,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,{className:"string",begin:n,end:t,contains:[a],relevance:5},])}},grmr_makefile(e){let n={className:"variable",variants:[{begin:"\\$\\("+e.UNDERSCORE_IDENT_RE+"\\)",contains:[e.BACKSLASH_ESCAPE]},{begin:/\$[@%`]+/},]},]},]};return{name:"HTML, 
XML",aliases:["html","xhtml","rss","atom","xjb","xsd","xsl","plist","wsf","svg",],case_insensitive:!0,unicodeRegex:!0,contains:[{className:"meta",begin://,relevance:10,contains:[i,l,s,r,{begin:/\[/,end:/\]/,contains:[{className:"meta",begin://,contains:[i,r,l,s]},]},]},e.COMMENT(//,{relevance:10}),{begin://,relevance:10},a,{className:"meta",end:/\?>/,variants:[{begin:/<\?xml/,relevance:10,contains:[l]},{begin:/<\?[a-z][a-z0-9]+/},]},{className:"tag",begin:/)/,end:/>/,keywords:{name:"style"},contains:[o],starts:{end:/<\/style>/,returnEnd:!0,subLanguage:["css","xml"]}},{className:"tag",begin:/)/,end:/>/,keywords:{name:"script"},contains:[o],starts:{end:/<\/script>/,returnEnd:!0,subLanguage:["javascript","handlebars","xml"]}},{className:"tag",begin:/<>|<\/>/},{className:"tag",begin:n.concat(//,/>/,/\s/)))),end:/\/?>/,contains:[{className:"name",begin:t,relevance:0,starts:o},]},{className:"tag",begin:n.concat(/<\//,n.lookahead(n.concat(t,/>/))),contains:[{className:"name",begin:t,relevance:0},{begin:/>/,relevance:0,endsParent:!0},]},]}},grmr_markdown(e){let n={begin:/<\/?[A-Za-z_]/,end:">",subLanguage:"xml",relevance:0},t={variants:[{begin:/\[.+?\]\[.*?\]/,relevance:0},{begin:/\[.+?\]\(((data|javascript|mailto):|(?:http|ftp)s?:\/\/).*?\)/,relevance:2},{begin:e.regex.concat(/\[.+?\]\(/,/[A-Za-z][A-Za-z0-9+.-]*/,/:\/\/.*?\)/),relevance:2},{begin:/\[.+?\]\([./?&#].*?\)/,relevance:1},{begin:/\[.*?\]\(.*?\)/,relevance:0},],returnBegin:!0,contains:[{match:/\[(?=\])/},{className:"string",relevance:0,begin:"\\[",end:"\\]",excludeBegin:!0,returnEnd:!0},{className:"link",relevance:0,begin:"\\]\\(",end:"\\)",excludeBegin:!0,excludeEnd:!0},{className:"symbol",relevance:0,begin:"\\]\\[",end:"\\]",excludeBegin:!0,excludeEnd:!0},]},a={className:"strong",contains:[],variants:[{begin:/_{2}(?!\s)/,end:/_{2}/},{begin:/\*{2}(?!\s)/,end:/\*{2}/},]},i={className:"emphasis",contains:[],variants:[{begin:/\*(?![*\s])/,end:/\*/},{begin:/_(?![_\s])/,end:/_/,relevance:0},]},r=e.inherit(a,{contains:[]}),s=e.inherit(i,{contains:[]});a.contains.push(s),i.contains.push(r);let l=[n,t];return[a,i,r,s].forEach(e=>{e.contains=e.contains.concat(l)}),{name:"Markdown",aliases:["md","mkdown","mkd"],contains:[{className:"section",variants:[{begin:"^#{1,6}",end:"$",contains:l=l.concat(a,i)},{begin:"(?=^.+?\\n[=-]{2,}$)",contains:[{begin:"^[=-]*$"},{begin:"^",end:"\\n",contains:l},]},]},n,{className:"bullet",begin:"^[ ]*([*+-]|(\\d+\\.))(?=\\s+)",end:"\\s+",excludeEnd:!0},a,i,{className:"quote",begin:"^>\\s+",contains:l,end:"$"},{className:"code",variants:[{begin:"(`{3,})[^`](.|\\n)*?\\1`*[ ]*"},{begin:"(~{3,})[^~](.|\\n)*?\\1~*[ ]*"},{begin:"```",end:"```+[ ]*$"},{begin:"~~~",end:"~~~+[ ]*$"},{begin:"`.+?`"},{begin:"(?=^( {4}|\\t))",contains:[{begin:"^( {4}|\\t)",end:"(\\n)$"}],relevance:0},]},{begin:"^[-\\*]{3,}",end:"$"},t,{begin:/^\[[^\n]+\]:/,returnBegin:!0,contains:[{className:"symbol",begin:/\[/,end:/\]/,excludeBegin:!0,excludeEnd:!0},{className:"link",begin:/:\s*/,end:/$/,excludeBegin:!0},]},]}},grmr_objectivec(e){let 
n=/[a-zA-Z@][a-zA-Z0-9_]*/,t={$pattern:n,keyword:["@interface","@class","@protocol","@implementation"]};return{name:"Objective-C",aliases:["mm","objc","obj-c","obj-c++","objective-c++"],keywords:{"variable.language":["this","super"],$pattern:n,keyword:["while","export","sizeof","typedef","const","struct","for","union","volatile","static","mutable","if","do","return","goto","enum","else","break","extern","asm","case","default","register","explicit","typename","switch","continue","inline","readonly","assign","readwrite","self","@synchronized","id","typeof","nonatomic","IBOutlet","IBAction","strong","weak","copy","in","out","inout","bycopy","byref","oneway","__strong","__weak","__block","__autoreleasing","@private","@protected","@public","@try","@property","@end","@throw","@catch","@finally","@autoreleasepool","@synthesize","@dynamic","@selector","@optional","@required","@encode","@package","@import","@defs","@compatibility_alias","__bridge","__bridge_transfer","__bridge_retained","__bridge_retain","__covariant","__contravariant","__kindof","_Nonnull","_Nullable","_Null_unspecified","__FUNCTION__","__PRETTY_FUNCTION__","__attribute__","getter","setter","retain","unsafe_unretained","nonnull","nullable","null_unspecified","null_resettable","class","instancetype","NS_DESIGNATED_INITIALIZER","NS_UNAVAILABLE","NS_REQUIRES_SUPER","NS_RETURNS_INNER_POINTER","NS_INLINE","NS_AVAILABLE","NS_DEPRECATED","NS_ENUM","NS_OPTIONS","NS_SWIFT_UNAVAILABLE","NS_ASSUME_NONNULL_BEGIN","NS_ASSUME_NONNULL_END","NS_REFINED_FOR_SWIFT","NS_SWIFT_NAME","NS_SWIFT_NOTHROW","NS_DURING","NS_HANDLER","NS_ENDHANDLER","NS_VALUERETURN","NS_VOIDRETURN",],literal:["false","true","FALSE","TRUE","nil","YES","NO","NULL",],built_in:["dispatch_once_t","dispatch_queue_t","dispatch_sync","dispatch_async","dispatch_once",],type:["int","float","char","unsigned","signed","short","long","double","wchar_t","unichar","void","bool","BOOL","id|0","_Bool",]},illegal:"/,end:/$/,illegal:"\\n"},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,]},{className:"class",begin:"("+t.keyword.join("|")+")\\b",end:/(\{|$)/,excludeEnd:!0,keywords:t,contains:[e.UNDERSCORE_TITLE_MODE]},{begin:"\\."+e.UNDERSCORE_IDENT_RE,relevance:0},]}},grmr_perl(e){let n=e.regex,t=/[dualxmsipngr]{0,12}/,a={$pattern:/[\w.]+/,keyword:"abs accept alarm and atan2 bind binmode bless break caller chdir chmod chomp chop chown chr chroot close closedir connect continue cos crypt dbmclose dbmopen defined delete die do dump each else elsif endgrent endhostent endnetent endprotoent endpwent endservent eof eval exec exists exit exp fcntl fileno flock for foreach fork format formline getc getgrent getgrgid getgrnam gethostbyaddr gethostbyname gethostent getlogin getnetbyaddr getnetbyname getnetent getpeername getpgrp getpriority getprotobyname getprotobynumber getprotoent getpwent getpwnam getpwuid getservbyname getservbyport getservent getsockname getsockopt given glob gmtime goto grep gt hex if index int ioctl join keys kill last lc lcfirst length link listen local localtime log lstat lt ma map mkdir msgctl msgget msgrcv msgsnd my ne next no not oct open opendir or ord our pack package pipe pop pos print printf prototype push q|0 qq quotemeta qw qx rand read readdir readline readlink readpipe recv redo ref rename require reset return reverse rewinddir rindex rmdir say scalar seek seekdir select semctl semget semop send setgrent sethostent setnetent setpgrp setpriority setprotoent setpwent setservent setsockopt shift shmctl shmget shmread shmwrite shutdown sin sleep socket socketpair sort 
splice split sprintf sqrt srand stat state study sub substr symlink syscall sysopen sysread sysseek system syswrite tell telldir tie tied time times tr truncate uc ucfirst umask undef unless unlink unpack unshift untie until use utime values vec wait waitpid wantarray warn when while write x|0 xor y|0"},i={className:"subst",begin:"[$@]\\{",end:"\\}",keywords:a},r={begin:/->\{/,end:/\}/},s={variants:[{begin:/\$\d/},{begin:n.concat(/[$%@](\^\w\b|#\w+(::\w+)*|\{\w+\}|\w+(::\w*)*)/,"(?![A-Za-z])(?![@$%])")},{begin:/[$%@][^\s\w{]/,relevance:0},]},l=[e.BACKSLASH_ESCAPE,i,s],o=[/!/,/\//,/\|/,/\?/,/'/,/"/,/#/],c=(e,a,i="\\1")=>{let r="\\1"===i?i:n.concat(i,a);return n.concat(n.concat("(?:",e,")"),a,/(?:\\.|[^\\\/])*?/,r,/(?:\\.|[^\\\/])*?/,i,t)},d=(e,a,i)=>n.concat(n.concat("(?:",e,")"),a,/(?:\\.|[^\\\/])*?/,i,t),g=[s,e.HASH_COMMENT_MODE,e.COMMENT(/^=\w/,/=cut/,{endsWithParent:!0}),r,{className:"string",contains:l,variants:[{begin:"q[qwxr]?\\s*\\(",end:"\\)",relevance:5},{begin:"q[qwxr]?\\s*\\[",end:"\\]",relevance:5},{begin:"q[qwxr]?\\s*\\{",end:"\\}",relevance:5},{begin:"q[qwxr]?\\s*\\|",end:"\\|",relevance:5},{begin:"q[qwxr]?\\s*<",end:">",relevance:5},{begin:"qw\\s+q",end:"q",relevance:5},{begin:"'",end:"'",contains:[e.BACKSLASH_ESCAPE]},{begin:'"',end:'"'},{begin:"`",end:"`",contains:[e.BACKSLASH_ESCAPE]},{begin:/\{\w+\}/,relevance:0},{begin:"-?\\w+\\s*=>",relevance:0},]},{className:"number",begin:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b",relevance:0},{begin:"(\\/\\/|"+e.RE_STARTERS_RE+"|\\b(split|return|print|reverse|grep)\\b)\\s*",keywords:"split return print reverse grep",relevance:0,contains:[e.HASH_COMMENT_MODE,{className:"regexp",variants:[{begin:c("s|tr|y",n.either(...o,{capture:!0}))},{begin:c("s|tr|y","\\(","\\)")},{begin:c("s|tr|y","\\[","\\]")},{begin:c("s|tr|y","\\{","\\}")},],relevance:2},{className:"regexp",variants:[{begin:/(m|qr)\/\//,relevance:0},{begin:d("(?:m|qr)?",/\//,/\//)},{begin:d("m|qr",n.either(...o,{capture:!0}),/\1/)},{begin:d("m|qr",/\(/,/\)/)},{begin:d("m|qr",/\[/,/\]/)},{begin:d("m|qr",/\{/,/\}/)},]},]},{className:"function",beginKeywords:"sub",end:"(\\s*\\(.*?\\))?[;{]",excludeEnd:!0,relevance:5,contains:[e.TITLE_MODE]},{begin:"-\\w\\b",relevance:0},{begin:"^__DATA__$",end:"^__END__$",subLanguage:"mojolicious",contains:[{begin:"^@@.*",end:"$",className:"comment"}]},];return i.contains=g,r.contains=g,{name:"Perl",aliases:["pl","pm"],keywords:a,contains:g}},grmr_php(e){let n=e.regex,t=/(?![A-Za-z0-9])(?![$])/,a=n.concat(/[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*/,t),i=n.concat(/(\\?[A-Z][a-z0-9_\x7f-\xff]+|\\?[A-Z]+(?=[A-Z][a-z0-9_\x7f-\xff])){1,}/,t),r={scope:"variable",match:"\\$+"+a},s={scope:"subst",variants:[{begin:/\$\w+/},{begin:/\{\$/,end:/\}/},]},l=e.inherit(e.APOS_STRING_MODE,{illegal:null}),o="[ \n]",c={scope:"string",variants:[e.inherit(e.QUOTE_STRING_MODE,{illegal:null,contains:e.QUOTE_STRING_MODE.contains.concat(s)}),l,e.END_SAME_AS_BEGIN({begin:/<<<[ \t]*(\w+)\n/,end:/[ 
\t]*(\w+)\b/,contains:e.QUOTE_STRING_MODE.contains.concat(s)}),]},d={scope:"number",variants:[{begin:"\\b0[bB][01]+(?:_[01]+)*\\b"},{begin:"\\b0[oO][0-7]+(?:_[0-7]+)*\\b"},{begin:"\\b0[xX][\\da-fA-F]+(?:_[\\da-fA-F]+)*\\b"},{begin:"(?:\\b\\d+(?:_\\d+)*(\\.(?:\\d+(?:_\\d+)*))?|\\B\\.\\d+)(?:[eE][+-]?\\d+)?"},],relevance:0},g=["false","null","true"],u=["__CLASS__","__DIR__","__FILE__","__FUNCTION__","__COMPILER_HALT_OFFSET__","__LINE__","__METHOD__","__NAMESPACE__","__TRAIT__","die","echo","exit","include","include_once","print","require","require_once","array","abstract","and","as","binary","bool","boolean","break","callable","case","catch","class","clone","const","continue","declare","default","do","double","else","elseif","empty","enddeclare","endfor","endforeach","endif","endswitch","endwhile","enum","eval","extends","final","finally","float","for","foreach","from","global","goto","if","implements","instanceof","insteadof","int","integer","interface","isset","iterable","list","match|0","mixed","new","never","object","or","private","protected","public","readonly","real","return","string","switch","throw","trait","try","unset","use","var","void","while","xor","yield",],b=["Error|0","AppendIterator","ArgumentCountError","ArithmeticError","ArrayIterator","ArrayObject","AssertionError","BadFunctionCallException","BadMethodCallException","CachingIterator","CallbackFilterIterator","CompileError","Countable","DirectoryIterator","DivisionByZeroError","DomainException","EmptyIterator","ErrorException","Exception","FilesystemIterator","FilterIterator","GlobIterator","InfiniteIterator","InvalidArgumentException","IteratorIterator","LengthException","LimitIterator","LogicException","MultipleIterator","NoRewindIterator","OutOfBoundsException","OutOfRangeException","OuterIterator","OverflowException","ParentIterator","ParseError","RangeException","RecursiveArrayIterator","RecursiveCachingIterator","RecursiveCallbackFilterIterator","RecursiveDirectoryIterator","RecursiveFilterIterator","RecursiveIterator","RecursiveIteratorIterator","RecursiveRegexIterator","RecursiveTreeIterator","RegexIterator","RuntimeException","SeekableIterator","SplDoublyLinkedList","SplFileInfo","SplFileObject","SplFixedArray","SplHeap","SplMaxHeap","SplMinHeap","SplObjectStorage","SplObserver","SplPriorityQueue","SplQueue","SplStack","SplSubject","SplTempFileObject","TypeError","UnderflowException","UnexpectedValueException","UnhandledMatchError","ArrayAccess","BackedEnum","Closure","Fiber","Generator","Iterator","IteratorAggregate","Serializable","Stringable","Throwable","Traversable","UnitEnum","WeakReference","WeakMap","Directory","__PHP_Incomplete_Class","parent","php_user_filter","self","static","stdClass",],m={keyword:u,literal:(e=>{let n=[];return 
e.forEach(e=>{n.push(e),e.toLowerCase()===e?n.push(e.toUpperCase()):n.push(e.toLowerCase())}),n})(g),built_in:b},p=e=>e.map(e=>e.replace(/\|\d+$/,"")),h={variants:[{match:[/new/,n.concat(o,"+"),n.concat("(?!",p(b).join("\\b|"),"\\b)"),i,],scope:{1:"keyword",4:"title.class"}},]},f=n.concat(a,"\\b(?!\\()"),E={variants:[{match:[n.concat(/::/,n.lookahead(/(?!class\b)/)),f],scope:{2:"variable.constant"}},{match:[/::/,/class/],scope:{2:"variable.language"}},{match:[i,n.concat(/::/,n.lookahead(/(?!class\b)/)),f],scope:{1:"title.class",3:"variable.constant"}},{match:[i,n.concat("::",n.lookahead(/(?!class\b)/))],scope:{1:"title.class"}},{match:[i,/::/,/class/],scope:{1:"title.class",3:"variable.language"}},]},$={scope:"attr",match:n.concat(a,n.lookahead(":"),n.lookahead(/(?!::)/))},y={relevance:0,begin:/\(/,end:/\)/,keywords:m,contains:[$,r,E,e.C_BLOCK_COMMENT_MODE,c,d,h]},N={relevance:0,match:[/\b/,n.concat("(?!fn\\b|function\\b|",p(u).join("\\b|"),"|",p(b).join("\\b|"),"\\b)"),a,n.concat(o,"*"),n.lookahead(/(?=\()/),],scope:{3:"title.function.invoke"},contains:[y]};y.contains.push(N);let w=[$,E,e.C_BLOCK_COMMENT_MODE,c,d,h];return{case_insensitive:!1,keywords:m,contains:[{begin:n.concat(/#\[\s*/,i),beginScope:"meta",end:/]/,endScope:"meta",keywords:{literal:g,keyword:["new","array"]},contains:[{begin:/\[/,end:/]/,keywords:{literal:g,keyword:["new","array"]},contains:["self",...w]},...w,{scope:"meta",match:i},]},e.HASH_COMMENT_MODE,e.COMMENT("//","$"),e.COMMENT("/\\*","\\*/",{contains:[{scope:"doctag",match:"@[A-Za-z]+"},]}),{match:/__halt_compiler\(\);/,keywords:"__halt_compiler",starts:{scope:"comment",end:e.MATCH_NOTHING_RE,contains:[{match:/\?>/,scope:"meta",endsParent:!0}]}},{scope:"meta",variants:[{begin:/<\?php/,relevance:10},{begin:/<\?=/},{begin:/<\?/,relevance:.1},{begin:/\?>/},]},{scope:"variable.language",match:/\$this\b/},r,N,E,{match:[/const/,/\s/,a],scope:{1:"keyword",3:"variable.constant"}},h,{scope:"function",relevance:0,beginKeywords:"fn function",end:/[;{]/,excludeEnd:!0,illegal:"[$%\\[]",contains:[{beginKeywords:"use"},e.UNDERSCORE_TITLE_MODE,{begin:"=>",endsParent:!0},{scope:"params",begin:"\\(",end:"\\)",excludeBegin:!0,excludeEnd:!0,keywords:m,contains:["self",r,E,e.C_BLOCK_COMMENT_MODE,c,d]},]},{scope:"class",variants:[{beginKeywords:"enum",illegal:/[($"]/},{beginKeywords:"class interface trait",illegal:/[:($"]/},],relevance:0,end:/\{/,excludeEnd:!0,contains:[{beginKeywords:"extends implements"},e.UNDERSCORE_TITLE_MODE,]},{beginKeywords:"namespace",relevance:0,end:";",illegal:/[.']/,contains:[e.inherit(e.UNDERSCORE_TITLE_MODE,{scope:"title.class"}),]},{beginKeywords:"use",relevance:0,end:";",contains:[{match:/\b(as|const|function)\b/,scope:"keyword"},e.UNDERSCORE_TITLE_MODE,]},c,d,]}},grmr_php_template:e=>({name:"PHP template",subLanguage:"xml",contains:[{begin:/<\?(php|=)?/,end:/\?>/,subLanguage:"php",contains:[{begin:"/\\*",end:"\\*/",skip:!0},{begin:'b"',end:'"',skip:!0},{begin:"b'",end:"'",skip:!0},e.inherit(e.APOS_STRING_MODE,{illegal:null,className:null,contains:null,skip:!0}),e.inherit(e.QUOTE_STRING_MODE,{illegal:null,className:null,contains:null,skip:!0}),]},]}),grmr_plaintext:e=>({name:"Plain text",aliases:["text","txt"],disableAutodetect:!0}),grmr_python(e){let 
n=e.regex,t=/[\p{XID_Start}_]\p{XID_Continue}*/u,a=["and","as","assert","async","await","break","case","class","continue","def","del","elif","else","except","finally","for","from","global","if","import","in","is","lambda","match","nonlocal|10","not","or","pass","raise","return","try","while","with","yield",],i={$pattern:/[A-Za-z]\w+|__\w+__/,keyword:a,built_in:["__import__","abs","all","any","ascii","bin","bool","breakpoint","bytearray","bytes","callable","chr","classmethod","compile","complex","delattr","dict","dir","divmod","enumerate","eval","exec","filter","float","format","frozenset","getattr","globals","hasattr","hash","help","hex","id","input","int","isinstance","issubclass","iter","len","list","locals","map","max","memoryview","min","next","object","oct","open","ord","pow","print","property","range","repr","reversed","round","set","setattr","slice","sorted","staticmethod","str","sum","super","tuple","type","vars","zip",],literal:["__debug__","Ellipsis","False","None","NotImplemented","True",],type:["Any","Callable","Coroutine","Dict","List","Literal","Generic","Optional","Sequence","Set","Tuple","Type","Union",]},r={className:"meta",begin:/^(>>>|\.\.\.) /},s={className:"subst",begin:/\{/,end:/\}/,keywords:i,illegal:/#/},l={begin:/\{\{/,relevance:0},o={className:"string",contains:[e.BACKSLASH_ESCAPE],variants:[{begin:/([uU]|[bB]|[rR]|[bB][rR]|[rR][bB])?'''/,end:/'''/,contains:[e.BACKSLASH_ESCAPE,r],relevance:10},{begin:/([uU]|[bB]|[rR]|[bB][rR]|[rR][bB])?"""/,end:/"""/,contains:[e.BACKSLASH_ESCAPE,r],relevance:10},{begin:/([fF][rR]|[rR][fF]|[fF])'''/,end:/'''/,contains:[e.BACKSLASH_ESCAPE,r,l,s]},{begin:/([fF][rR]|[rR][fF]|[fF])"""/,end:/"""/,contains:[e.BACKSLASH_ESCAPE,r,l,s]},{begin:/([uU]|[rR])'/,end:/'/,relevance:10},{begin:/([uU]|[rR])"/,end:/"/,relevance:10},{begin:/([bB]|[bB][rR]|[rR][bB])'/,end:/'/},{begin:/([bB]|[bB][rR]|[rR][bB])"/,end:/"/},{begin:/([fF][rR]|[rR][fF]|[fF])'/,end:/'/,contains:[e.BACKSLASH_ESCAPE,l,s]},{begin:/([fF][rR]|[rR][fF]|[fF])"/,end:/"/,contains:[e.BACKSLASH_ESCAPE,l,s]},e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,]},c="[0-9](_?[0-9])*",d=`(\\b(${c}))?\\.(${c})|\\b(${c})\\.`,g="\\b|"+a.join("|"),u={className:"number",relevance:0,variants:[{begin:`(\\b(${c})|(${d}))[eE][+-]?(${c})[jJ]?(?=${g})`},{begin:`(${d})[jJ]?`},{begin:`\\b([1-9](_?[0-9])*|0+(_?0)*)[lLjJ]?(?=${g})`},{begin:`\\b0[bB](_?[01])+[lL]?(?=${g})`},{begin:`\\b0[oO](_?[0-7])+[lL]?(?=${g})`},{begin:`\\b0[xX](_?[0-9a-fA-F])+[lL]?(?=${g})`},{begin:`\\b(${c})[jJ](?=${g})`},]},b={className:"comment",begin:n.lookahead(/# type:/),end:/$/,keywords:i,contains:[{begin:/# type:/},{begin:/#/,end:/\b\B/,endsWithParent:!0},]},m={className:"params",variants:[{className:"",begin:/\(\s*\)/,skip:!0},{begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:i,contains:["self",r,u,o,e.HASH_COMMENT_MODE]},]};return s.contains=[o,u,r],{name:"Python",aliases:["py","gyp","ipython"],unicodeRegex:!0,keywords:i,illegal:/(<\/|->|\?)|=>/,contains:[r,u,{begin:/\bself\b/},{beginKeywords:"if",relevance:0},o,b,e.HASH_COMMENT_MODE,{match:[/\bdef/,/\s+/,t],scope:{1:"keyword",3:"title.function"},contains:[m]},{variants:[{match:[/\bclass/,/\s+/,t,/\s*/,/\(\s*/,t,/\s*\)/]},{match:[/\bclass/,/\s+/,t]},],scope:{1:"keyword",3:"title.class",6:"title.class.inherited"}},{className:"meta",begin:/^[\t ]*@/,end:/(?=#)|$/,contains:[u,m,o]},]}},grmr_python_repl:e=>({aliases:["pycon"],contains:[{className:"meta.prompt",starts:{end:/ |$/,starts:{end:"$",subLanguage:"python"}},variants:[{begin:/^>>>(?=[ ]|$)/},{begin:/^\.\.\.(?=[ 
]|$)/},]},]}),grmr_r(e){let n=e.regex,t=/(?:(?:[a-zA-Z]|\.[._a-zA-Z])[._a-zA-Z0-9]*)|\.(?!\d)/,a=n.either(/0[xX][0-9a-fA-F]+\.[0-9a-fA-F]*[pP][+-]?\d+i?/,/0[xX][0-9a-fA-F]+(?:[pP][+-]?\d+)?[Li]?/,/(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?[Li]?/),i=/[=!<>:]=|\|\||&&|:::?|<-|<<-|->>|->|\|>|[-+*\/?!$&|:<=>@^~]|\*\*/,r=n.either(/[()]/,/[{}]/,/\[\[/,/[[\]]/,/\\/,/,/);return{name:"R",keywords:{$pattern:t,keyword:"function if in break next repeat else for while",literal:"NULL NA TRUE FALSE Inf NaN NA_integer_|10 NA_real_|10 NA_character_|10 NA_complex_|10",built_in:"LETTERS letters month.abb month.name pi T F abs acos acosh all any anyNA Arg as.call as.character as.complex as.double as.environment as.integer as.logical as.null.default as.numeric as.raw asin asinh atan atanh attr attributes baseenv browser c call ceiling class Conj cos cosh cospi cummax cummin cumprod cumsum digamma dim dimnames emptyenv exp expression floor forceAndCall gamma gc.time globalenv Im interactive invisible is.array is.atomic is.call is.character is.complex is.double is.environment is.expression is.finite is.function is.infinite is.integer is.language is.list is.logical is.matrix is.na is.name is.nan is.null is.numeric is.object is.pairlist is.raw is.recursive is.single is.symbol lazyLoadDBfetch length lgamma list log max min missing Mod names nargs nzchar oldClass on.exit pos.to.env proc.time prod quote range Re rep retracemem return round seq_along seq_len seq.int sign signif sin sinh sinpi sqrt standardGeneric substitute sum switch tan tanh tanpi tracemem trigamma trunc unclass untracemem UseMethod xtfrm"},contains:[e.COMMENT(/#'/,/$/,{contains:[{scope:"doctag",match:/@examples/,starts:{end:n.lookahead(n.either(/\n^#'\s*(?=@[a-zA-Z]+)/,/\n^(?!#')/)),endsParent:!0}},{scope:"doctag",begin:"@param",end:/$/,contains:[{scope:"variable",variants:[{match:t},{match:/`(?:\\.|[^`\\])+`/}],endsParent:!0},]},{scope:"doctag",match:/@[a-zA-Z]+/},{scope:"keyword",match:/\\[a-zA-Z]+/},]}),e.HASH_COMMENT_MODE,{scope:"string",contains:[e.BACKSLASH_ESCAPE],variants:[e.END_SAME_AS_BEGIN({begin:/[rR]"(-*)\(/,end:/\)(-*)"/}),e.END_SAME_AS_BEGIN({begin:/[rR]"(-*)\{/,end:/\}(-*)"/}),e.END_SAME_AS_BEGIN({begin:/[rR]"(-*)\[/,end:/\](-*)"/}),e.END_SAME_AS_BEGIN({begin:/[rR]'(-*)\(/,end:/\)(-*)'/}),e.END_SAME_AS_BEGIN({begin:/[rR]'(-*)\{/,end:/\}(-*)'/}),e.END_SAME_AS_BEGIN({begin:/[rR]'(-*)\[/,end:/\](-*)'/}),{begin:'"',end:'"',relevance:0},{begin:"'",end:"'",relevance:0},]},{relevance:0,variants:[{scope:{1:"operator",2:"number"},match:[i,a]},{scope:{1:"operator",2:"number"},match:[/%[^%]*%/,a]},{scope:{1:"punctuation",2:"number"},match:[r,a]},{scope:{2:"number"},match:[/[^a-zA-Z0-9._]|^/,a]},]},{scope:{3:"operator"},match:[t,/\s+/,/<-/,/\s+/]},{scope:"operator",relevance:0,variants:[{match:i},{match:/%[^%]*%/},]},{scope:"punctuation",relevance:0,match:r},{begin:"`",end:"`",contains:[{begin:/\\./}]},]}},grmr_ruby(e){let 
n=e.regex,t="([a-zA-Z_]\\w*[!?=]?|[-+~]@|<<|>>|=~|===?|<=>|[<>]=?|\\*\\*|[-/+%^&*~`|]|\\[\\]=?)",a=n.either(/\b([A-Z]+[a-z0-9]+)+/,/\b([A-Z]+[a-z0-9]+)+[A-Z]+/),i=n.concat(a,/(::\w+)*/),r={"variable.constant":["__FILE__","__LINE__","__ENCODING__"],"variable.language":["self","super"],keyword:["alias","and","begin","BEGIN","break","case","class","defined","do","else","elsif","end","END","ensure","for","if","in","module","next","not","or","redo","require","rescue","retry","return","then","undef","unless","until","when","while","yield","include","extend","prepend","public","private","protected","raise","throw",],built_in:["proc","lambda","attr_accessor","attr_reader","attr_writer","define_method","private_constant","module_function",],literal:["true","false","nil"]},s={className:"doctag",begin:"@[A-Za-z]+"},l={begin:"#<",end:">"},o=[e.COMMENT("#","$",{contains:[s]}),e.COMMENT("^=begin","^=end",{contains:[s],relevance:10}),e.COMMENT("^__END__",e.MATCH_NOTHING_RE),],c={className:"subst",begin:/#\{/,end:/\}/,keywords:r},d={className:"string",contains:[e.BACKSLASH_ESCAPE,c],variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/},{begin:/`/,end:/`/},{begin:/%[qQwWx]?\(/,end:/\)/},{begin:/%[qQwWx]?\[/,end:/\]/},{begin:/%[qQwWx]?\{/,end:/\}/},{begin:/%[qQwWx]?/},{begin:/%[qQwWx]?\//,end:/\//},{begin:/%[qQwWx]?%/,end:/%/},{begin:/%[qQwWx]?-/,end:/-/},{begin:/%[qQwWx]?\|/,end:/\|/},{begin:/\B\?(\\\d{1,3})/},{begin:/\B\?(\\x[A-Fa-f0-9]{1,2})/},{begin:/\B\?(\\u\{?[A-Fa-f0-9]{1,6}\}?)/},{begin:/\B\?(\\M-\\C-|\\M-\\c|\\c\\M-|\\M-|\\C-\\M-)[\x20-\x7e]/},{begin:/\B\?\\(c|C-)[\x20-\x7e]/},{begin:/\B\?\\?\S/},{begin:n.concat(/<<[-~]?'?/,n.lookahead(/(\w+)(?=\W)[^\n]*\n(?:[^\n]*\n)*?\s*\1\b/)),contains:[e.END_SAME_AS_BEGIN({begin:/(\w+)/,end:/(\w+)/,contains:[e.BACKSLASH_ESCAPE,c]}),]},]},g="[0-9](_?[0-9])*",u={className:"number",relevance:0,variants:[{begin:`\\b([1-9](_?[0-9])*|0)(\\.(${g}))?([eE][+-]?(${g})|r)?i?\\b`},{begin:"\\b0[dD][0-9](_?[0-9])*r?i?\\b"},{begin:"\\b0[bB][0-1](_?[0-1])*r?i?\\b"},{begin:"\\b0[oO][0-7](_?[0-7])*r?i?\\b"},{begin:"\\b0[xX][0-9a-fA-F](_?[0-9a-fA-F])*r?i?\\b"},{begin:"\\b0(_?[0-7])+r?i?\\b"},]},b={variants:[{match:/\(\)/},{className:"params",begin:/\(/,end:/(?=\))/,excludeBegin:!0,endsParent:!0,keywords:r},]},m=[d,{variants:[{match:[/class\s+/,i,/\s+<\s+/,i]},{match:[/\b(class|module)\s+/,i]},],scope:{2:"title.class",4:"title.class.inherited"},keywords:r},{match:[/(include|extend)\s+/,i],scope:{2:"title.class"},keywords:r},{relevance:0,match:[i,/\.new[. 
(]/],scope:{1:"title.class"}},{relevance:0,match:/\b[A-Z][A-Z_0-9]+\b/,className:"variable.constant"},{relevance:0,match:a,scope:"title.class"},{match:[/def/,/\s+/,t],scope:{1:"keyword",3:"title.function"},contains:[b]},{begin:e.IDENT_RE+"::"},{className:"symbol",begin:e.UNDERSCORE_IDENT_RE+"(!|\\?)?:",relevance:0},{className:"symbol",begin:":(?!\\s)",contains:[d,{begin:t}],relevance:0},u,{className:"variable",begin:"(\\$\\W)|((\\$|@@?)(\\w+))(?=[^@$?])(?![A-Za-z])(?![@$?'])"},{className:"params",begin:/\|/,end:/\|/,excludeBegin:!0,excludeEnd:!0,relevance:0,keywords:r},{begin:"("+e.RE_STARTERS_RE+"|unless)\\s*",keywords:"unless",contains:[{className:"regexp",contains:[e.BACKSLASH_ESCAPE,c],illegal:/\n/,variants:[{begin:"/",end:"/[a-z]*"},{begin:/%r\{/,end:/\}[a-z]*/},{begin:"%r\\(",end:"\\)[a-z]*"},{begin:"%r!",end:"![a-z]*"},{begin:"%r\\[",end:"\\][a-z]*"},]},].concat(l,o),relevance:0},].concat(l,o);return c.contains=m,b.contains=m,o.unshift(l),{name:"Ruby",aliases:["rb","gemspec","podspec","thor","irb"],keywords:r,illegal:/\/\*/,contains:[e.SHEBANG({binary:"ruby"})].concat([{begin:/^\s*=>/,starts:{end:"$",contains:m}},{className:"meta.prompt",begin:"^([>?]>|[\\w#]+\\(\\w+\\):\\d+:\\d+[>*]|(\\w+-)?\\d+\\.\\d+\\.\\d+(p\\d+)?[^\\d][^>]+>)(?=[ ])",starts:{end:"$",keywords:r,contains:m}},]).concat(o).concat(m)}},grmr_rust(e){let n=e.regex,t={className:"title.function.invoke",relevance:0,begin:n.concat(/\b/,/(?!let\b)/,e.IDENT_RE,n.lookahead(/\s*\(/))},a="([ui](8|16|32|64|128|size)|f(32|64))?",i=["drop ","Copy","Send","Sized","Sync","Drop","Fn","FnMut","FnOnce","ToOwned","Clone","Debug","PartialEq","PartialOrd","Eq","Ord","AsRef","AsMut","Into","From","Default","Iterator","Extend","IntoIterator","DoubleEndedIterator","ExactSizeIterator","SliceConcatExt","ToString","assert!","assert_eq!","bitflags!","bytes!","cfg!","col!","concat!","concat_idents!","debug_assert!","debug_assert_eq!","env!","panic!","file!","format!","format_args!","include_bytes!","include_str!","line!","local_data_key!","module_path!","option_env!","print!","println!","select!","stringify!","try!","unimplemented!","unreachable!","vec!","write!","writeln!","macro_rules!","assert_ne!","debug_assert_ne!",],r=["i8","i16","i32","i64","i128","isize","u8","u16","u32","u64","u128","usize","f32","f64","str","char","bool","Box","Option","Result","String","Vec",];return{name:"Rust",aliases:["rs"],keywords:{$pattern:e.IDENT_RE+"!?",type:r,keyword:["abstract","as","async","await","become","box","break","const","continue","crate","do","dyn","else","enum","extern","false","final","fn","for","if","impl","in","let","loop","macro","match","mod","move","mut","override","priv","pub","ref","return","self","Self","static","struct","super","trait","true","try","type","typeof","unsafe","unsized","use","virtual","where","while","yield",],literal:["true","false","Some","None","Ok","Err"],built_in:i},illegal:""},t,]}},grmr_scss(e){let 
n=X(e),t="@[a-z-]+",a={className:"variable",begin:"(\\$[a-zA-Z-][a-zA-Z0-9_-]*)\\b",relevance:0};return{name:"SCSS",case_insensitive:!0,illegal:"[=/|']",contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,n.CSS_NUMBER_MODE,{className:"selector-id",begin:"#[A-Za-z0-9_-]+",relevance:0},{className:"selector-class",begin:"\\.[A-Za-z0-9_-]+",relevance:0},n.ATTRIBUTE_SELECTOR_MODE,{className:"selector-tag",begin:"\\b("+V.join("|")+")\\b",relevance:0},{className:"selector-pseudo",begin:":("+Y.join("|")+")"},{className:"selector-pseudo",begin:":(:)?("+ee.join("|")+")"},a,{begin:/\(/,end:/\)/,contains:[n.CSS_NUMBER_MODE]},n.CSS_VARIABLE,{className:"attribute",begin:"\\b("+en.join("|")+")\\b"},{begin:"\\b(whitespace|wait|w-resize|visible|vertical-text|vertical-ideographic|uppercase|upper-roman|upper-alpha|underline|transparent|top|thin|thick|text|text-top|text-bottom|tb-rl|table-header-group|table-footer-group|sw-resize|super|strict|static|square|solid|small-caps|separate|se-resize|scroll|s-resize|rtl|row-resize|ridge|right|repeat|repeat-y|repeat-x|relative|progress|pointer|overline|outside|outset|oblique|nowrap|not-allowed|normal|none|nw-resize|no-repeat|no-drop|newspaper|ne-resize|n-resize|move|middle|medium|ltr|lr-tb|lowercase|lower-roman|lower-alpha|loose|list-item|line|line-through|line-edge|lighter|left|keep-all|justify|italic|inter-word|inter-ideograph|inside|inset|inline|inline-block|inherit|inactive|ideograph-space|ideograph-parenthesis|ideograph-numeric|ideograph-alpha|horizontal|hidden|help|hand|groove|fixed|ellipsis|e-resize|double|dotted|distribute|distribute-space|distribute-letter|distribute-all-lines|disc|disabled|default|decimal|dashed|crosshair|collapse|col-resize|circle|char|center|capitalize|break-word|break-all|bottom|both|bolder|bold|block|bidi-override|below|baseline|auto|always|all-scroll|absolute|table|table-cell)\\b"},{begin:/:/,end:/[;}{]/,relevance:0,contains:[n.BLOCK_COMMENT,a,n.HEXCOLOR,n.CSS_NUMBER_MODE,e.QUOTE_STRING_MODE,e.APOS_STRING_MODE,n.IMPORTANT,n.FUNCTION_DISPATCH,]},{begin:"@(page|font-face)",keywords:{$pattern:t,keyword:"@page @font-face"}},{begin:"@",end:"[{;]",returnBegin:!0,keywords:{$pattern:/[a-z-]+/,keyword:"and or not only",attribute:J.join(" ")},contains:[{begin:t,className:"keyword"},{begin:/[a-z-]+(?=:)/,className:"attribute"},a,e.QUOTE_STRING_MODE,e.APOS_STRING_MODE,n.HEXCOLOR,n.CSS_NUMBER_MODE,]},n.FUNCTION_DISPATCH,]}},grmr_shell:e=>({name:"Shell Session",aliases:["console","shellsession"],contains:[{className:"meta.prompt",begin:/^\s{0,3}[/~\w\d[\]()@-]*[>%$#][ ]?/,starts:{end:/[^\\](?=\s*$)/,subLanguage:"bash"}},]}),grmr_sql(e){let 
n=e.regex,t=e.COMMENT("--","$"),a=["true","false","unknown"],i=["bigint","binary","blob","boolean","char","character","clob","date","dec","decfloat","decimal","float","int","integer","interval","nchar","nclob","national","numeric","real","row","smallint","time","timestamp","varchar","varying","varbinary",],r=["abs","acos","array_agg","asin","atan","avg","cast","ceil","ceiling","coalesce","corr","cos","cosh","count","covar_pop","covar_samp","cume_dist","dense_rank","deref","element","exp","extract","first_value","floor","json_array","json_arrayagg","json_exists","json_object","json_objectagg","json_query","json_table","json_table_primitive","json_value","lag","last_value","lead","listagg","ln","log","log10","lower","max","min","mod","nth_value","ntile","nullif","percent_rank","percentile_cont","percentile_disc","position","position_regex","power","rank","regr_avgx","regr_avgy","regr_count","regr_intercept","regr_r2","regr_slope","regr_sxx","regr_sxy","regr_syy","row_number","sin","sinh","sqrt","stddev_pop","stddev_samp","substring","substring_regex","sum","tan","tanh","translate","translate_regex","treat","trim","trim_array","unnest","upper","value_of","var_pop","var_samp","width_bucket",],s=["create table","insert into","primary key","foreign key","not null","alter table","add constraint","grouping sets","on overflow","character set","respect nulls","ignore nulls","nulls first","nulls last","depth first","breadth first",],l=r,o=["abs","acos","all","allocate","alter","and","any","are","array","array_agg","array_max_cardinality","as","asensitive","asin","asymmetric","at","atan","atomic","authorization","avg","begin","begin_frame","begin_partition","between","bigint","binary","blob","boolean","both","by","call","called","cardinality","cascaded","case","cast","ceil","ceiling","char","char_length","character","character_length","check","classifier","clob","close","coalesce","collate","collect","column","commit","condition","connect","constraint","contains","convert","copy","corr","corresponding","cos","cosh","count","covar_pop","covar_samp","create","cross","cube","cume_dist","current","current_catalog","current_date","current_default_transform_group","current_path","current_role","current_row","current_schema","current_time","current_timestamp","current_path","current_role","current_transform_group_for_type","current_user","cursor","cycle","date","day","deallocate","dec","decimal","decfloat","declare","default","define","delete","dense_rank","deref","describe","deterministic","disconnect","distinct","double","drop","dynamic","each","element","else","empty","end","end_frame","end_partition","end-exec","equals","escape","every","except","exec","execute","exists","exp","external","extract","false","fetch","filter","first_value","float","floor","for","foreign","frame_row","free","from","full","function","fusion","get","global","grant","group","grouping","groups","having","hold","hour","identity","in","indicator","initial","inner","inout","insensitive","insert","int","integer","intersect","intersection","interval","into","is","join","json_array","json_arrayagg","json_exists","json_object","json_objectagg","json_query","json_table","json_table_primitive","json_value","lag","language","large","last_value","lateral","lead","leading","left","like","like_regex","listagg","ln","local","localtime","localtimestamp","log","log10","lower","match","match_number","match_recognize","matches","max","member","merge","method","min","minute","mod","modifies","module","month","multiset","national","natural","nchar","
nclob","new","no","none","normalize","not","nth_value","ntile","null","nullif","numeric","octet_length","occurrences_regex","of","offset","old","omit","on","one","only","open","or","order","out","outer","over","overlaps","overlay","parameter","partition","pattern","per","percent","percent_rank","percentile_cont","percentile_disc","period","portion","position","position_regex","power","precedes","precision","prepare","primary","procedure","ptf","range","rank","reads","real","recursive","ref","references","referencing","regr_avgx","regr_avgy","regr_count","regr_intercept","regr_r2","regr_slope","regr_sxx","regr_sxy","regr_syy","release","result","return","returns","revoke","right","rollback","rollup","row","row_number","rows","running","savepoint","scope","scroll","search","second","seek","select","sensitive","session_user","set","show","similar","sin","sinh","skip","smallint","some","specific","specifictype","sql","sqlexception","sqlstate","sqlwarning","sqrt","start","static","stddev_pop","stddev_samp","submultiset","subset","substring","substring_regex","succeeds","sum","symmetric","system","system_time","system_user","table","tablesample","tan","tanh","then","time","timestamp","timezone_hour","timezone_minute","to","trailing","translate","translate_regex","translation","treat","trigger","trim","trim_array","true","truncate","uescape","union","unique","unknown","unnest","update","upper","user","using","value","values","value_of","var_pop","var_samp","varbinary","varchar","varying","versioning","when","whenever","where","width_bucket","window","with","within","without","year","add","asc","collation","desc","final","first","last","view",].filter(e=>!r.includes(e)),c={begin:n.concat(/\b/,n.either(...l),/\s*\(/),relevance:0,keywords:{built_in:l}};return{name:"SQL",case_insensitive:!0,illegal:/[{}]|<\//,keywords:{$pattern:/\b[\w\.]+/,keyword:((e,{exceptions:n,when:t}={})=>{let a=t;return n=n||[],e.map(e=>e.match(/\|\d+$/)||n.includes(e)?e:a(e)?e+"|0":e)})(o,{when:e=>e.length<3}),literal:a,type:i,built_in:["current_catalog","current_date","current_default_transform_group","current_path","current_role","current_schema","current_transform_group_for_type","current_user","session_user","system_time","system_user","current_time","localtime","current_timestamp","localtimestamp",]},contains:[{begin:n.either(...s),relevance:0,keywords:{$pattern:/[\w\.]+/,keyword:o.concat(s),literal:a,type:i}},{className:"type",begin:n.either("double precision","large object","with timezone","without timezone")},c,{className:"variable",begin:/@[a-z0-9]+/},{className:"string",variants:[{begin:/'/,end:/'/,contains:[{begin:/''/}]},]},{begin:/"/,end:/"/,contains:[{begin:/""/},]},e.C_NUMBER_MODE,e.C_BLOCK_COMMENT_MODE,t,{className:"operator",begin:/[-+*/=%^~]|&&?|\|\|?|!=?|<(?:=>?|<|>)?|>[>=]?/,relevance:0},]}},grmr_swift(e){let n={match:/\s+/,relevance:0},t=e.COMMENT("/\\*","\\*/",{contains:["self"]}),a=[e.C_LINE_COMMENT_MODE,t],i={match:[/\./,p(...e8,...eh)],className:{2:"keyword"}},r={match:m(/\./,p(...eE)),relevance:0},s=eE.filter(e=>"string"==typeof e).concat(["_|0"]),l={variants:[{className:"keyword",match:p(...eE.filter(e=>"string"!=typeof 
e).concat(ef).map(ep),...eh)},]},o={$pattern:p(/\b\w+/,/#\w+/),keyword:s.concat(eN),literal:e$},c=[i,r,l],d=[{match:m(/\./,p(...ew)),relevance:0},{className:"built_in",match:m(/\b/,p(...ew),/(?=\()/)},],u={match:/->/,relevance:0},b=[u,{className:"operator",relevance:0,variants:[{match:ek},{match:`\\.(\\.|${ex})+`}]},],h="([0-9a-fA-F]_*)+",f={className:"number",relevance:0,variants:[{match:"\\b(([0-9]_*)+)(\\.(([0-9]_*)+))?([eE][+-]?(([0-9]_*)+))?\\b"},{match:`\\b0x(${h})(\\.(${h}))?([pP][+-]?(([0-9]_*)+))?\\b`},{match:/\b0o([0-7]_*)+\b/},{match:/\b0b([01]_*)+\b/},]},E=(e="")=>({className:"subst",variants:[{match:m(/\\/,e,/[0\\tnr"']/)},{match:m(/\\/,e,/u\{[0-9a-fA-F]{1,8}\}/)},]}),$=(e="")=>({className:"subst",match:m(/\\/,e,/[\t ]*(?:[\r\n]|\r\n)/)}),y=(e="")=>({className:"subst",label:"interpol",begin:m(/\\/,e,/\(/),end:/\)/}),N=(e="")=>({begin:m(e,/"""/),end:m(/"""/,e),contains:[E(e),$(e),y(e)]}),w=(e="")=>({begin:m(e,/"/),end:m(/"/,e),contains:[E(e),y(e)]}),v={className:"string",variants:[N(),N("#"),N("##"),N("###"),w(),w("#"),w("##"),w("###"),]},x={match:m(/`/,eS,/`/)},k=[x,{className:"variable",match:/\$\d+/},{className:"variable",match:`\\$${eO}+`},],M=[{match:/(@|#(un)?)available/,className:"keyword",starts:{contains:[{begin:/\(/,end:/\)/,keywords:eT,contains:[...b,f,v]},]}},{className:"keyword",match:m(/@/,p(...eC))},{className:"meta",match:m(/@/,eS)},],O={match:g(/\b[A-Z]/),relevance:0,contains:[{className:"type",match:m(/(AV|CA|CF|CG|CI|CL|CM|CN|CT|MK|MP|MTK|MTL|NS|SCN|SK|UI|WK|XC)/,eO,"+")},{className:"type",match:eA,relevance:0},{match:/[?!]+/,relevance:0},{match:/\.\.\./,relevance:0},{match:m(/\s+&\s+/,g(eA)),relevance:0},]};O.contains.push({begin://,keywords:o,contains:[...a,...c,...M,u,O]});let S={begin:/\(/,end:/\)/,relevance:0,keywords:o,contains:["self",{match:m(eS,/\s*:/),keywords:"_|0",relevance:0},...a,...c,...d,...b,f,v,...k,...M,O,]},A={begin://,contains:[...a,O]},C={begin:/\(/,end:/\)/,keywords:o,contains:[{begin:p(g(m(eS,/\s*:/)),g(m(eS,/\s+/,eS,/\s*:/))),end:/:/,relevance:0,contains:[{className:"keyword",match:/\b_\b/},{className:"params",match:eS},]},...a,...c,...b,f,v,...M,O,S,],endsParent:!0,illegal:/["']/},T={match:[/func/,/\s+/,p(x.match,eS,ek)],className:{1:"keyword",3:"title.function"},contains:[A,C,n],illegal:[/\[/,/%/]};for(let R of v.variants){let D=R.contains.find(e=>"interpol"===e.label);D.keywords=o;let I=[...c,...d,...b,f,v,...k];D.contains=[...I,{begin:/\(/,end:/\)/,contains:["self",...I]},]}return{name:"Swift",keywords:o,contains:[...a,T,{match:[/\b(?:subscript|init[?!]?)/,/\s*(?=[<(])/],className:{1:"keyword"},contains:[A,C,n],illegal:/\[|%/},{beginKeywords:"struct protocol class extension enum actor",end:"\\{",excludeEnd:!0,keywords:o,contains:[e.inherit(e.TITLE_MODE,{className:"title.class",begin:/[A-Za-z$_][\u00C0-\u02B80-9A-Za-z$_]*/}),...c,]},{match:[/operator/,/\s+/,ek],className:{1:"keyword",3:"title"}},{begin:[/precedencegroup/,/\s+/,eA],className:{1:"keyword",3:"title"},contains:[O],keywords:[...ey,...e$],end:/}/},{beginKeywords:"import",end:/$/,contains:[...a],relevance:0},...c,...d,...b,f,v,...k,...M,O,S,]}},grmr_typescript(e){let n=em(e),t=["any","void","number","boolean","string","object","never","symbol","bigint","unknown",],a={beginKeywords:"namespace",end:/\{/,excludeEnd:!0,contains:[n.exports.CLASS_REFERENCE]},i={beginKeywords:"interface",end:/\{/,excludeEnd:!0,keywords:{keyword:"interface 
extends",built_in:t},contains:[n.exports.CLASS_REFERENCE]},r={$pattern:es,keyword:el.concat(["type","namespace","interface","public","private","protected","implements","declare","abstract","readonly","enum","override",]),literal:eo,built_in:eb.concat(t),"variable.language":eu},s={className:"meta",begin:"@[A-Za-z$_][0-9A-Za-z$_]*"},l=(e,n,t)=>{let a=e.contains.findIndex(e=>e.label===n);if(-1===a)throw Error("can not find mode to replace");e.contains.splice(a,1,t)};return Object.assign(n.keywords,r),n.exports.PARAMS_CONTAINS.push(s),n.contains=n.contains.concat([s,a,i]),l(n,"shebang",e.SHEBANG()),l(n,"use_strict",{className:"meta",relevance:10,begin:/^\s*['"]use strict['"]/}),n.contains.find(e=>"func.def"===e.label).relevance=0,Object.assign(n,{name:"TypeScript",aliases:["ts","tsx"]}),n},grmr_vbnet(e){let n=e.regex,t=/\d{1,2}\/\d{1,2}\/\d{4}/,a=/\d{4}-\d{1,2}-\d{1,2}/,i=/(\d|1[012])(:\d+){0,2} *(AM|PM)/,r=/\d{1,2}(:\d{1,2}){1,2}/,s={className:"literal",variants:[{begin:n.concat(/# */,n.either(a,t),/ *#/)},{begin:n.concat(/# */,r,/ *#/)},{begin:n.concat(/# */,i,/ *#/)},{begin:n.concat(/# */,n.either(a,t),/ +/,n.either(i,r),/ *#/)},]},l=e.COMMENT(/'''/,/$/,{contains:[{className:"doctag",begin:/<\/?/,end:/>/}]}),o=e.COMMENT(null,/$/,{variants:[{begin:/'/},{begin:/([\t ]|^)REM(?=\s)/}]});return{name:"Visual Basic .NET",aliases:["vb"],case_insensitive:!0,classNameAliases:{label:"symbol"},keywords:{keyword:"addhandler alias aggregate ansi as async assembly auto binary by byref byval call case catch class compare const continue custom declare default delegate dim distinct do each equals else elseif end enum erase error event exit explicit finally for friend from function get global goto group handles if implements imports in inherits interface into iterator join key let lib loop me mid module mustinherit mustoverride mybase myclass namespace narrowing new next notinheritable notoverridable of off on operator option optional order overloads overridable overrides paramarray partial preserve private property protected public raiseevent readonly redim removehandler resume return select set shadows shared skip static step stop structure strict sub synclock take text then throw to try unicode until using when where while widening with withevents writeonly yield",built_in:"addressof and andalso await directcast gettype getxmlnamespace is isfalse isnot istrue like mod nameof new not or orelse trycast typeof xor cbool cbyte cchar cdate cdbl cdec cint clng cobj csbyte cshort csng cstr cuint culng cushort",type:"boolean byte char date decimal double integer long object sbyte short single string uinteger ulong ushort",literal:"true false nothing"},illegal:"//|\\{|\\}|endif|gosub|variant|wend|^\\$ ",contains:[{className:"string",begin:/"(""|[^/n])"C\b/},{className:"string",begin:/"/,end:/"/,illegal:/\n/,contains:[{begin:/""/}]},s,{className:"number",relevance:0,variants:[{begin:/\b\d[\d_]*((\.[\d_]+(E[+-]?[\d_]+)?)|(E[+-]?[\d_]+))[RFD@!#]?/},{begin:/\b\d[\d_]*((U?[SIL])|[%&])?/},{begin:/&H[\dA-F_]+((U?[SIL])|[%&])?/},{begin:/&O[0-7_]+((U?[SIL])|[%&])?/},{begin:/&B[01_]+((U?[SIL])|[%&])?/},]},{className:"label",begin:/^\w+:/},l,o,{className:"meta",begin:/[\t ]*#(const|disable|else|elseif|enable|end|externalsource|if|region)\b/,end:/$/,keywords:{keyword:"const disable else elseif enable end externalsource if region then"},contains:[o]},]}},grmr_wasm(e){e.regex;let n=e.COMMENT(/\(;/,/;\)/);return 
n.contains.push("self"),{name:"WebAssembly",keywords:{$pattern:/[\w.]+/,keyword:["anyfunc","block","br","br_if","br_table","call","call_indirect","data","drop","elem","else","end","export","func","global.get","global.set","local.get","local.set","local.tee","get_global","get_local","global","if","import","local","loop","memory","memory.grow","memory.size","module","mut","nop","offset","param","result","return","select","set_global","set_local","start","table","tee_local","then","type","unreachable",]},contains:[e.COMMENT(/;;/,/$/),n,{match:[/(?:offset|align)/,/\s*/,/=/],className:{1:"keyword",3:"operator"}},{className:"variable",begin:/\$[\w_]+/},{match:/(\((?!;)|\))+/,className:"punctuation",relevance:0},{begin:[/(?:func|call|call_indirect)/,/\s+/,/\$[^\s)]+/],className:{1:"keyword",3:"title.function"}},e.QUOTE_STRING_MODE,{match:/(i32|i64|f32|f64)(?!\.)/,className:"type"},{className:"keyword",match:/\b(f32|f64|i32|i64)(?:\.(?:abs|add|and|ceil|clz|const|convert_[su]\/i(?:32|64)|copysign|ctz|demote\/f64|div(?:_[su])?|eqz?|extend_[su]\/i32|floor|ge(?:_[su])?|gt(?:_[su])?|le(?:_[su])?|load(?:(?:8|16|32)_[su])?|lt(?:_[su])?|max|min|mul|nearest|neg?|or|popcnt|promote\/f32|reinterpret\/[fi](?:32|64)|rem_[su]|rot[lr]|shl|shr_[su]|store(?:8|16|32)?|sqrt|sub|trunc(?:_[su]\/f(?:32|64))?|wrap\/i64|xor))\b/},{className:"number",relevance:0,match:/[+-]?\b(?:\d(?:_?\d)*(?:\.\d(?:_?\d)*)?(?:[eE][+-]?\d(?:_?\d)*)?|0x[\da-fA-F](?:_?[\da-fA-F])*(?:\.[\da-fA-F](?:_?[\da-fA-D])*)?(?:[pP][+-]?\d(?:_?\d)*)?)\b|\binf\b|\bnan(?::0x[\da-fA-F](?:_?[\da-fA-D])*)?\b/},]}},grmr_yaml(e){let n="true false yes no null",t="[\\w#;/?:@&=+$,.~*'()[\\]]+",a={className:"string",relevance:0,variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/},{begin:/\S+/},],contains:[e.BACKSLASH_ESCAPE,{className:"template-variable",variants:[{begin:/\{\{/,end:/\}\}/},{begin:/%\{/,end:/\}/},]},]},i=e.inherit(a,{variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/},{begin:/[^\s,{}[\]]+/},]}),r={end:",",endsWithParent:!0,excludeEnd:!0,keywords:n,relevance:0},s=[{className:"attr",variants:[{begin:"\\w[\\w :\\/.-]*:(?=[ ]|$)"},{begin:'"\\w[\\w :\\/.-]*":(?=[ ]|$)'},{begin:"'\\w[\\w :\\/.-]*':(?=[ ]|$)"},]},{className:"meta",begin:"^---\\s*$",relevance:10},{className:"string",begin:"[\\|>]([1-9]?[+-])?[ ]*\\n( +)[^ ][^\\n]*\\n(\\2[^\\n]+\\n?)*"},{begin:"<%[%=-]?",end:"[%-]?%>",subLanguage:"ruby",excludeBegin:!0,excludeEnd:!0,relevance:0},{className:"type",begin:"!\\w+!"+t},{className:"type",begin:"!<"+t+">"},{className:"type",begin:"!"+t},{className:"type",begin:"!!"+t},{className:"meta",begin:"&"+e.UNDERSCORE_IDENT_RE+"$"},{className:"meta",begin:"\\*"+e.UNDERSCORE_IDENT_RE+"$"},{className:"bullet",begin:"-(?=[ ]|$)",relevance:0},e.HASH_COMMENT_MODE,{beginKeywords:n,keywords:{literal:n}},{className:"number",begin:"\\b[0-9]{4}(-[0-9][0-9]){0,2}([Tt \\t][0-9][0-9]?(:[0-9][0-9]){2})?(\\.[0-9]*)?([ \\t])*(Z|[-+][0-9][0-9]?(:[0-9][0-9])?)?\\b"},{className:"number",begin:e.C_NUMBER_RE+"\\b",relevance:0},{begin:/\{/,end:/\}/,contains:[r],illegal:"\\n",relevance:0},{begin:"\\[",end:"\\]",contains:[r],illegal:"\\n",relevance:0},a,],l=[...s];return l.pop(),l.push(i),r.contains=l,{name:"YAML",case_insensitive:!0,aliases:["yml"],contains:s}}});let eD=Q;for(let eI of Object.keys(eR)){let eL=eI.replace("grmr_","").replace("_","-");eD.registerLanguage(eL,eR[eI])}return eD}();"object"==typeof exports&&"undefined"!=typeof module&&(module.exports=hljs); \ No newline at end of file diff --git a/spaces/g4f/freegpt-webui/g4f/Provider/Providers/Bing.py 
b/spaces/g4f/freegpt-webui/g4f/Provider/Providers/Bing.py deleted file mode 100644 index 87e04ac82293c7e22068af431ac407bdee435a1b..0000000000000000000000000000000000000000 --- a/spaces/g4f/freegpt-webui/g4f/Provider/Providers/Bing.py +++ /dev/null @@ -1,349 +0,0 @@ -import os -import json -import random -import json -import os -import uuid -import ssl -import certifi -import aiohttp -import asyncio - -import requests -from ...typing import sha256, Dict, get_type_hints - -url = 'https://bing.com/chat' -model = ['gpt-4'] -supports_stream = True -needs_auth = False - -ssl_context = ssl.create_default_context() -ssl_context.load_verify_locations(certifi.where()) - - -class optionsSets: - optionSet: dict = { - 'tone': str, - 'optionsSets': list - } - - jailbreak: dict = { - "optionsSets": [ - 'saharasugg', - 'enablenewsfc', - 'clgalileo', - 'gencontentv3', - "nlu_direct_response_filter", - "deepleo", - "disable_emoji_spoken_text", - "responsible_ai_policy_235", - "enablemm", - "h3precise" - # "harmonyv3", - "dtappid", - "cricinfo", - "cricinfov2", - "dv3sugg", - "nojbfedge" - ] - } - - -class Defaults: - delimiter = '\x1e' - ip_address = f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}' - - allowedMessageTypes = [ - 'Chat', - 'Disengaged', - 'AdsQuery', - 'SemanticSerp', - 'GenerateContentQuery', - 'SearchQuery', - 'ActionRequest', - 'Context', - 'Progress', - 'AdsQuery', - 'SemanticSerp' - ] - - sliceIds = [ - - # "222dtappid", - # "225cricinfo", - # "224locals0" - - 'winmuid3tf', - 'osbsdusgreccf', - 'ttstmout', - 'crchatrev', - 'winlongmsgtf', - 'ctrlworkpay', - 'norespwtf', - 'tempcacheread', - 'temptacache', - '505scss0', - '508jbcars0', - '515enbotdets0', - '5082tsports', - '515vaoprvs', - '424dagslnv1s0', - 'kcimgattcf', - '427startpms0' - ] - - location = { - 'locale': 'en-US', - 'market': 'en-US', - 'region': 'US', - 'locationHints': [ - { - 'country': 'United States', - 'state': 'California', - 'city': 'Los Angeles', - 'timezoneoffset': 8, - 'countryConfidence': 8, - 'Center': { - 'Latitude': 34.0536909, - 'Longitude': -118.242766 - }, - 'RegionType': 2, - 'SourceType': 1 - } - ], - } - - -def _format(msg: dict) -> str: - return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter - - -async def create_conversation(): - for _ in range(5): - create = requests.get('https://www.bing.com/turing/conversation/create', - headers={ - 'authority': 'edgeservices.bing.com', - 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7', - 'accept-language': 'en-US,en;q=0.9', - 'cache-control': 'max-age=0', - 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"', - 'sec-ch-ua-arch': '"x86"', - 'sec-ch-ua-bitness': '"64"', - 'sec-ch-ua-full-version': '"110.0.1587.69"', - 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-model': '""', - 'sec-ch-ua-platform': '"Windows"', - 'sec-ch-ua-platform-version': '"15.0.0"', - 'sec-fetch-dest': 'document', - 'sec-fetch-mode': 'navigate', - 'sec-fetch-site': 'none', - 'sec-fetch-user': '?1', - 'upgrade-insecure-requests': '1', - 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69', - 'x-edge-shopping-flag': '1', - 'x-forwarded-for': Defaults.ip_address - }) - - conversationId = create.json().get('conversationId') 
- clientId = create.json().get('clientId') - conversationSignature = create.json().get('conversationSignature') - - if not conversationId or not clientId or not conversationSignature and _ == 4: - raise Exception('Failed to create conversation.') - - return conversationId, clientId, conversationSignature - - -async def stream_generate(prompt: str, mode: optionsSets.optionSet = optionsSets.jailbreak, context: bool or str = False): - timeout = aiohttp.ClientTimeout(total=900) - session = aiohttp.ClientSession(timeout=timeout) - - conversationId, clientId, conversationSignature = await create_conversation() - - wss = await session.ws_connect('wss://sydney.bing.com/sydney/ChatHub', ssl=ssl_context, autoping=False, - headers={ - 'accept': 'application/json', - 'accept-language': 'en-US,en;q=0.9', - 'content-type': 'application/json', - 'sec-ch-ua': '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"', - 'sec-ch-ua-arch': '"x86"', - 'sec-ch-ua-bitness': '"64"', - 'sec-ch-ua-full-version': '"109.0.1518.78"', - 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-model': '', - 'sec-ch-ua-platform': '"Windows"', - 'sec-ch-ua-platform-version': '"15.0.0"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-origin', - 'x-ms-client-request-id': str(uuid.uuid4()), - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - 'Referer': 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx', - 'Referrer-Policy': 'origin-when-cross-origin', - 'x-forwarded-for': Defaults.ip_address - }) - - await wss.send_str(_format({'protocol': 'json', 'version': 1})) - await wss.receive(timeout=900) - - struct = { - 'arguments': [ - { - **mode, - 'source': 'cib', - 'allowedMessageTypes': Defaults.allowedMessageTypes, - 'sliceIds': Defaults.sliceIds, - 'traceId': os.urandom(16).hex(), - 'isStartOfSession': True, - 'message': Defaults.location | { - 'author': 'user', - 'inputMethod': 'Keyboard', - 'text': prompt, - 'messageType': 'Chat' - }, - 'conversationSignature': conversationSignature, - 'participant': { - 'id': clientId - }, - 'conversationId': conversationId - } - ], - 'invocationId': '0', - 'target': 'chat', - 'type': 4 - } - - if context: - struct['arguments'][0]['previousMessages'] = [ - { - "author": "user", - "description": context, - "contextType": "WebPage", - "messageType": "Context", - "messageId": "discover-web--page-ping-mriduna-----" - } - ] - - await wss.send_str(_format(struct)) - - final = False - draw = False - resp_txt = '' - result_text = '' - resp_txt_no_link = '' - cache_text = '' - - while not final: - msg = await wss.receive(timeout=900) - objects = msg.data.split(Defaults.delimiter) - - for obj in objects: - if obj is None or not obj: - continue - - response = json.loads(obj) - if response.get('type') == 1 and response['arguments'][0].get('messages',): - if not draw: - if (response['arguments'][0]['messages'][0]['contentOrigin'] != 'Apology') and not draw: - resp_txt = result_text + \ - response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get( - 'text', '') - resp_txt_no_link = result_text + \ - response['arguments'][0]['messages'][0].get( - 'text', '') - - if response['arguments'][0]['messages'][0].get('messageType',): - resp_txt = ( - resp_txt - + response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0]['inlines'][0].get('text') - + '\n' - ) - 
result_text = ( - result_text - + response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0]['inlines'][0].get('text') - + '\n' - ) - - if cache_text.endswith(' '): - final = True - if wss and not wss.closed: - await wss.close() - if session and not session.closed: - await session.close() - - yield (resp_txt.replace(cache_text, '')) - cache_text = resp_txt - - elif response.get('type') == 2: - if response['item']['result'].get('error'): - if wss and not wss.closed: - await wss.close() - if session and not session.closed: - await session.close() - - raise Exception( - f"{response['item']['result']['value']}: {response['item']['result']['message']}") - - if draw: - cache = response['item']['messages'][1]['adaptiveCards'][0]['body'][0]['text'] - response['item']['messages'][1]['adaptiveCards'][0]['body'][0]['text'] = ( - cache + resp_txt) - - if (response['item']['messages'][-1]['contentOrigin'] == 'Apology' and resp_txt): - response['item']['messages'][-1]['text'] = resp_txt_no_link - response['item']['messages'][-1]['adaptiveCards'][0]['body'][0]['text'] = resp_txt - - # print('Preserved the message from being deleted', file=sys.stderr) - - final = True - if wss and not wss.closed: - await wss.close() - if session and not session.closed: - await session.close() - - -def run(generator): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - gen = generator.__aiter__() - - while True: - try: - next_val = loop.run_until_complete(gen.__anext__()) - yield next_val - - except StopAsyncIteration: - break - #print('Done') - -def convert(messages): - context = "" - - for message in messages: - context += "[%s](#message)\n%s\n\n" % (message['role'], - message['content']) - - return context - - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - if len(messages) < 2: - prompt = messages[0]['content'] - context = False - - else: - prompt = messages[-1]['content'] - context = convert(messages[:-1]) - - response = run(stream_generate(prompt, optionsSets.jailbreak, context)) - for token in response: - yield (token) - - #print('Done') - - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join( - [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) diff --git a/spaces/g4f/freegpt-webui/g4f/Provider/Providers/ChatgptAi.py b/spaces/g4f/freegpt-webui/g4f/Provider/Providers/ChatgptAi.py deleted file mode 100644 index 46605175d1ac94fcde252b53ddb81ba99f15706e..0000000000000000000000000000000000000000 --- a/spaces/g4f/freegpt-webui/g4f/Provider/Providers/ChatgptAi.py +++ /dev/null @@ -1,51 +0,0 @@ -import os -import requests, re -from ...typing import sha256, Dict, get_type_hints - -url = 'https://chatgpt.ai/gpt-4/' -model = ['gpt-4'] -supports_stream = True -needs_auth = False - - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - chat = '' - for message in messages: - chat += '%s: %s\n' % (message['role'], message['content']) - chat += 'assistant: ' - - response = requests.get('https://chatgpt.ai/') - nonce, post_id, _, bot_id = re.findall(r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width', response.text)[0] - - headers = { - 'authority': 'chatgpt.ai', - 'accept': '*/*', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'cache-control': 'no-cache', - 'origin': 'https://chatgpt.ai', - 'pragma': 
'no-cache', - 'referer': 'https://chatgpt.ai/gpt-4/', - 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"Windows"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-origin', - 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36', - } - data = { - '_wpnonce': nonce, - 'post_id': post_id, - 'url': 'https://chatgpt.ai/gpt-4', - 'action': 'wpaicg_chat_shortcode_message', - 'message': chat, - 'bot_id': bot_id - } - - response = requests.post('https://chatgpt.ai/wp-admin/admin-ajax.php', - headers=headers, data=data) - - yield (response.json()['data']) - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/geetu040/video-gen/Dockerfile b/spaces/geetu040/video-gen/Dockerfile deleted file mode 100644 index c59d15d96b6a0b01201d00028e7bfefe79820ed7..0000000000000000000000000000000000000000 --- a/spaces/geetu040/video-gen/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker -# you will also find guides on how best to write your Dockerfile - -FROM python:3.9 - -WORKDIR /code - -# Set the cache directory to a location with write permissions -RUN useradd -ms /bin/bash myuser -USER myuser -ENV HOME=/home/myuser \ - PATH=/home/myuser/.local/bin:$PATH - -COPY ./requirements.txt /code/requirements.txt - -RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -COPY . . - -CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"] \ No newline at end of file diff --git a/spaces/gerardo/elon_or_not/app.py b/spaces/gerardo/elon_or_not/app.py deleted file mode 100644 index 0a7d43d7c3fc1b343a81636f4b2a40a99353ca49..0000000000000000000000000000000000000000 --- a/spaces/gerardo/elon_or_not/app.py +++ /dev/null @@ -1,17 +0,0 @@ -import gradio as gr -from fastai.vision.all import * - -learn = load_learner('model.pkl') -labels = learn.dls.vocab - -def predict(img): - img = PILImage.create(img) - pred,pred_idx,probs = learn.predict(img) - return {labels[i]: float(probs[i]) for i in range(len(labels))} - -input = gr.inputs.Image() -outputs = gr.outputs.Textbox() -iface = gr.Interface(fn=predict, inputs=input, outputs=outputs, - examples=['./elon-musk-12.jpg.webp', './iu.jpeg', './elonwario.jpeg', 'jeffbezos.jpg'] -) -iface.launch() \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Gta5pc3dmcrackv51032.md b/spaces/gotiQspiryo/whisper-ui/examples/Gta5pc3dmcrackv51032.md deleted file mode 100644 index d18049a97972a9cb08e198736f561a8d627b06dc..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Gta5pc3dmcrackv51032.md +++ /dev/null @@ -1,6 +0,0 @@ -

    gta5pc3dmcrackv51032


    Download File ⚙⚙⚙ https://urlgoal.com/2uyN1v



    -
    -
    
    -
    -
    -

    diff --git a/spaces/gradio/longformer/tests/test_var_global_attn.py b/spaces/gradio/longformer/tests/test_var_global_attn.py deleted file mode 100644 index ea95b296a229b6f23361dc5060b6649933c6d754..0000000000000000000000000000000000000000 --- a/spaces/gradio/longformer/tests/test_var_global_attn.py +++ /dev/null @@ -1,74 +0,0 @@ -import torch -import unittest -import numpy as np -import random -from longformer.longformer import LongformerSelfAttention, LongformerConfig - - -class TestLongformerSelfAttention(unittest.TestCase): - - def _run_test(self, attn, hidden_state, attention_mask): - output3 = attn(hidden_states=hidden_state, attention_mask=attention_mask if attention_mask is not None else None)[0] - - output1 = attn(hidden_states=hidden_state[:1], attention_mask=attention_mask[:1] if attention_mask is not None else None)[0] - output2 = attn(hidden_states=hidden_state[1:], attention_mask=attention_mask[1:] if attention_mask is not None else None)[0] - self.assertTrue(torch.allclose(output3, torch.cat((output1, output2), dim=0), atol=1e-7)) - return output3 - - def test_selfattention(self): - np.random.seed(1) - random.seed(1) - torch.manual_seed(1) - torch.cuda.manual_seed(1) - torch.cuda.manual_seed_all(1) - - seqlen = 1024 - embed_dim = 60 - num_heads = 3 - bsz = 3 - config = LongformerConfig() - config.num_attention_heads = num_heads - config.hidden_size = embed_dim - config.attention_probs_dropout_prob = 0.0 - config.attention_window = [256] - config.attention_dilation = [1] - config.attention_mode = 'sliding_chunks' - config.autoregressive = False - - attn = LongformerSelfAttention(config=config, layer_id=0) - - hidden_state = torch.randn(bsz, seqlen, embed_dim) - attention_mask = torch.zeros((bsz, 1, 1, seqlen), dtype=torch.int) # local attention everywhere - - # test None attention_mask (default which is local attention everywhere) - output_nonemask = self._run_test(attn, hidden_state, None) - output = self._run_test(attn, hidden_state, attention_mask) - self.assertTrue(torch.allclose(output, output_nonemask, atol=1e-7)) - - # test padding - attention_mask[:, :, :, -10:] = -1 - self._run_test(attn, hidden_state, attention_mask) - - # test same global attention on all examples - attention_mask[:, :, :, :10] = 1 - self._run_test(attn, hidden_state, attention_mask) - - # test same number of global attention but different locations - attention_mask[:] = 0 - attention_mask[:, :, :, -10:] = -1 - attention_mask[0, :, :, :10] = 1 - attention_mask[1, :, :, 5:15] = 1 - attention_mask[2, :, :, 10:20] = 1 - self._run_test(attn, hidden_state, attention_mask) - - # test variable number of global attention - attention_mask[:] = 0 - attention_mask[:, :, :, -10:] = -1 - attention_mask[0, :, :, 5:15] = 1 - attention_mask[2, :, :, 13:17] = 1 - self._run_test(attn, hidden_state, attention_mask) - - - -if __name__ == '__main__': - unittest.main() diff --git a/spaces/hackathon-somos-nlp-2023/leaderboard/README.md b/spaces/hackathon-somos-nlp-2023/leaderboard/README.md deleted file mode 100644 index b826336ffc0c29334e02cf7f581315e7f2ce649f..0000000000000000000000000000000000000000 --- a/spaces/hackathon-somos-nlp-2023/leaderboard/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Leaderboard -emoji: 🌌 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: true -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git 
a/spaces/hamacojr/SAM-CAT-Seg/open_clip/src/open_clip/tokenizer.py b/spaces/hamacojr/SAM-CAT-Seg/open_clip/src/open_clip/tokenizer.py deleted file mode 100644 index 01e9f9d25574cfe757bc43a0ff0d982f5a4efad3..0000000000000000000000000000000000000000 --- a/spaces/hamacojr/SAM-CAT-Seg/open_clip/src/open_clip/tokenizer.py +++ /dev/null @@ -1,201 +0,0 @@ -""" CLIP tokenizer - -Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. -""" -import gzip -import html -import os -from functools import lru_cache -from typing import Union, List - -import ftfy -import regex as re -import torch - -# https://stackoverflow.com/q/62691279 -import os -os.environ["TOKENIZERS_PARALLELISM"] = "false" - - -@lru_cache() -def default_bpe(): - return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") - - -@lru_cache() -def bytes_to_unicode(): - """ - Returns list of utf-8 byte and a corresponding list of unicode strings. - The reversible bpe codes work on unicode strings. - This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. - When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. - This is a significant percentage of your normal, say, 32K bpe vocab. - To avoid that, we want lookup tables between utf-8 bytes and unicode strings. - And avoids mapping to whitespace/control characters the bpe code barfs on. - """ - bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) - cs = bs[:] - n = 0 - for b in range(2**8): - if b not in bs: - bs.append(b) - cs.append(2**8+n) - n += 1 - cs = [chr(n) for n in cs] - return dict(zip(bs, cs)) - - -def get_pairs(word): - """Return set of symbol pairs in a word. - Word is represented as tuple of symbols (symbols being variable-length strings). 
- """ - pairs = set() - prev_char = word[0] - for char in word[1:]: - pairs.add((prev_char, char)) - prev_char = char - return pairs - - -def basic_clean(text): - text = ftfy.fix_text(text) - text = html.unescape(html.unescape(text)) - return text.strip() - - -def whitespace_clean(text): - text = re.sub(r'\s+', ' ', text) - text = text.strip() - return text - - -class SimpleTokenizer(object): - def __init__(self, bpe_path: str = default_bpe(), special_tokens=None): - self.byte_encoder = bytes_to_unicode() - self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} - merges = gzip.open(bpe_path).read().decode("utf-8").split('\n') - merges = merges[1:49152-256-2+1] - merges = [tuple(merge.split()) for merge in merges] - vocab = list(bytes_to_unicode().values()) - vocab = vocab + [v+'' for v in vocab] - for merge in merges: - vocab.append(''.join(merge)) - if not special_tokens: - special_tokens = ['', ''] - else: - special_tokens = ['', ''] + special_tokens - vocab.extend(special_tokens) - self.encoder = dict(zip(vocab, range(len(vocab)))) - self.decoder = {v: k for k, v in self.encoder.items()} - self.bpe_ranks = dict(zip(merges, range(len(merges)))) - self.cache = {t:t for t in special_tokens} - special = "|".join(special_tokens) - self.pat = re.compile(special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE) - - self.vocab_size = len(self.encoder) - self.all_special_ids = [self.encoder[t] for t in special_tokens] - - def bpe(self, token): - if token in self.cache: - return self.cache[token] - word = tuple(token[:-1]) + ( token[-1] + '',) - pairs = get_pairs(word) - - if not pairs: - return token+'' - - while True: - bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) - if bigram not in self.bpe_ranks: - break - first, second = bigram - new_word = [] - i = 0 - while i < len(word): - try: - j = word.index(first, i) - new_word.extend(word[i:j]) - i = j - except: - new_word.extend(word[i:]) - break - - if word[i] == first and i < len(word)-1 and word[i+1] == second: - new_word.append(first+second) - i += 2 - else: - new_word.append(word[i]) - i += 1 - new_word = tuple(new_word) - word = new_word - if len(word) == 1: - break - else: - pairs = get_pairs(word) - word = ' '.join(word) - self.cache[token] = word - return word - - def encode(self, text): - bpe_tokens = [] - text = whitespace_clean(basic_clean(text)).lower() - for token in re.findall(self.pat, text): - token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) - bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')) - return bpe_tokens - - def decode(self, tokens): - text = ''.join([self.decoder[token] for token in tokens]) - text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('', ' ') - return text - - -_tokenizer = SimpleTokenizer() - - -def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor: - """ - Returns the tokenized representation of given input string(s) - - Parameters - ---------- - texts : Union[str, List[str]] - An input string or a list of input strings to tokenize - context_length : int - The context length to use; all CLIP models use 77 as the context length - - Returns - ------- - A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length] - """ - if isinstance(texts, str): - texts = [texts] - - sot_token = _tokenizer.encoder[""] - eot_token = _tokenizer.encoder[""] - all_tokens = 
[[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] - result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) - - for i, tokens in enumerate(all_tokens): - if len(tokens) > context_length: - tokens = tokens[:context_length] # Truncate - tokens[-1] = eot_token - result[i, :len(tokens)] = torch.tensor(tokens) - - return result - - -class HFTokenizer: - "HuggingFace tokenizer wrapper" - def __init__(self, tokenizer_name:str): - from transformers import AutoTokenizer - self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name) - - def __call__(self, texts:Union[str, List[str]], context_length:int=77) -> torch.Tensor: - # same cleaning as for default tokenizer, except lowercasing - # adding lower (for case-sensitive tokenizers) will make it more robust but less sensitive to nuance - if isinstance(texts, str): - texts = [texts] - texts = [whitespace_clean(basic_clean(text)) for text in texts] - input_ids = self.tokenizer(texts, return_tensors='pt', max_length=context_length, padding='max_length', truncation=True).input_ids - return input_ids diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/datasets/tsv.py b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/datasets/tsv.py deleted file mode 100644 index 1ebf75eb7ab154924ac74d5a7eaa6ac741306db3..0000000000000000000000000000000000000000 --- a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/datasets/tsv.py +++ /dev/null @@ -1,420 +0,0 @@ -import os -import os.path as op -import json -# import logging -import base64 -import yaml -import errno -import io -import math -from PIL import Image, ImageDraw - -from maskrcnn_benchmark.structures.bounding_box import BoxList -from .box_label_loader import LabelLoader - - -def load_linelist_file(linelist_file): - if linelist_file is not None: - line_list = [] - with open(linelist_file, 'r') as fp: - for i in fp: - line_list.append(int(i.strip())) - return line_list - - -def img_from_base64(imagestring): - try: - img = Image.open(io.BytesIO(base64.b64decode(imagestring))) - return img.convert('RGB') - except ValueError: - return None - - -def load_from_yaml_file(yaml_file): - with open(yaml_file, 'r') as fp: - return yaml.load(fp, Loader=yaml.CLoader) - - -def find_file_path_in_yaml(fname, root): - if fname is not None: - if op.isfile(fname): - return fname - elif op.isfile(op.join(root, fname)): - return op.join(root, fname) - else: - raise FileNotFoundError( - errno.ENOENT, os.strerror(errno.ENOENT), op.join(root, fname) - ) - - -def create_lineidx(filein, idxout): - idxout_tmp = idxout + '.tmp' - with open(filein, 'r') as tsvin, open(idxout_tmp, 'w') as tsvout: - fsize = os.fstat(tsvin.fileno()).st_size - fpos = 0 - while fpos != fsize: - tsvout.write(str(fpos) + "\n") - tsvin.readline() - fpos = tsvin.tell() - os.rename(idxout_tmp, idxout) - - -def read_to_character(fp, c): - result = [] - while True: - s = fp.read(32) - assert s != '' - if c in s: - result.append(s[: s.index(c)]) - break - else: - result.append(s) - return ''.join(result) - - -class TSVFile(object): - def __init__(self, tsv_file, generate_lineidx=False): - self.tsv_file = tsv_file - self.lineidx = op.splitext(tsv_file)[0] + '.lineidx' - self._fp = None - self._lineidx = None - # the process always keeps the process which opens the file. - # If the pid is not equal to the currrent pid, we will re-open the file. 
- self.pid = None - # generate lineidx if not exist - if not op.isfile(self.lineidx) and generate_lineidx: - create_lineidx(self.tsv_file, self.lineidx) - - def __del__(self): - if self._fp: - self._fp.close() - - def __str__(self): - return "TSVFile(tsv_file='{}')".format(self.tsv_file) - - def __repr__(self): - return str(self) - - def num_rows(self): - self._ensure_lineidx_loaded() - return len(self._lineidx) - - def seek(self, idx): - self._ensure_tsv_opened() - self._ensure_lineidx_loaded() - try: - pos = self._lineidx[idx] - except: - # logging.info('{}-{}'.format(self.tsv_file, idx)) - raise - self._fp.seek(pos) - return [s.strip() for s in self._fp.readline().split('\t')] - - def seek_first_column(self, idx): - self._ensure_tsv_opened() - self._ensure_lineidx_loaded() - pos = self._lineidx[idx] - self._fp.seek(pos) - return read_to_character(self._fp, '\t') - - def get_key(self, idx): - return self.seek_first_column(idx) - - def __getitem__(self, index): - return self.seek(index) - - def __len__(self): - return self.num_rows() - - def _ensure_lineidx_loaded(self): - if self._lineidx is None: - # logging.info('loading lineidx: {}'.format(self.lineidx)) - with open(self.lineidx, 'r') as fp: - self._lineidx = [int(i.strip()) for i in fp.readlines()] - - def _ensure_tsv_opened(self): - if self._fp is None: - self._fp = open(self.tsv_file, 'r') - self.pid = os.getpid() - - if self.pid != os.getpid(): - # logging.info('re-open {} because the process id changed'.format(self.tsv_file)) - self._fp = open(self.tsv_file, 'r') - self.pid = os.getpid() - - -class CompositeTSVFile(): - def __init__(self, file_list, seq_file, root='.'): - if isinstance(file_list, str): - self.file_list = load_list_file(file_list) - else: - assert isinstance(file_list, list) - self.file_list = file_list - - self.seq_file = seq_file - self.root = root - self.initialized = False - self.initialize() - - def get_key(self, index): - idx_source, idx_row = self.seq[index] - k = self.tsvs[idx_source].get_key(idx_row) - return '_'.join([self.file_list[idx_source], k]) - - def num_rows(self): - return len(self.seq) - - def __getitem__(self, index): - idx_source, idx_row = self.seq[index] - return self.tsvs[idx_source].seek(idx_row) - - def __len__(self): - return len(self.seq) - - def initialize(self): - ''' - this function has to be called in init function if cache_policy is - enabled. Thus, let's always call it in init funciton to make it simple. - ''' - if self.initialized: - return - self.seq = [] - with open(self.seq_file, 'r') as fp: - for line in fp: - parts = line.strip().split('\t') - self.seq.append([int(parts[0]), int(parts[1])]) - self.tsvs = [TSVFile(op.join(self.root, f)) for f in self.file_list] - self.initialized = True - - -def load_list_file(fname): - with open(fname, 'r') as fp: - lines = fp.readlines() - result = [line.strip() for line in lines] - if len(result) > 0 and result[-1] == '': - result = result[:-1] - return result - - -class TSVDataset(object): - def __init__(self, img_file, label_file=None, hw_file=None, - linelist_file=None, imageid2idx_file=None): - """Constructor. - Args: - img_file: Image file with image key and base64 encoded image str. - label_file: An optional label file with image key and label information. - A label_file is required for training and optional for testing. - hw_file: An optional file with image key and image height/width info. - linelist_file: An optional file with a list of line indexes to load samples. 
- It is useful to select a subset of samples or duplicate samples. - """ - self.img_file = img_file - self.label_file = label_file - self.hw_file = hw_file - self.linelist_file = linelist_file - - self.img_tsv = TSVFile(img_file) - self.label_tsv = None if label_file is None else TSVFile(label_file, generate_lineidx=True) - self.hw_tsv = None if hw_file is None else TSVFile(hw_file) - self.line_list = load_linelist_file(linelist_file) - self.imageid2idx = None - if imageid2idx_file is not None: - self.imageid2idx = json.load(open(imageid2idx_file, 'r')) - - self.transforms = None - - def __len__(self): - if self.line_list is None: - if self.imageid2idx is not None: - assert self.label_tsv is not None, "label_tsv is None!!!" - return self.label_tsv.num_rows() - return self.img_tsv.num_rows() - else: - return len(self.line_list) - - def __getitem__(self, idx): - img = self.get_image(idx) - img_size = img.size # w, h - annotations = self.get_annotations(idx) - # print(idx, annotations) - target = self.get_target_from_annotations(annotations, img_size, idx) - img, target = self.apply_transforms(img, target) - - if self.transforms is None: - return img, target, idx, 1.0 - else: - new_img_size = img.shape[1:] - scale = math.sqrt(float(new_img_size[0] * new_img_size[1]) / float(img_size[0] * img_size[1])) - return img, target, idx, scale - - def get_line_no(self, idx): - return idx if self.line_list is None else self.line_list[idx] - - def get_image(self, idx): - line_no = self.get_line_no(idx) - if self.imageid2idx is not None: - assert self.label_tsv is not None, "label_tsv is None!!!" - row = self.label_tsv.seek(line_no) - annotations = json.loads(row[1]) - imageid = annotations["img_id"] - line_no = self.imageid2idx[imageid] - row = self.img_tsv.seek(line_no) - # use -1 to support old format with multiple columns. - img = img_from_base64(row[-1]) - return img - - def get_annotations(self, idx): - line_no = self.get_line_no(idx) - if self.label_tsv is not None: - row = self.label_tsv.seek(line_no) - annotations = json.loads(row[1]) - return annotations - else: - return [] - - def get_target_from_annotations(self, annotations, img_size, idx): - # This function will be overwritten by each dataset to - # decode the labels to specific formats for each task. - return annotations - - def apply_transforms(self, image, target=None): - # This function will be overwritten by each dataset to - # apply transforms to image and targets. - return image, target - - def get_img_info(self, idx): - if self.imageid2idx is not None: - assert self.label_tsv is not None, "label_tsv is None!!!" - line_no = self.get_line_no(idx) - row = self.label_tsv.seek(line_no) - annotations = json.loads(row[1]) - return {"height": int(annotations["img_w"]), "width": int(annotations["img_w"])} - - if self.hw_tsv is not None: - line_no = self.get_line_no(idx) - row = self.hw_tsv.seek(line_no) - try: - # json string format with "height" and "width" being the keys - data = json.loads(row[1]) - if type(data) == list: - return data[0] - elif type(data) == dict: - return data - except ValueError: - # list of strings representing height and width in order - hw_str = row[1].split(' ') - hw_dict = {"height": int(hw_str[0]), "width": int(hw_str[1])} - return hw_dict - - def get_img_key(self, idx): - line_no = self.get_line_no(idx) - # based on the overhead of reading each row. - if self.imageid2idx is not None: - assert self.label_tsv is not None, "label_tsv is None!!!" 
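    -            # with an imageid2idx map, the key ("img_id") lives in the label row,
    -            # which is far cheaper to read than the base64-encoded image row
    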
- row = self.label_tsv.seek(line_no) - annotations = json.loads(row[1]) - return annotations["img_id"] - - if self.hw_tsv: - return self.hw_tsv.seek(line_no)[0] - elif self.label_tsv: - return self.label_tsv.seek(line_no)[0] - else: - return self.img_tsv.seek(line_no)[0] - - -class TSVYamlDataset(TSVDataset): - """ TSVDataset taking a Yaml file for easy function call - """ - - def __init__(self, yaml_file, root=None, replace_clean_label=False): - print("Reading {}".format(yaml_file)) - self.cfg = load_from_yaml_file(yaml_file) - if root: - self.root = root - else: - self.root = op.dirname(yaml_file) - img_file = find_file_path_in_yaml(self.cfg['img'], self.root) - label_file = find_file_path_in_yaml(self.cfg.get('label', None), - self.root) - hw_file = find_file_path_in_yaml(self.cfg.get('hw', None), self.root) - linelist_file = find_file_path_in_yaml(self.cfg.get('linelist', None), - self.root) - imageid2idx_file = find_file_path_in_yaml(self.cfg.get('imageid2idx', None), - self.root) - - if replace_clean_label: - assert ("raw_label" in label_file) - label_file = label_file.replace("raw_label", "clean_label") - - super(TSVYamlDataset, self).__init__( - img_file, label_file, hw_file, linelist_file, imageid2idx_file) - - -class ODTSVDataset(TSVYamlDataset): - """ - Generic TSV dataset format for Object Detection. - """ - - def __init__(self, yaml_file, extra_fields=(), transforms=None, - is_load_label=True, **kwargs): - if yaml_file is None: - return - super(ODTSVDataset, self).__init__(yaml_file) - - self.transforms = transforms - self.is_load_label = is_load_label - self.attribute_on = False - # self.attribute_on = kwargs['args'].MODEL.ATTRIBUTE_ON if "args" in kwargs else False - - if self.is_load_label: - # construct maps - jsondict_file = find_file_path_in_yaml( - self.cfg.get("labelmap", None), self.root - ) - if jsondict_file is None: - jsondict_file = find_file_path_in_yaml( - self.cfg.get("jsondict", None), self.root - ) - if "json" in jsondict_file: - jsondict = json.load(open(jsondict_file, 'r')) - if "label_to_idx" not in jsondict: - jsondict = {'label_to_idx': jsondict} - elif "tsv" in jsondict_file: - label_to_idx = {} - counter = 1 - with open(jsondict_file) as f: - for line in f: - label_to_idx[line.strip()] = counter - counter += 1 - jsondict = {'label_to_idx': label_to_idx} - else: - assert (0) - - self.labelmap = {} - self.class_to_ind = jsondict['label_to_idx'] - self.class_to_ind['__background__'] = 0 - self.ind_to_class = {v: k for k, v in self.class_to_ind.items()} - self.labelmap['class_to_ind'] = self.class_to_ind - - if self.attribute_on: - self.attribute_to_ind = jsondict['attribute_to_idx'] - self.attribute_to_ind['__no_attribute__'] = 0 - self.ind_to_attribute = {v: k for k, v in self.attribute_to_ind.items()} - self.labelmap['attribute_to_ind'] = self.attribute_to_ind - - self.label_loader = LabelLoader( - labelmap=self.labelmap, - extra_fields=extra_fields, - ) - - def get_target_from_annotations(self, annotations, img_size, idx): - if isinstance(annotations, list): - annotations = {"objects": annotations} - if self.is_load_label: - return self.label_loader(annotations['objects'], img_size) - - def apply_transforms(self, img, target=None): - if self.transforms is not None: - img, target = self.transforms(img, target) - return img, target diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/evaluation/sem_seg_evaluation.py 
b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/evaluation/sem_seg_evaluation.py deleted file mode 100644 index fb3b28d79284a5eeb335fc8ee8d859b4e46510ef..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/evaluation/sem_seg_evaluation.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import itertools -import json -import logging -import numpy as np -import os -from collections import OrderedDict -import PIL.Image as Image -import pycocotools.mask as mask_util -import torch -from fvcore.common.file_io import PathManager - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.utils.comm import all_gather, is_main_process, synchronize - -from .evaluator import DatasetEvaluator - - -class SemSegEvaluator(DatasetEvaluator): - """ - Evaluate semantic segmentation - """ - - def __init__(self, dataset_name, distributed, num_classes, ignore_label=255, output_dir=None): - """ - Args: - dataset_name (str): name of the dataset to be evaluated. - distributed (True): if True, will collect results from all ranks for evaluation. - Otherwise, will evaluate the results in the current process. - num_classes (int): number of classes - ignore_label (int): value in semantic segmentation ground truth. Predictions for the - corresponding pixels should be ignored. - output_dir (str): an output directory to dump results. - """ - self._dataset_name = dataset_name - self._distributed = distributed - self._output_dir = output_dir - self._num_classes = num_classes - self._ignore_label = ignore_label - self._N = num_classes + 1 - - self._cpu_device = torch.device("cpu") - self._logger = logging.getLogger(__name__) - - self.input_file_to_gt_file = { - dataset_record["file_name"]: dataset_record["sem_seg_file_name"] - for dataset_record in DatasetCatalog.get(dataset_name) - } - - meta = MetadataCatalog.get(dataset_name) - # Dict that maps contiguous training ids to COCO category ids - try: - c2d = meta.stuff_dataset_id_to_contiguous_id - self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()} - except AttributeError: - self._contiguous_id_to_dataset_id = None - self._class_names = meta.stuff_classes - - def reset(self): - self._conf_matrix = np.zeros((self._N, self._N), dtype=np.int64) - self._predictions = [] - - def process(self, inputs, outputs): - """ - Args: - inputs: the inputs to a model. - It is a list of dicts. Each dict corresponds to an image and - contains keys like "height", "width", "file_name". - outputs: the outputs of a model. It is either list of semantic segmentation predictions - (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic - segmentation prediction in the same format. 
- """ - for input, output in zip(inputs, outputs): - output = output["sem_seg"].argmax(dim=0).to(self._cpu_device) - pred = np.array(output, dtype=np.int) - with PathManager.open(self.input_file_to_gt_file[input["file_name"]], "rb") as f: - gt = np.array(Image.open(f), dtype=np.int) - - gt[gt == self._ignore_label] = self._num_classes - - self._conf_matrix += np.bincount( - self._N * pred.reshape(-1) + gt.reshape(-1), minlength=self._N ** 2 - ).reshape(self._N, self._N) - - self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"])) - - def evaluate(self): - """ - Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval): - - * Mean intersection-over-union averaged across classes (mIoU) - * Frequency Weighted IoU (fwIoU) - * Mean pixel accuracy averaged across classes (mACC) - * Pixel Accuracy (pACC) - """ - if self._distributed: - synchronize() - conf_matrix_list = all_gather(self._conf_matrix) - self._predictions = all_gather(self._predictions) - self._predictions = list(itertools.chain(*self._predictions)) - if not is_main_process(): - return - - self._conf_matrix = np.zeros_like(self._conf_matrix) - for conf_matrix in conf_matrix_list: - self._conf_matrix += conf_matrix - - if self._output_dir: - PathManager.mkdirs(self._output_dir) - file_path = os.path.join(self._output_dir, "sem_seg_predictions.json") - with PathManager.open(file_path, "w") as f: - f.write(json.dumps(self._predictions)) - - acc = np.full(self._num_classes, np.nan, dtype=np.float) - iou = np.full(self._num_classes, np.nan, dtype=np.float) - tp = self._conf_matrix.diagonal()[:-1].astype(np.float) - pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float) - class_weights = pos_gt / np.sum(pos_gt) - pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float) - acc_valid = pos_gt > 0 - acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid] - iou_valid = (pos_gt + pos_pred) > 0 - union = pos_gt + pos_pred - tp - iou[acc_valid] = tp[acc_valid] / union[acc_valid] - macc = np.sum(acc[acc_valid]) / np.sum(acc_valid) - miou = np.sum(iou[acc_valid]) / np.sum(iou_valid) - fiou = np.sum(iou[acc_valid] * class_weights[acc_valid]) - pacc = np.sum(tp) / np.sum(pos_gt) - - res = {} - res["mIoU"] = 100 * miou - res["fwIoU"] = 100 * fiou - for i, name in enumerate(self._class_names): - res["IoU-{}".format(name)] = 100 * iou[i] - res["mACC"] = 100 * macc - res["pACC"] = 100 * pacc - for i, name in enumerate(self._class_names): - res["ACC-{}".format(name)] = 100 * acc[i] - - if self._output_dir: - file_path = os.path.join(self._output_dir, "sem_seg_evaluation.pth") - with PathManager.open(file_path, "wb") as f: - torch.save(res, f) - results = OrderedDict({"sem_seg": res}) - self._logger.info(results) - return results - - def encode_json_sem_seg(self, sem_seg, input_file_name): - """ - Convert semantic segmentation to COCO stuff format with segments encoded as RLEs. 
- See http://cocodataset.org/#format-results - """ - json_list = [] - for label in np.unique(sem_seg): - if self._contiguous_id_to_dataset_id is not None: - assert ( - label in self._contiguous_id_to_dataset_id - ), "Label {} is not in the metadata info for {}".format(label, self._dataset_name) - dataset_id = self._contiguous_id_to_dataset_id[label] - else: - dataset_id = int(label) - mask = (sem_seg == label).astype(np.uint8) - mask_rle = mask_util.encode(np.array(mask[:, :, None], order="F"))[0] - mask_rle["counts"] = mask_rle["counts"].decode("utf-8") - json_list.append( - {"file_name": input_file_name, "category_id": dataset_id, "segmentation": mask_rle} - ) - return json_list diff --git a/spaces/huggingface-projects/InstructPix2Pix-Chatbot-ui/frontend/README.md b/spaces/huggingface-projects/InstructPix2Pix-Chatbot-ui/frontend/README.md deleted file mode 100644 index 5c91169b0ca6508bb24301c957a9edea5abf2b01..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/InstructPix2Pix-Chatbot-ui/frontend/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# create-svelte - -Everything you need to build a Svelte project, powered by [`create-svelte`](https://github.com/sveltejs/kit/tree/master/packages/create-svelte). - -## Creating a project - -If you're seeing this, you've probably already done this step. Congrats! - -```bash -# create a new project in the current directory -npm create svelte@latest - -# create a new project in my-app -npm create svelte@latest my-app -``` - -## Developing - -Once you've created a project and installed dependencies with `npm install` (or `pnpm install` or `yarn`), start a development server: - -```bash -npm run dev - -# or start the server and open the app in a new browser tab -npm run dev -- --open -``` - -## Building - -To create a production version of your app: - -```bash -npm run build -``` - -You can preview the production build with `npm run preview`. - -> To deploy your app, you may need to install an [adapter](https://kit.svelte.dev/docs/adapters) for your target environment. 
diff --git a/spaces/huggingface-projects/llama-2-13b-chat/style.css b/spaces/huggingface-projects/llama-2-13b-chat/style.css deleted file mode 100644 index 60878febc13db001635a52688abfe34d95e6c309..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/llama-2-13b-chat/style.css +++ /dev/null @@ -1,16 +0,0 @@ -h1 { - text-align: center; -} - -#duplicate-button { - margin: auto; - color: white; - background: #1565c0; - border-radius: 100vh; -} - -.contain { - max-width: 900px; - margin: auto; - padding-top: 1.5rem; -} diff --git a/spaces/hugginglearners/emotion_in_tweets/README.md b/spaces/hugginglearners/emotion_in_tweets/README.md deleted file mode 100644 index d1586a9ea8dbef303beff5510438b3b2591db37c..0000000000000000000000000000000000000000 --- a/spaces/hugginglearners/emotion_in_tweets/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Emotion In Tweets -emoji: 👻 -colorFrom: pink -colorTo: gray -sdk: gradio -sdk_version: 3.0.24 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/imageomics/Andromeda/Dockerfile b/spaces/imageomics/Andromeda/Dockerfile deleted file mode 100644 index be94dcd4c86f5fdab269685c5ab05fc7388237fc..0000000000000000000000000000000000000000 --- a/spaces/imageomics/Andromeda/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -FROM ghcr.io/imageomics/andromeda:1.1.1 diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Abacre Restaurant Point Of Sale 6 Keygen 69.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Abacre Restaurant Point Of Sale 6 Keygen 69.md deleted file mode 100644 index 0910a3341a7b84701f3a9ff8a51e504ff7b5269e..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Abacre Restaurant Point Of Sale 6 Keygen 69.md +++ /dev/null @@ -1,111 +0,0 @@ - -

    Abacre Restaurant Point Of Sale 6 Keygen 69

    - -

    If you are looking for reliable and powerful software to manage your restaurant business, you might want to check out Abacre Restaurant Point Of Sale 6. This is a new generation of restaurant management software that can handle all aspects of your operation, from taking orders and billing to inventory and reporting.
    

    - -

    Abacre Restaurant Point Of Sale 6 is designed to work with any hardware configuration, including touch screens, printers, cash drawers, scanners, and more. You can customize the interface and the layout of the guest bill to suit your preferences and needs. You can also create multiple menus, discounts, taxes, and currencies for different locations and occasions.

    -

    Abacre Restaurant Point Of Sale 6 Keygen 69


    Download ★★★★★ https://urlin.us/2uEyqi



    - -

    One of the best features of Abacre Restaurant Point Of Sale 6 is its ability to generate various types of charts and reports that can help you analyze your performance and make informed decisions. You can track your sales, profits, costs, inventory, staff, customers, and more. You can also export your data to Excel, PDF, HTML, or text formats for further processing.

    - -

    But how can you get Abacre Restaurant Point Of Sale 6 for free? Well, you might be tempted to look for a keygen or a crack online, but that is not a good idea. Not only are these methods illegal and unethical, but they can also expose your computer to viruses, malware, and other threats. Moreover, you might end up with a corrupted or outdated version of the software that can cause errors and glitches.

    - -

    The best way to get Abacre Restaurant Point Of Sale 6 is to download it from the official website and use the free trial version for 30 days. This way, you can test all the features and functions of the software and see if it meets your expectations and requirements. If you are satisfied with the results, you can purchase a license key for a reasonable price and enjoy the full benefits of the software.

    - -

    Abacre Restaurant Point Of Sale 6 is a great solution for any restaurant owner who wants to improve their efficiency and profitability. It is easy to use, flexible, and affordable. It can help you streamline your workflow, increase your customer satisfaction, and grow your business. Don't miss this opportunity and download Abacre Restaurant Point Of Sale 6 today!

    -

    How to Install Abacre Restaurant Point Of Sale 6 Keygen 69

    - -

    Installing Abacre Restaurant Point Of Sale 6 is not a difficult task, but you need to follow some steps carefully to avoid any problems. Here are the steps you need to take:

    - -
      -
    1. Download Abacre Restaurant Point Of Sale 6 from the official website and save it on your computer.
    2. Extract the zip file and run the setup.exe file to start the installation process.
    3. Follow the instructions on the screen and choose the destination folder and the components you want to install.
    4. When the installation is complete, do not run the program yet.
    5. Copy the keygen.exe file from the crack folder and paste it into the installation folder.
    6. Run the keygen.exe file and generate a serial number for your software.
    7. Launch Abacre Restaurant Point Of Sale 6 and enter the serial number when prompted.
    8. Enjoy your full version of Abacre Restaurant Point Of Sale 6!
    
    - -

    Note: You should always scan any downloaded files with an antivirus program before opening them. Also, you should disable your internet connection and firewall before using the keygen or crack to avoid any detection or interference.

    - -

    Why Choose Abacre Restaurant Point Of Sale 6 Keygen 69

    - -

    Abacre Restaurant Point Of Sale 6 is one of the best software solutions for restaurant management on the market. It has many advantages over other similar products, such as:

    -

    - -
      -
    • It is compatible with any hardware configuration and any operating system, including Windows XP, Vista, 7, 8, 8.1, and 10.
    • It is easy to use and customize, with a user-friendly interface and a flexible layout of the guest bill.
    • It is fast and reliable, with a high-performance database engine and a secure encryption system.
    • It is comprehensive and versatile, with a wide range of features and functions that can cover all aspects of your restaurant business, from ordering and billing to inventory and reporting.
    • It is affordable and cost-effective, with a reasonable price and a free trial version that you can use for 30 days without any limitations.
    
    - -

    With Abacre Restaurant Point Of Sale 6, you can improve your efficiency and profitability, increase your customer satisfaction and loyalty, and grow your business to new heights. Don't hesitate and get Abacre Restaurant Point Of Sale 6 today!

    -

    What are the Benefits of Abacre Restaurant Point Of Sale 6 Keygen 69

    - -

    Abacre Restaurant Point Of Sale 6 is not just software, but a complete solution for your restaurant business. It can help you achieve many benefits, such as:
    

    - -
      -
    • Save time and money: Abacre Restaurant Point Of Sale 6 can automate many tasks and processes that would otherwise require manual labor and paperwork. You can reduce your operational costs and increase your productivity and efficiency.
    • Improve customer service: Abacre Restaurant Point Of Sale 6 can enhance your customer experience and satisfaction by providing fast and accurate service, personalized attention, and loyalty programs. You can also communicate with your customers via email or SMS and send them promotions and offers.
    • Manage your inventory: Abacre Restaurant Point Of Sale 6 can help you keep track of your stock levels, purchases, and suppliers. You can avoid overstocking or running out of items, and optimize your ordering and replenishment processes.
    • Analyze your data: Abacre Restaurant Point Of Sale 6 can provide you with valuable insights and information about your business performance and trends. You can generate various types of charts and reports that can help you make informed decisions and improve your strategies.
    • Secure your data: Abacre Restaurant Point Of Sale 6 can protect your data from unauthorized access, loss, or corruption. You can encrypt your database, back up your data, and restore it in case of emergency.
    
    - -

    Abacre Restaurant Point Of Sale 6 is software that can make a difference in your restaurant business. It can help you manage your operations, improve your service, and grow your profits. It is software that you can trust and rely on.
    

    - -

    How to Get Support for Abacre Restaurant Point Of Sale 6 Keygen 69

    - -

    If you have any questions or issues regarding Abacre Restaurant Point Of Sale 6, you can always contact the support team for assistance. They are available 24/7 via email, phone, or live chat. You can also visit the official website for more information and resources, such as:

    - -
      -
    • FAQ: You can find answers to the most common questions and problems that users encounter with Abacre Restaurant Point Of Sale 6.
    • Manual: You can download the user manual for Abacre Restaurant Point Of Sale 6 and learn how to use all the features and functions of the software.
    • Forum: You can join the online community of Abacre Restaurant Point Of Sale 6 users and share your experiences, tips, feedback, and suggestions.
    • Blog: You can read the latest news and updates about Abacre Restaurant Point Of Sale 6 and get tips and tricks on how to use the software effectively.
    
    - -

    Abacre Restaurant Point Of Sale 6 is software backed by a team that cares about its users and provides them with the best support possible. You can always count on them to help you with any issue or question you might have.
    

    -

    How to Update Abacre Restaurant Point Of Sale 6 Keygen 69

    - -

    Abacre Restaurant Point Of Sale 6 is constantly updated and improved by its developers. They release new versions and patches regularly to fix bugs, add new features, and enhance the performance and security of the software. You can update Abacre Restaurant Point Of Sale 6 easily and quickly by following these steps:
    

    - -
      -
    1. Open Abacre Restaurant Point Of Sale 6 and go to the Help menu.
    2. Select Check for Updates and wait for the program to connect to the server and check for available updates.
    3. If there is a new version or patch available, you will see a message with the details and the download link.
    4. Click on the download link and save the update file on your computer.
    5. Close Abacre Restaurant Point Of Sale 6 and run the update file to install the new version or patch.
    6. Restart Abacre Restaurant Point Of Sale 6 and enjoy the latest features and improvements.
    
    - -

    Note: You should always back up your data before updating your software, in case something goes wrong during the installation process. You can use the Backup Database feature in Abacre Restaurant Point Of Sale 6 to create a copy of your database and store it in a safe location.
    

    - -

    How to Uninstall Abacre Restaurant Point Of Sale 6 Keygen 69

    - -

    If you want to uninstall Abacre Restaurant Point Of Sale 6 from your computer, you can do so easily and cleanly by following these steps:

    - -
      -
    1. Go to the Start menu and open the Control Panel.
    2. Select Programs and Features and find Abacre Restaurant Point Of Sale 6 in the list of installed programs.
    3. Right-click on Abacre Restaurant Point Of Sale 6 and select Uninstall.
    4. Follow the instructions on the screen and confirm your choice to remove the software.
    5. Wait for the uninstallation process to finish and restart your computer if prompted.
    
    - -

    Note: You should also delete any leftover files and folders related to Abacre Restaurant Point Of Sale 6 from your computer, such as the installation folder, the database folder, and the registry entries. You can use a third-party tool like CCleaner or Revo Uninstaller to help you with this task.

    -

    Conclusion

    - -

    Abacre Restaurant Point Of Sale 6 is software that can help you manage your restaurant business with ease and efficiency. It handles all aspects of your operation, from taking orders and billing to inventory and reporting, works with any hardware configuration and any operating system, and provides benefits such as saving time and money, improving customer service, managing your inventory, analyzing your data, and securing your data.
    

    - -

    However, Abacre Restaurant Point Of Sale 6 is not free software. You need to purchase a license key to use the full version of the software. You might be tempted to look for a keygen or a crack online, but that is not a good idea. Not only are these methods illegal and unethical, but they can also expose your computer to viruses, malware, and other threats. Moreover, you might end up with a corrupted or outdated version of the software that can cause errors and glitches.
    

    - -

    The best way to get Abacre Restaurant Point Of Sale 6 is to download it from the official website and use the free trial version for 30 days. This way, you can test all the features and functions of the software and see if it meets your expectations and requirements. If you are satisfied with the results, you can purchase a license key for a reasonable price and enjoy the full benefits of the software.

    - -

    Abacre Restaurant Point Of Sale 6 is software that can make a difference in your restaurant business. It can help you streamline your workflow, increase your customer satisfaction, and grow your business. It is software that you can trust and rely on. Don't miss this opportunity and download Abacre Restaurant Point Of Sale 6 today!
    

    
    -
    -
    \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Cinema 4d R14 Plugin Pack Torrent.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Cinema 4d R14 Plugin Pack Torrent.md deleted file mode 100644 index 770dee6065714730a5cadad21eff052124c61cc6..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Cinema 4d R14 Plugin Pack Torrent.md +++ /dev/null @@ -1,8 +0,0 @@ - -

    This effect works by adding the flowers directly to a mesh; when you render, everything is reflected on the faces of the model automatically. Simply place a flower on a plane and render. It can also be extended so that added objects are applied to faces. This plugin works with any simulation type, including dynamic fluids, flames, smoke, etc. It is a very useful tool for creating crazy, unreal-looking renderings in an easy way. With the factor property you can determine which faces are affected and how many times they are affected. In a second step you can determine how much the flowers will grow. The algorithm is called every time the simulation runs.
    
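    To make the factor/growth idea concrete, here is a minimal, hypothetical Python sketch of the behavior described above; the Face, Flower, scatter, and on_simulation_step names are illustrative assumptions, not the plugin's actual API:
    
    ```python
    import random
    
    class Face:
        """Stand-in for a mesh face; only its center point is needed here."""
        def __init__(self, center):
            self.center = center
    
    class Flower:
        def __init__(self, position):
            self.position = position
            self.scale = 0.0  # grows a little on every simulation step
    
    def scatter(faces, factor):
        """'factor' controls which faces are affected and how many flowers
        each affected face receives, as in the plugin description."""
        per_face = max(1, round(factor * 5))
        flowers = []
        for face in faces:
            if random.random() <= factor:  # this face is affected
                flowers += [Flower(face.center) for _ in range(per_face)]
        return flowers
    
    def on_simulation_step(flowers, growth=0.1):
        # the description says the algorithm runs every time the simulation does
        for flower in flowers:
            flower.scale = min(1.0, flower.scale + growth)
    
    flowers = scatter([Face((0, 0, 0)), Face((1, 0, 0))], factor=0.5)
    on_simulation_step(flowers)
    ```
    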

    -

    X123 is a robust pre-built library for Cinema 4D. It is a collection of 120 textures, animations, and shaders that gives you the best results with very little effort. It is a fully customizable texture pack that includes more than 120 optimized textures. The X123 texture pack is the essential addition to your Cinema 4D creative toolbox. It includes the complete texture pack as well as a carefully crafted and detailed manual.
    

    -

    cinema 4d r14 plugin pack torrent


    Download File » https://urlin.us/2uEvYL



    -

    The tree plugin displays real-time particles. It can be used for different particle systems, such as smoke, fire, wind, rain, and bees. The smoke and fire particles can also be used for other effects, such as dynamic fluctuations or particles covering the surface of the model like clouds.
    

    -

    The SmokeX plugin creates a mixture of smoke and clouds with realistic parameters. The problem is: how do you create clouds in Cinema 4D? Take a look at the infographic and see for yourself. The key to success is to use an additive/subtractive material so that the particles can fly in the scene. The particles are displayed during the simulation, and only in the viewport, with the help of the particle settings. You can easily change the number of particles as well as their size and thickness. To change the thickness of the smoke, you may also need to adjust the colors in order to create a thickness gradient.
    
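    As a rough illustration of that thickness-gradient idea, here is a small, hypothetical Python sketch that maps particle size to a grayscale color; the names and ranges are assumptions for illustration, not SmokeX's real parameters:
    
    ```python
    def thickness_color(size, min_size=0.1, max_size=2.0):
        """Map a particle size to a grayscale value in [0, 1]:
        larger particles render darker, so they read as thicker smoke."""
        t = (size - min_size) / (max_size - min_size)
        t = max(0.0, min(1.0, t))       # clamp to the valid range
        return 1.0 - 0.7 * t            # bright wisps, darker thick smoke
    
    # example: three particles of increasing size get a darkening gradient
    print([round(thickness_color(s), 2) for s in (0.2, 0.8, 1.5)])
    ```
    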

    
    -
    -
    \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/DriverPack Solution 18.7.78.5 Full Final - DailyAppload Full Version.md b/spaces/inplisQlawa/anything-midjourney-v4-1/DriverPack Solution 18.7.78.5 Full Final - DailyAppload Full Version.md deleted file mode 100644 index 1ac66c53f7a15579ca299e482daa463e7eb4c9ed..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/DriverPack Solution 18.7.78.5 Full Final - DailyAppload Full Version.md +++ /dev/null @@ -1,6 +0,0 @@ - -

    DriverPack Solution runs on Windows. You can use it on two PCs or laptops, and you can download drivers for both the 32-bit (x86) and 64-bit (x64) versions of Windows. The download file size is about 533 KB (515,527 bytes).
    




    More About This Software: DriverPack Solution 18.7.78.5 Full Final - DailyAppload Full Version
    

    DriverPack Solution is a program that contains bundles of drivers for the VGA, sound card, chipset, LAN card, Wi-Fi, printer, and so on. It can be used with several versions of Windows (32/64-bit) on various kinds of computers and laptops. The difficulty of locating drivers is one of the major obstacles when reinstalling a PC or laptop. Drivers are very important because, without the hardware drivers installed, our devices won't work normally; the actual function of a driver is to connect the hardware with the software.
    

    -

    All information about the programs and games on this site comes from open sources on the Internet, so no programs, software, or games are hosted on our site. When a visitor clicks the [Download] button, DriverPack Solution is downloaded directly from official sources (the owners' sites). The admin is strongly against piracy; we do not support any manifestation of piracy. If you believe that an app or game you hold the copyright to is listed on our website and you want it removed, please contact us. We are DMCA-compliant and glad to work with you; please find the DMCA / Removal Request below. If you need support, please email us via the Contact Admin page of this site. We make an effort to respond to all support requests.
    

    -

    DriverPack Solution 18.7.78.5 Full Final - DailyAppload Full Version


    Download Zip: https://urlin.us/2uEvYH
    



    
    -
    -
    \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Fabrication CAMduct 2014 Covadis 2014 Gratuit A Telecharger.rar.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Fabrication CAMduct 2014 Covadis 2014 Gratuit A Telecharger.rar.md deleted file mode 100644 index 0f9cc3c8c96f7d7bacf98b6c9eada9fb1ce5bce1..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Fabrication CAMduct 2014 Covadis 2014 Gratuit A Telecharger.rar.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Fabrication CAMduct 2014 Covadis 2014, Gratuit A Telecharger.rar


    Download File 🗸 https://urlin.us/2uEyRC



    -
    -
    
    -
    -
    -

    diff --git a/spaces/inreVtussa/clothingai/Examples/Buzzsaw 2017 XFORCE Torrent UPDATED.md b/spaces/inreVtussa/clothingai/Examples/Buzzsaw 2017 XFORCE Torrent UPDATED.md deleted file mode 100644 index facf5f60f1d2d36189613541f4d3818d567578b6..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Buzzsaw 2017 XFORCE Torrent UPDATED.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Buzzsaw 2017 XFORCE Torrent


    Download File –––––>>> https://tiurll.com/2uCirN



    - -
    
    -
    -
    -

    diff --git a/spaces/jbilcke-hf/Panoremix/src/components/ui/tooltip.tsx b/spaces/jbilcke-hf/Panoremix/src/components/ui/tooltip.tsx deleted file mode 100644 index 15f831b13198545d236d3d7b2cb62970eb20854c..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/Panoremix/src/components/ui/tooltip.tsx +++ /dev/null @@ -1,30 +0,0 @@ -"use client" - -import * as React from "react" -import * as TooltipPrimitive from "@radix-ui/react-tooltip" - -import { cn } from "@/lib/utils" - -const TooltipProvider = TooltipPrimitive.Provider - -const Tooltip = TooltipPrimitive.Root - -const TooltipTrigger = TooltipPrimitive.Trigger - -const TooltipContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, sideOffset = 4, ...props }, ref) => ( - -)) -TooltipContent.displayName = TooltipPrimitive.Content.displayName - -export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider } diff --git a/spaces/jbilcke-hf/VideoQuest/src/app/queries/getBackground.ts b/spaces/jbilcke-hf/VideoQuest/src/app/queries/getBackground.ts deleted file mode 100644 index 35ef00cb90c525c2d1f05541b9046beff077dfdf..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/VideoQuest/src/app/queries/getBackground.ts +++ /dev/null @@ -1,75 +0,0 @@ -import { Game } from "@/app/games/types" -import { createLlamaPrompt } from "@/lib/createLlamaPrompt" - -import { getBase } from "./getBase" -import { predict } from "./predict" - -export const getBackground = async ({ - game, - situation = "", - lastEvent = "", - newActionnables = [], -}: { - game: Game; - situation: string; - lastEvent: string; - newActionnables: string[], -}) => { - - const { - currentPrompt, - initialPrompt, - userSituationPrompt - } = getBase({ - game, - situation, - lastEvent - }) - - const basePrompt = initialPrompt !== currentPrompt - ? `You must imagine a very short caption for a background photo image, based on current and past situation. -Here is the original scene in which the user was located at first, which will inform you about the general game mood to follow (you must respect this): "${initialPrompt}".` - : "" - - const prompt = createLlamaPrompt([ - { - role: "system", - content: [ - `You are a photo director.`, - basePrompt, - `You are going to receive new information about the current activity of the player.`, - `Please write in a single sentence a photo caption for the next plausible scene, using a few words for each of those categories: the environment, era, characters, objects, textures, lighting.`, - `Separate each of those category descriptions using a comma.`, - `You MUST mention the following important objects that the user can click on: ${newActionnables}.`, - `Be brief in your caption don't add your own comments. Be straight to the point, and never reply things like "As the player approaches.." or "As the player clicks.." or "the scene shifts to.." 
(the best is not not mention the player at all)` - ].filter(item => item).join("\n") - }, - { - role: "user", - content: userSituationPrompt - } - ]) - - - let result = "" - try { - result = await predict(prompt) - if (!result.trim().length) { - throw new Error("empty result!") - } - } catch (err) { - console.log(`prediction of the background failed, trying again..`) - try { - result = await predict(prompt+".") - if (!result.trim().length) { - throw new Error("empty result!") - } - } catch (err) { - console.error(`prediction of the background failed again!`) - throw new Error(`failed to generate the background ${err}`) - } - } - - const tmp = result.split("Caption:").pop() || result - return tmp.replaceAll("\n", ", ") -} \ No newline at end of file diff --git a/spaces/jbilcke-hf/observer/src/lib/blobToBase64Uri.ts b/spaces/jbilcke-hf/observer/src/lib/blobToBase64Uri.ts deleted file mode 100644 index 6b572c82a4448d2eae90a3a4ec48542cdf60d2ee..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/observer/src/lib/blobToBase64Uri.ts +++ /dev/null @@ -1,18 +0,0 @@ -export function blobToBase64Uri(blob?: Blob): Promise { - return new Promise((resolve, reject) => { - if (!blob || typeof window === "undefined" || !window.FileReader) { - resolve("") - return - } - - const reader = new window.FileReader() - reader.readAsDataURL(blob) - reader.onloadend = () => { - resolve(`${reader.result || ""}`) - } - reader.onerror = () => { - // reject("error while converting blob to base64") - resolve("") - } - }) -} \ No newline at end of file diff --git a/spaces/jgurzoni/image_background_swapper/models/ade20k/utils.py b/spaces/jgurzoni/image_background_swapper/models/ade20k/utils.py deleted file mode 100644 index f337db7db54c82be041698d694e1403e8918c4c0..0000000000000000000000000000000000000000 --- a/spaces/jgurzoni/image_background_swapper/models/ade20k/utils.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Modified from https://github.com/CSAILVision/semantic-segmentation-pytorch""" - -import os -import sys - -import numpy as np -import torch - -try: - from urllib import urlretrieve -except ImportError: - from urllib.request import urlretrieve - - -def load_url(url, model_dir='./pretrained', map_location=None): - if not os.path.exists(model_dir): - os.makedirs(model_dir) - filename = url.split('/')[-1] - cached_file = os.path.join(model_dir, filename) - if not os.path.exists(cached_file): - sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file)) - urlretrieve(url, cached_file) - return torch.load(cached_file, map_location=map_location) - - -def color_encode(labelmap, colors, mode='RGB'): - labelmap = labelmap.astype('int') - labelmap_rgb = np.zeros((labelmap.shape[0], labelmap.shape[1], 3), - dtype=np.uint8) - for label in np.unique(labelmap): - if label < 0: - continue - labelmap_rgb += (labelmap == label)[:, :, np.newaxis] * \ - np.tile(colors[label], - (labelmap.shape[0], labelmap.shape[1], 1)) - - if mode == 'BGR': - return labelmap_rgb[:, :, ::-1] - else: - return labelmap_rgb diff --git a/spaces/joaogabriellima/Real-Time-Voice-Cloning/encoder/data_objects/utterance.py b/spaces/joaogabriellima/Real-Time-Voice-Cloning/encoder/data_objects/utterance.py deleted file mode 100644 index 0768c3420f422a7464f305b4c1fb6752c57ceda7..0000000000000000000000000000000000000000 --- a/spaces/joaogabriellima/Real-Time-Voice-Cloning/encoder/data_objects/utterance.py +++ /dev/null @@ -1,26 +0,0 @@ -import numpy as np - - -class Utterance: - def __init__(self, frames_fpath, wave_fpath): - self.frames_fpath = 
frames_fpath - self.wave_fpath = wave_fpath - - def get_frames(self): - return np.load(self.frames_fpath) - - def random_partial(self, n_frames): - """ - Crops the frames into a partial utterance of n_frames - - :param n_frames: The number of frames of the partial utterance - :return: the partial utterance frames and a tuple indicating the start and end of the - partial utterance in the complete utterance. - """ - frames = self.get_frames() - if frames.shape[0] == n_frames: - start = 0 - else: - start = np.random.randint(0, frames.shape[0] - n_frames) - end = start + n_frames - return frames[start:end], (start, end) \ No newline at end of file diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Cipher/test_CTR.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Cipher/test_CTR.py deleted file mode 100644 index 6fc43ef72f73b65f79f1339aed788e139558cc68..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Cipher/test_CTR.py +++ /dev/null @@ -1,472 +0,0 @@ -# =================================================================== -# -# Copyright (c) 2015, Legrandin -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. 
-# =================================================================== - -import unittest -from binascii import hexlify, unhexlify - -from Crypto.SelfTest.st_common import list_test_cases -from Crypto.Util.py3compat import tobytes, bchr -from Crypto.Cipher import AES, DES3 -from Crypto.Hash import SHAKE128, SHA256 -from Crypto.Util import Counter - -def get_tag_random(tag, length): - return SHAKE128.new(data=tobytes(tag)).read(length) - -class CtrTests(unittest.TestCase): - - key_128 = get_tag_random("key_128", 16) - key_192 = get_tag_random("key_192", 24) - nonce_32 = get_tag_random("nonce_32", 4) - nonce_64 = get_tag_random("nonce_64", 8) - ctr_64 = Counter.new(32, prefix=nonce_32) - ctr_128 = Counter.new(64, prefix=nonce_64) - - def test_loopback_128(self): - cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128) - pt = get_tag_random("plaintext", 16 * 100) - ct = cipher.encrypt(pt) - - cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128) - pt2 = cipher.decrypt(ct) - self.assertEqual(pt, pt2) - - def test_loopback_64(self): - cipher = DES3.new(self.key_192, DES3.MODE_CTR, counter=self.ctr_64) - pt = get_tag_random("plaintext", 8 * 100) - ct = cipher.encrypt(pt) - - cipher = DES3.new(self.key_192, DES3.MODE_CTR, counter=self.ctr_64) - pt2 = cipher.decrypt(ct) - self.assertEqual(pt, pt2) - - def test_invalid_counter_parameter(self): - # Counter object is required for ciphers with short block size - self.assertRaises(TypeError, DES3.new, self.key_192, AES.MODE_CTR) - # Positional arguments are not allowed (Counter must be passed as - # keyword) - self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_CTR, self.ctr_128) - - def test_nonce_attribute(self): - # Nonce attribute is the prefix passed to Counter (DES3) - cipher = DES3.new(self.key_192, DES3.MODE_CTR, counter=self.ctr_64) - self.assertEqual(cipher.nonce, self.nonce_32) - - # Nonce attribute is the prefix passed to Counter (AES) - cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128) - self.assertEqual(cipher.nonce, self.nonce_64) - - # Nonce attribute is not defined if suffix is used in Counter - counter = Counter.new(64, prefix=self.nonce_32, suffix=self.nonce_32) - cipher = AES.new(self.key_128, AES.MODE_CTR, counter=counter) - self.assertFalse(hasattr(cipher, "nonce")) - - def test_nonce_parameter(self): - # Nonce parameter becomes nonce attribute - cipher1 = AES.new(self.key_128, AES.MODE_CTR, nonce=self.nonce_64) - self.assertEqual(cipher1.nonce, self.nonce_64) - - counter = Counter.new(64, prefix=self.nonce_64, initial_value=0) - cipher2 = AES.new(self.key_128, AES.MODE_CTR, counter=counter) - self.assertEqual(cipher1.nonce, cipher2.nonce) - - pt = get_tag_random("plaintext", 65536) - self.assertEqual(cipher1.encrypt(pt), cipher2.encrypt(pt)) - - # Nonce is implicitly created (for AES) when no parameters are passed - nonce1 = AES.new(self.key_128, AES.MODE_CTR).nonce - nonce2 = AES.new(self.key_128, AES.MODE_CTR).nonce - self.assertNotEqual(nonce1, nonce2) - self.assertEqual(len(nonce1), 8) - - # Nonce can be zero-length - cipher = AES.new(self.key_128, AES.MODE_CTR, nonce=b"") - self.assertEqual(b"", cipher.nonce) - cipher.encrypt(b'0'*300) - - # Nonce and Counter are mutually exclusive - self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_CTR, - counter=self.ctr_128, nonce=self.nonce_64) - - def test_initial_value_parameter(self): - # Test with nonce parameter - cipher1 = AES.new(self.key_128, AES.MODE_CTR, - nonce=self.nonce_64, initial_value=0xFFFF) - counter = 
Counter.new(64, prefix=self.nonce_64, initial_value=0xFFFF) - cipher2 = AES.new(self.key_128, AES.MODE_CTR, counter=counter) - pt = get_tag_random("plaintext", 65536) - self.assertEqual(cipher1.encrypt(pt), cipher2.encrypt(pt)) - - # Test without nonce parameter - cipher1 = AES.new(self.key_128, AES.MODE_CTR, - initial_value=0xFFFF) - counter = Counter.new(64, prefix=cipher1.nonce, initial_value=0xFFFF) - cipher2 = AES.new(self.key_128, AES.MODE_CTR, counter=counter) - pt = get_tag_random("plaintext", 65536) - self.assertEqual(cipher1.encrypt(pt), cipher2.encrypt(pt)) - - # Initial_value and Counter are mutually exclusive - self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_CTR, - counter=self.ctr_128, initial_value=0) - - def test_initial_value_bytes_parameter(self): - # Same result as when passing an integer - cipher1 = AES.new(self.key_128, AES.MODE_CTR, - nonce=self.nonce_64, - initial_value=b"\x00"*6+b"\xFF\xFF") - cipher2 = AES.new(self.key_128, AES.MODE_CTR, - nonce=self.nonce_64, initial_value=0xFFFF) - pt = get_tag_random("plaintext", 65536) - self.assertEqual(cipher1.encrypt(pt), cipher2.encrypt(pt)) - - # Fail if the iv is too large - self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_CTR, - initial_value=b"5"*17) - self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_CTR, - nonce=self.nonce_64, initial_value=b"5"*9) - - # Fail if the iv is too short - self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_CTR, - initial_value=b"5"*15) - self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_CTR, - nonce=self.nonce_64, initial_value=b"5"*7) - - def test_iv_with_matching_length(self): - self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_CTR, - counter=Counter.new(120)) - self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_CTR, - counter=Counter.new(136)) - - def test_block_size_128(self): - cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128) - self.assertEqual(cipher.block_size, AES.block_size) - - def test_block_size_64(self): - cipher = DES3.new(self.key_192, DES3.MODE_CTR, counter=self.ctr_64) - self.assertEqual(cipher.block_size, DES3.block_size) - - def test_unaligned_data_128(self): - plaintexts = [ b"7777777" ] * 100 - - cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128) - ciphertexts = [ cipher.encrypt(x) for x in plaintexts ] - cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128) - self.assertEqual(b"".join(ciphertexts), cipher.encrypt(b"".join(plaintexts))) - - cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128) - ciphertexts = [ cipher.encrypt(x) for x in plaintexts ] - cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128) - self.assertEqual(b"".join(ciphertexts), cipher.encrypt(b"".join(plaintexts))) - - def test_unaligned_data_64(self): - plaintexts = [ b"7777777" ] * 100 - cipher = DES3.new(self.key_192, AES.MODE_CTR, counter=self.ctr_64) - ciphertexts = [ cipher.encrypt(x) for x in plaintexts ] - cipher = DES3.new(self.key_192, AES.MODE_CTR, counter=self.ctr_64) - self.assertEqual(b"".join(ciphertexts), cipher.encrypt(b"".join(plaintexts))) - - cipher = DES3.new(self.key_192, AES.MODE_CTR, counter=self.ctr_64) - ciphertexts = [ cipher.encrypt(x) for x in plaintexts ] - cipher = DES3.new(self.key_192, AES.MODE_CTR, counter=self.ctr_64) - self.assertEqual(b"".join(ciphertexts), cipher.encrypt(b"".join(plaintexts))) - - def test_unknown_parameters(self): - self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_CTR, - 7, 
counter=self.ctr_128) - self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_CTR, - counter=self.ctr_128, unknown=7) - # But some are only known by the base cipher (e.g. use_aesni consumed by the AES module) - AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128, use_aesni=False) - - def test_null_encryption_decryption(self): - for func in "encrypt", "decrypt": - cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128) - result = getattr(cipher, func)(b"") - self.assertEqual(result, b"") - - def test_either_encrypt_or_decrypt(self): - cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128) - cipher.encrypt(b"") - self.assertRaises(TypeError, cipher.decrypt, b"") - - cipher = AES.new(self.key_128, AES.MODE_CTR, counter=self.ctr_128) - cipher.decrypt(b"") - self.assertRaises(TypeError, cipher.encrypt, b"") - - def test_wrap_around(self): - # Counter is only 8 bits, so we can only encrypt/decrypt 256 blocks (=4096 bytes) - counter = Counter.new(8, prefix=bchr(9) * 15) - max_bytes = 4096 - - cipher = AES.new(self.key_128, AES.MODE_CTR, counter=counter) - cipher.encrypt(b'9' * max_bytes) - self.assertRaises(OverflowError, cipher.encrypt, b'9') - - cipher = AES.new(self.key_128, AES.MODE_CTR, counter=counter) - self.assertRaises(OverflowError, cipher.encrypt, b'9' * (max_bytes + 1)) - - cipher = AES.new(self.key_128, AES.MODE_CTR, counter=counter) - cipher.decrypt(b'9' * max_bytes) - self.assertRaises(OverflowError, cipher.decrypt, b'9') - - cipher = AES.new(self.key_128, AES.MODE_CTR, counter=counter) - self.assertRaises(OverflowError, cipher.decrypt, b'9' * (max_bytes + 1)) - - def test_bytearray(self): - data = b"1" * 16 - iv = b"\x00" * 6 + b"\xFF\xFF" - - # Encrypt - cipher1 = AES.new(self.key_128, AES.MODE_CTR, - nonce=self.nonce_64, - initial_value=iv) - ref1 = cipher1.encrypt(data) - - cipher2 = AES.new(self.key_128, AES.MODE_CTR, - nonce=bytearray(self.nonce_64), - initial_value=bytearray(iv)) - ref2 = cipher2.encrypt(bytearray(data)) - - self.assertEqual(ref1, ref2) - self.assertEqual(cipher1.nonce, cipher2.nonce) - - # Decrypt - cipher3 = AES.new(self.key_128, AES.MODE_CTR, - nonce=self.nonce_64, - initial_value=iv) - ref3 = cipher3.decrypt(data) - - cipher4 = AES.new(self.key_128, AES.MODE_CTR, - nonce=bytearray(self.nonce_64), - initial_value=bytearray(iv)) - ref4 = cipher4.decrypt(bytearray(data)) - - self.assertEqual(ref3, ref4) - - def test_very_long_data(self): - cipher = AES.new(b'A' * 32, AES.MODE_CTR, nonce=b'') - ct = cipher.encrypt(b'B' * 1000000) - digest = SHA256.new(ct).hexdigest() - self.assertEqual(digest, "96204fc470476561a3a8f3b6fe6d24be85c87510b638142d1d0fb90989f8a6a6") - - def test_output_param(self): - - pt = b'5' * 128 - cipher = AES.new(b'4'*16, AES.MODE_CTR, nonce=self.nonce_64) - ct = cipher.encrypt(pt) - - output = bytearray(128) - cipher = AES.new(b'4'*16, AES.MODE_CTR, nonce=self.nonce_64) - res = cipher.encrypt(pt, output=output) - self.assertEqual(ct, output) - self.assertEqual(res, None) - - cipher = AES.new(b'4'*16, AES.MODE_CTR, nonce=self.nonce_64) - res = cipher.decrypt(ct, output=output) - self.assertEqual(pt, output) - self.assertEqual(res, None) - - def test_output_param_memoryview(self): - - pt = b'5' * 128 - cipher = AES.new(b'4'*16, AES.MODE_CTR, nonce=self.nonce_64) - ct = cipher.encrypt(pt) - - output = memoryview(bytearray(128)) - cipher = AES.new(b'4'*16, AES.MODE_CTR, nonce=self.nonce_64) - cipher.encrypt(pt, output=output) - self.assertEqual(ct, output) - - cipher = AES.new(b'4'*16, AES.MODE_CTR, 
nonce=self.nonce_64) - cipher.decrypt(ct, output=output) - self.assertEqual(pt, output) - - def test_output_param_neg(self): - LEN_PT = 128 - - pt = b'5' * LEN_PT - cipher = AES.new(b'4'*16, AES.MODE_CTR, nonce=self.nonce_64) - ct = cipher.encrypt(pt) - - cipher = AES.new(b'4'*16, AES.MODE_CTR, nonce=self.nonce_64) - self.assertRaises(TypeError, cipher.encrypt, pt, output=b'0' * LEN_PT) - - cipher = AES.new(b'4'*16, AES.MODE_CTR, nonce=self.nonce_64) - self.assertRaises(TypeError, cipher.decrypt, ct, output=b'0' * LEN_PT) - - shorter_output = bytearray(LEN_PT - 1) - cipher = AES.new(b'4'*16, AES.MODE_CTR, nonce=self.nonce_64) - self.assertRaises(ValueError, cipher.encrypt, pt, output=shorter_output) - cipher = AES.new(b'4'*16, AES.MODE_CTR, nonce=self.nonce_64) - self.assertRaises(ValueError, cipher.decrypt, ct, output=shorter_output) - - -class SP800TestVectors(unittest.TestCase): - """Class exercising the CTR test vectors found in Section F.5 - of NIST SP 800-38A""" - - def test_aes_128(self): - plaintext = '6bc1bee22e409f96e93d7e117393172a' +\ - 'ae2d8a571e03ac9c9eb76fac45af8e51' +\ - '30c81c46a35ce411e5fbc1191a0a52ef' +\ - 'f69f2445df4f9b17ad2b417be66c3710' - ciphertext = '874d6191b620e3261bef6864990db6ce' +\ - '9806f66b7970fdff8617187bb9fffdff' +\ - '5ae4df3edbd5d35e5b4f09020db03eab' +\ - '1e031dda2fbe03d1792170a0f3009cee' - key = '2b7e151628aed2a6abf7158809cf4f3c' - counter = Counter.new(nbits=16, - prefix=unhexlify('f0f1f2f3f4f5f6f7f8f9fafbfcfd'), - initial_value=0xfeff) - - key = unhexlify(key) - plaintext = unhexlify(plaintext) - ciphertext = unhexlify(ciphertext) - - cipher = AES.new(key, AES.MODE_CTR, counter=counter) - self.assertEqual(cipher.encrypt(plaintext), ciphertext) - cipher = AES.new(key, AES.MODE_CTR, counter=counter) - self.assertEqual(cipher.decrypt(ciphertext), plaintext) - - def test_aes_192(self): - plaintext = '6bc1bee22e409f96e93d7e117393172a' +\ - 'ae2d8a571e03ac9c9eb76fac45af8e51' +\ - '30c81c46a35ce411e5fbc1191a0a52ef' +\ - 'f69f2445df4f9b17ad2b417be66c3710' - ciphertext = '1abc932417521ca24f2b0459fe7e6e0b' +\ - '090339ec0aa6faefd5ccc2c6f4ce8e94' +\ - '1e36b26bd1ebc670d1bd1d665620abf7' +\ - '4f78a7f6d29809585a97daec58c6b050' - key = '8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b' - counter = Counter.new(nbits=16, - prefix=unhexlify('f0f1f2f3f4f5f6f7f8f9fafbfcfd'), - initial_value=0xfeff) - - key = unhexlify(key) - plaintext = unhexlify(plaintext) - ciphertext = unhexlify(ciphertext) - - cipher = AES.new(key, AES.MODE_CTR, counter=counter) - self.assertEqual(cipher.encrypt(plaintext), ciphertext) - cipher = AES.new(key, AES.MODE_CTR, counter=counter) - self.assertEqual(cipher.decrypt(ciphertext), plaintext) - - def test_aes_256(self): - plaintext = '6bc1bee22e409f96e93d7e117393172a' +\ - 'ae2d8a571e03ac9c9eb76fac45af8e51' +\ - '30c81c46a35ce411e5fbc1191a0a52ef' +\ - 'f69f2445df4f9b17ad2b417be66c3710' - ciphertext = '601ec313775789a5b7a7f504bbf3d228' +\ - 'f443e3ca4d62b59aca84e990cacaf5c5' +\ - '2b0930daa23de94ce87017ba2d84988d' +\ - 'dfc9c58db67aada613c2dd08457941a6' - key = '603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4' - counter = Counter.new(nbits=16, - prefix=unhexlify('f0f1f2f3f4f5f6f7f8f9fafbfcfd'), - initial_value=0xfeff) - key = unhexlify(key) - plaintext = unhexlify(plaintext) - ciphertext = unhexlify(ciphertext) - - cipher = AES.new(key, AES.MODE_CTR, counter=counter) - self.assertEqual(cipher.encrypt(plaintext), ciphertext) - cipher = AES.new(key, AES.MODE_CTR, counter=counter) - self.assertEqual(cipher.decrypt(ciphertext), 
plaintext) - - -class RFC3686TestVectors(unittest.TestCase): - - # Each item is a test vector with: - # - plaintext - # - ciphertext - # - key (AES 128, 192 or 256 bits) - # - counter prefix (4 byte nonce + 8 byte nonce) - data = ( - ('53696e676c6520626c6f636b206d7367', - 'e4095d4fb7a7b3792d6175a3261311b8', - 'ae6852f8121067cc4bf7a5765577f39e', - '000000300000000000000000'), - ('000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f', - '5104a106168a72d9790d41ee8edad388eb2e1efc46da57c8fce630df9141be28', - '7e24067817fae0d743d6ce1f32539163', - '006cb6dbc0543b59da48d90b'), - ('000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20212223', - 'c1cf48a89f2ffdd9cf4652e9efdb72d74540a42bde6d7836d59a5ceaaef3105325b2072f', - '7691be035e5020a8ac6e618529f9a0dc', - '00e0017b27777f3f4a1786f0'), - ('53696e676c6520626c6f636b206d7367', - '4b55384fe259c9c84e7935a003cbe928', - '16af5b145fc9f579c175f93e3bfb0eed863d06ccfdb78515', - '0000004836733c147d6d93cb'), - ('000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f', - '453243fc609b23327edfaafa7131cd9f8490701c5ad4a79cfc1fe0ff42f4fb00', - '7c5cb2401b3dc33c19e7340819e0f69c678c3db8e6f6a91a', - '0096b03b020c6eadc2cb500d'), - ('000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20212223', - '96893fc55e5c722f540b7dd1ddf7e758d288bc95c69165884536c811662f2188abee0935', - '02bf391ee8ecb159b959617b0965279bf59b60a786d3e0fe', - '0007bdfd5cbd60278dcc0912'), - ('53696e676c6520626c6f636b206d7367', - '145ad01dbf824ec7560863dc71e3e0c0', - '776beff2851db06f4c8a0542c8696f6c6a81af1eec96b4d37fc1d689e6c1c104', - '00000060db5672c97aa8f0b2'), - ('000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f', - 'f05e231b3894612c49ee000b804eb2a9b8306b508f839d6a5530831d9344af1c', - 'f6d66d6bd52d59bb0796365879eff886c66dd51a5b6a99744b50590c87a23884', - '00faac24c1585ef15a43d875'), - ('000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20212223', - 'eb6c52821d0bbbf7ce7594462aca4faab407df866569fd07f48cc0b583d6071f1ec0e6b8', - 'ff7a617ce69148e4f1726e2f43581de2aa62d9f805532edff1eed687fb54153d', - '001cc5b751a51d70a1c11148') - ) - - bindata = [] - for tv in data: - bindata.append([unhexlify(x) for x in tv]) - - def runTest(self): - for pt, ct, key, prefix in self.bindata: - counter = Counter.new(32, prefix=prefix) - cipher = AES.new(key, AES.MODE_CTR, counter=counter) - result = cipher.encrypt(pt) - self.assertEqual(hexlify(ct), hexlify(result)) - - -def get_tests(config={}): - tests = [] - tests += list_test_cases(CtrTests) - tests += list_test_cases(SP800TestVectors) - tests += [ RFC3686TestVectors() ] - return tests - - -if __name__ == '__main__': - suite = lambda: unittest.TestSuite(get_tests()) - unittest.main(defaultTest='suite') diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/_core/_tasks.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/_core/_tasks.py deleted file mode 100644 index e9d9c2bd67f105d9e728ffed5496b010051b1452..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/_core/_tasks.py +++ /dev/null @@ -1,180 +0,0 @@ -from __future__ import annotations - -import math -from types import TracebackType -from warnings import warn - -from ..abc._tasks import TaskGroup, TaskStatus -from ._compat import ( - DeprecatedAsyncContextManager, - DeprecatedAwaitable, - DeprecatedAwaitableFloat, -) -from ._eventloop import get_asynclib - - -class _IgnoredTaskStatus(TaskStatus[object]): - def 
started(self, value: object = None) -> None: - pass - - -TASK_STATUS_IGNORED = _IgnoredTaskStatus() - - -class CancelScope(DeprecatedAsyncContextManager["CancelScope"]): - """ - Wraps a unit of work that can be made separately cancellable. - - :param deadline: The time (clock value) when this scope is cancelled automatically - :param shield: ``True`` to shield the cancel scope from external cancellation - """ - - def __new__( - cls, *, deadline: float = math.inf, shield: bool = False - ) -> CancelScope: - return get_asynclib().CancelScope(shield=shield, deadline=deadline) - - def cancel(self) -> DeprecatedAwaitable: - """Cancel this scope immediately.""" - raise NotImplementedError - - @property - def deadline(self) -> float: - """ - The time (clock value) when this scope is cancelled automatically. - - Will be ``float('inf')`` if no timeout has been set. - - """ - raise NotImplementedError - - @deadline.setter - def deadline(self, value: float) -> None: - raise NotImplementedError - - @property - def cancel_called(self) -> bool: - """``True`` if :meth:`cancel` has been called.""" - raise NotImplementedError - - @property - def shield(self) -> bool: - """ - ``True`` if this scope is shielded from external cancellation. - - While a scope is shielded, it will not receive cancellations from outside. - - """ - raise NotImplementedError - - @shield.setter - def shield(self, value: bool) -> None: - raise NotImplementedError - - def __enter__(self) -> CancelScope: - raise NotImplementedError - - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> bool | None: - raise NotImplementedError - - -def open_cancel_scope(*, shield: bool = False) -> CancelScope: - """ - Open a cancel scope. - - :param shield: ``True`` to shield the cancel scope from external cancellation - :return: a cancel scope - - .. deprecated:: 3.0 - Use :class:`~CancelScope` directly. - - """ - warn( - "open_cancel_scope() is deprecated -- use CancelScope() directly", - DeprecationWarning, - ) - return get_asynclib().CancelScope(shield=shield) - - -class FailAfterContextManager(DeprecatedAsyncContextManager[CancelScope]): - def __init__(self, cancel_scope: CancelScope): - self._cancel_scope = cancel_scope - - def __enter__(self) -> CancelScope: - return self._cancel_scope.__enter__() - - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> bool | None: - retval = self._cancel_scope.__exit__(exc_type, exc_val, exc_tb) - if self._cancel_scope.cancel_called: - raise TimeoutError - - return retval - - -def fail_after(delay: float | None, shield: bool = False) -> FailAfterContextManager: - """ - Create a context manager which raises a :class:`TimeoutError` if does not finish in time. 
- - :param delay: maximum allowed time (in seconds) before raising the exception, or ``None`` to - disable the timeout - :param shield: ``True`` to shield the cancel scope from external cancellation - :return: a context manager that yields a cancel scope - :rtype: :class:`~typing.ContextManager`\\[:class:`~anyio.CancelScope`\\] - - """ - deadline = ( - (get_asynclib().current_time() + delay) if delay is not None else math.inf - ) - cancel_scope = get_asynclib().CancelScope(deadline=deadline, shield=shield) - return FailAfterContextManager(cancel_scope) - - -def move_on_after(delay: float | None, shield: bool = False) -> CancelScope: - """ - Create a cancel scope with a deadline that expires after the given delay. - - :param delay: maximum allowed time (in seconds) before exiting the context block, or ``None`` - to disable the timeout - :param shield: ``True`` to shield the cancel scope from external cancellation - :return: a cancel scope - - """ - deadline = ( - (get_asynclib().current_time() + delay) if delay is not None else math.inf - ) - return get_asynclib().CancelScope(deadline=deadline, shield=shield) - - -def current_effective_deadline() -> DeprecatedAwaitableFloat: - """ - Return the nearest deadline among all the cancel scopes effective for the current task. - - :return: a clock value from the event loop's internal clock (or ``float('inf')`` if - there is no deadline in effect, or ``float('-inf')`` if the current scope has - been cancelled) - :rtype: float - - """ - return DeprecatedAwaitableFloat( - get_asynclib().current_effective_deadline(), current_effective_deadline - ) - - -def create_task_group() -> TaskGroup: - """ - Create a task group. - - :return: a task group - - """ - return get_asynclib().TaskGroup() diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/implementations/cache_mapper.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/implementations/cache_mapper.py deleted file mode 100644 index 000ccebc83304369cd00fcd8b3458b852c366530..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/implementations/cache_mapper.py +++ /dev/null @@ -1,80 +0,0 @@ -from __future__ import annotations - -import abc -import hashlib -from typing import TYPE_CHECKING - -from fsspec.implementations.local import make_path_posix - -if TYPE_CHECKING: - from typing import Any - - -class AbstractCacheMapper(abc.ABC): - """Abstract super-class for mappers from remote URLs to local cached - basenames. - """ - - @abc.abstractmethod - def __call__(self, path: str) -> str: - ... - - def __eq__(self, other: Any) -> bool: - # Identity only depends on class. When derived classes have attributes - # they will need to be included. - return isinstance(other, type(self)) - - def __hash__(self) -> int: - # Identity only depends on class. When derived classes have attributes - # they will need to be included. - return hash(type(self)) - - -class BasenameCacheMapper(AbstractCacheMapper): - """Cache mapper that uses the basename of the remote URL and a fixed number - of directory levels above this. - - The default is zero directory levels, meaning different paths with the same - basename will have the same cached basename. 
- """ - - def __init__(self, directory_levels: int = 0): - if directory_levels < 0: - raise ValueError( - "BasenameCacheMapper requires zero or positive directory_levels" - ) - self.directory_levels = directory_levels - - # Separator for directories when encoded as strings. - self._separator = "_@_" - - def __call__(self, path: str) -> str: - path = make_path_posix(path) - prefix, *bits = path.rsplit("/", self.directory_levels + 1) - if bits: - return self._separator.join(bits) - else: - return prefix # No separator found, simple filename - - def __eq__(self, other: Any) -> bool: - return super().__eq__(other) and self.directory_levels == other.directory_levels - - def __hash__(self) -> int: - return super().__hash__() ^ hash(self.directory_levels) - - -class HashCacheMapper(AbstractCacheMapper): - """Cache mapper that uses a hash of the remote URL.""" - - def __call__(self, path: str) -> str: - return hashlib.sha256(path.encode()).hexdigest() - - -def create_cache_mapper(same_names: bool) -> AbstractCacheMapper: - """Factory method to create cache mapper for backward compatibility with - ``CachingFileSystem`` constructor using ``same_names`` kwarg. - """ - if same_names: - return BasenameCacheMapper() - else: - return HashCacheMapper() diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/vector_stores/types.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/vector_stores/types.py deleted file mode 100644 index bd4e74488930a088fe9eb71296e312e65e96be77..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/vector_stores/types.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Vector store index types.""" - - -from dataclasses import dataclass -from typing import Any, List, Optional, Protocol - -from gpt_index.data_structs.data_structs import Node - - -@dataclass -class NodeEmbeddingResult: - """Node embedding result. - - Args: - id (str): Node id - node (Node): Node - embedding (List[float]): Embedding - - """ - - id: str - node: Node - embedding: List[float] - doc_id: str - - -@dataclass -class VectorStoreQueryResult: - """Vector store query result.""" - - nodes: Optional[List[Node]] = None - similarities: Optional[List[float]] = None - ids: Optional[List[str]] = None - - -class VectorStore(Protocol): - """Abstract vector store protocol.""" - - stores_text: bool - - @property - def client(self) -> Any: - """Get client.""" - ... - - @property - def config_dict(self) -> dict: - """Get config dict.""" - ... - - def add( - self, - embedding_results: List[NodeEmbeddingResult], - ) -> List[str]: - """Add embedding results to vector store.""" - ... - - def delete(self, doc_id: str, **delete_kwargs: Any) -> None: - """Delete doc.""" - ... - - def query( - self, - query_embedding: List[float], - similarity_top_k: int, - doc_ids: Optional[List[str]] = None, - ) -> VectorStoreQueryResult: - """Query vector store.""" - ... 
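The `VectorStore` protocol deleted above is small enough that a plain in-memory class can satisfy it. The sketch below is illustrative only: it assumes the `NodeEmbeddingResult` and `VectorStoreQueryResult` dataclasses from the module shown above, and `InMemoryVectorStore` with its brute-force cosine-similarity ranking is a hypothetical example, not part of gpt_index.

import math
from typing import Any, Dict, List, Optional, Tuple

class InMemoryVectorStore:
    """Minimal in-memory store satisfying the VectorStore protocol (illustrative)."""

    stores_text = True

    def __init__(self) -> None:
        # node id -> (doc_id, node, embedding)
        self._data: Dict[str, Tuple[str, Any, List[float]]] = {}

    @property
    def client(self) -> Any:
        return None  # no external client for the in-memory case

    @property
    def config_dict(self) -> dict:
        return {}

    def add(self, embedding_results) -> List[str]:
        # embedding_results: List[NodeEmbeddingResult] from the module above
        for r in embedding_results:
            self._data[r.id] = (r.doc_id, r.node, r.embedding)
        return [r.id for r in embedding_results]

    def delete(self, doc_id: str, **delete_kwargs: Any) -> None:
        self._data = {k: v for k, v in self._data.items() if v[0] != doc_id}

    def query(self, query_embedding: List[float], similarity_top_k: int,
              doc_ids: Optional[List[str]] = None):
        def cosine(a: List[float], b: List[float]) -> float:
            dot = sum(x * y for x, y in zip(a, b))
            na = math.sqrt(sum(x * x for x in a))
            nb = math.sqrt(sum(y * y for y in b))
            return dot / (na * nb) if na and nb else 0.0

        scored = [
            (key, node, cosine(query_embedding, emb))
            for key, (doc_id, node, emb) in self._data.items()
            if doc_ids is None or doc_id in doc_ids
        ]
        scored.sort(key=lambda item: item[2], reverse=True)
        top = scored[:similarity_top_k]
        # VectorStoreQueryResult is the dataclass defined in the deleted module above
        return VectorStoreQueryResult(
            nodes=[node for _, node, _ in top],
            similarities=[score for _, _, score in top],
            ids=[key for key, _, _ in top],
        )

A store like this plugs into the same flow as the real backends: the index embeds each node, passes the results to add(), and at query time calls query() with the query embedding and reads nodes, similarities and ids off the result.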
diff --git a/spaces/josedolot/HybridNet_Demo2/encoders/mobilenet.py b/spaces/josedolot/HybridNet_Demo2/encoders/mobilenet.py deleted file mode 100644 index 8bfdb1095b5343020a386b101fdad7aaebb2f009..0000000000000000000000000000000000000000 --- a/spaces/josedolot/HybridNet_Demo2/encoders/mobilenet.py +++ /dev/null @@ -1,83 +0,0 @@ -""" Each encoder should have following attributes and methods and be inherited from `_base.EncoderMixin` - -Attributes: - - _out_channels (list of int): specify number of channels for each encoder feature tensor - _depth (int): specify number of stages in decoder (in other words number of downsampling operations) - _in_channels (int): default number of input channels in first Conv2d layer for encoder (usually 3) - -Methods: - - forward(self, x: torch.Tensor) - produce list of features of different spatial resolutions, each feature is a 4D torch.tensor of - shape NCHW (features should be sorted in descending order according to spatial resolution, starting - with resolution same as input `x` tensor). - - Input: `x` with shape (1, 3, 64, 64) - Output: [f0, f1, f2, f3, f4, f5] - features with corresponding shapes - [(1, 3, 64, 64), (1, 64, 32, 32), (1, 128, 16, 16), (1, 256, 8, 8), - (1, 512, 4, 4), (1, 1024, 2, 2)] (C - dim may differ) - - also should support number of features according to specified depth, e.g. if depth = 5, - number of feature tensors = 6 (one with same resolution as input and 5 downsampled), - depth = 3 -> number of feature tensors = 4 (one with same resolution as input and 3 downsampled). -""" - -import torchvision -import torch.nn as nn - -from ._base import EncoderMixin - - -class MobileNetV2Encoder(torchvision.models.MobileNetV2, EncoderMixin): - - def __init__(self, out_channels, depth=5, **kwargs): - super().__init__(**kwargs) - self._depth = depth - self._out_channels = out_channels - self._in_channels = 3 - del self.classifier - - def get_stages(self): - return [ - nn.Identity(), - self.features[:2], - self.features[2:4], - self.features[4:7], - self.features[7:14], - self.features[14:], - ] - - def forward(self, x): - stages = self.get_stages() - - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - features.append(x) - - return features - - def load_state_dict(self, state_dict, **kwargs): - state_dict.pop("classifier.1.bias", None) - state_dict.pop("classifier.1.weight", None) - super().load_state_dict(state_dict, **kwargs) - - -mobilenet_encoders = { - "mobilenet_v2": { - "encoder": MobileNetV2Encoder, - "pretrained_settings": { - "imagenet": { - "mean": [0.485, 0.456, 0.406], - "std": [0.229, 0.224, 0.225], - "url": "https://download.pytorch.org/models/mobilenet_v2-b0353104.pth", - "input_space": "RGB", - "input_range": [0, 1], - }, - }, - "params": { - "out_channels": (3, 16, 24, 32, 96, 1280), - }, - }, -} diff --git a/spaces/jpjpjpjpjp/HylandDocumentVisualQA/app.py b/spaces/jpjpjpjpjp/HylandDocumentVisualQA/app.py deleted file mode 100644 index 088047d41f191dfc726cae51552394116cac0b1c..0000000000000000000000000000000000000000 --- a/spaces/jpjpjpjpjp/HylandDocumentVisualQA/app.py +++ /dev/null @@ -1,46 +0,0 @@ -import gradio as gr -import base64 -import io -import requests -import json -from PIL import Image - - -def analyze_image(image, question): - - #img64 = base64.b64decode(image) - im = Image.fromarray(image) - in_mem_file = io.BytesIO() - im.save(in_mem_file, format="png") - - payload = { - "model":"Baseline", - "tasktype":"Extraction", - "questions":[{"Pages":[1],"Text":question}], - "image": 
base64.b64encode(in_mem_file.getvalue()).decode() - } - url = "https://ky8mfb27dj.execute-api.us-east-1.amazonaws.com/dev/analyzedocument/submit" - - payload = json.dumps(payload) - headers = {'Content-Type': 'application/json'} - - response = requests.request("POST", url, headers=headers, data=payload) - - jsonresponse = json.loads(response.text) - return "Answer: {0} \nConfidence: {1}".format(jsonresponse['body'][0]['result'][0]['answer'][0], jsonresponse['body'][0]['result'][0]['score']) - -description = "Hyland Demo for Document Question & Answering , fine-tuned on DocVQA (document visual question answering). To use it, simply upload your image and type a question and click 'submit', or click one of the examples to load them. Read more at the links below." -title = "DocVQA" -article = "

    DocVQA: Challenge | Overview - Document Visual Question Answering
    " -examples =[['publaynet_example.jpeg']] -css = ".output-image, .input-image, .image-preview {height: 600px !important}" - -demo = gr.Interface(fn=analyze_image, - inputs=[gr.inputs.Image(type="pil", label="Document image"),"text"], - outputs=gr.outputs.Textbox(type="auto", label="Answer"), - title=title, - description=description, - article=article, - css=css, - enable_queue=True) -demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/juancopi81/multitrack-midi-music-generator/Dockerfile b/spaces/juancopi81/multitrack-midi-music-generator/Dockerfile deleted file mode 100644 index 3b72aae1806d72a1fbbeeeb2b78683b344ab3a1c..0000000000000000000000000000000000000000 --- a/spaces/juancopi81/multitrack-midi-music-generator/Dockerfile +++ /dev/null @@ -1,50 +0,0 @@ -FROM ubuntu:20.04 - -WORKDIR /code - -ENV SYSTEM=spaces -ENV SPACE_ID=juancopi81/multitrack-midi-music-generator - -COPY ./requirements.txt /code/requirements.txt - -# Preconfigure tzdata -RUN DEBIAN_FRONTEND="noninteractive" apt-get -qq update && \ - DEBIAN_FRONTEND="noninteractive" apt-get install -y tzdata - -RUN apt-get update -qq && \ - apt-get install -qq python3-pip build-essential libasound2-dev libjack-dev wget cmake pkg-config libglib2.0-dev ffmpeg - -# Download libfluidsynth source -RUN wget https://github.com/FluidSynth/fluidsynth/archive/refs/tags/v2.3.3.tar.gz && \ - tar xzf v2.3.3.tar.gz && \ - cd fluidsynth-2.3.3 && \ - mkdir build && \ - cd build && \ - cmake .. && \ - make && \ - make install && \ - cd ../../ && \ - rm -rf fluidsynth-2.3.3 v2.3.3.tar.gz - -ENV LD_LIBRARY_PATH=/usr/local/lib:${LD_LIBRARY_PATH} -RUN ldconfig - -RUN pip3 install --no-cache-dir --upgrade -r /code/requirements.txt - -# Set up a new user named "user" with user ID 1000 -RUN useradd -m -u 1000 user - -# Switch to the "user" user -USER user - -# Set home to the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -# Copy the current directory contents into the container at $HOME/app setting the owner to the user -COPY --chown=user . $HOME/app - -CMD ["python3", "main.py"] diff --git a/spaces/justin-zk/Personalize-SAM/per_segment_anything/modeling/transformer.py b/spaces/justin-zk/Personalize-SAM/per_segment_anything/modeling/transformer.py deleted file mode 100644 index e1e8c7662d0c3321cc38f7f597aa26fd470b64b5..0000000000000000000000000000000000000000 --- a/spaces/justin-zk/Personalize-SAM/per_segment_anything/modeling/transformer.py +++ /dev/null @@ -1,252 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -from torch import Tensor, nn - -import math -from typing import Tuple, Type - -from .common import MLPBlock - - -class TwoWayTransformer(nn.Module): - def __init__( - self, - depth: int, - embedding_dim: int, - num_heads: int, - mlp_dim: int, - activation: Type[nn.Module] = nn.ReLU, - attention_downsample_rate: int = 2, - ) -> None: - """ - A transformer decoder that attends to an input image using - queries whose positional embedding is supplied. - - Args: - depth (int): number of layers in the transformer - embedding_dim (int): the channel dimension for the input embeddings - num_heads (int): the number of heads for multihead attention. 
Must - divide embedding_dim - mlp_dim (int): the channel dimension internal to the MLP block - activation (nn.Module): the activation to use in the MLP block - """ - super().__init__() - self.depth = depth - self.embedding_dim = embedding_dim - self.num_heads = num_heads - self.mlp_dim = mlp_dim - self.layers = nn.ModuleList() - - for i in range(depth): - self.layers.append( - TwoWayAttentionBlock( - embedding_dim=embedding_dim, - num_heads=num_heads, - mlp_dim=mlp_dim, - activation=activation, - attention_downsample_rate=attention_downsample_rate, - skip_first_layer_pe=(i == 0), - ) - ) - - self.final_attn_token_to_image = Attention( - embedding_dim, num_heads, downsample_rate=attention_downsample_rate - ) - self.norm_final_attn = nn.LayerNorm(embedding_dim) - - def forward( - self, - image_embedding: Tensor, - image_pe: Tensor, - point_embedding: Tensor, - attn_sim: Tensor, - target_embedding=None - ) -> Tuple[Tensor, Tensor]: - """ - Args: - image_embedding (torch.Tensor): image to attend to. Should be shape - B x embedding_dim x h x w for any h and w. - image_pe (torch.Tensor): the positional encoding to add to the image. Must - have the same shape as image_embedding. - point_embedding (torch.Tensor): the embedding to add to the query points. - Must have shape B x N_points x embedding_dim for any N_points. - - Returns: - torch.Tensor: the processed point_embedding - torch.Tensor: the processed image_embedding - """ - # BxCxHxW -> BxHWxC == B x N_image_tokens x C - bs, c, h, w = image_embedding.shape - image_embedding = image_embedding.flatten(2).permute(0, 2, 1) - image_pe = image_pe.flatten(2).permute(0, 2, 1) - - # Prepare queries - queries = point_embedding - keys = image_embedding - - # Apply transformer blocks and final layernorm - for layer in self.layers: - if target_embedding is not None: - queries += target_embedding - queries, keys = layer( - queries=queries, - keys=keys, - query_pe=point_embedding, - key_pe=image_pe, - attn_sim=attn_sim, - ) - - # Apply the final attention layer from the points to the image - q = queries + point_embedding - k = keys + image_pe - - if target_embedding is not None: - q += target_embedding - attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) - queries = queries + attn_out - queries = self.norm_final_attn(queries) - - return queries, keys - - -class TwoWayAttentionBlock(nn.Module): - def __init__( - self, - embedding_dim: int, - num_heads: int, - mlp_dim: int = 2048, - activation: Type[nn.Module] = nn.ReLU, - attention_downsample_rate: int = 2, - skip_first_layer_pe: bool = False, - ) -> None: - """ - A transformer block with four layers: (1) self-attention of sparse - inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp - block on sparse inputs, and (4) cross attention of dense inputs to sparse - inputs. 
- - Arguments: - embedding_dim (int): the channel dimension of the embeddings - num_heads (int): the number of heads in the attention layers - mlp_dim (int): the hidden dimension of the mlp block - activation (nn.Module): the activation of the mlp block - skip_first_layer_pe (bool): skip the PE on the first layer - """ - super().__init__() - self.self_attn = Attention(embedding_dim, num_heads) - self.norm1 = nn.LayerNorm(embedding_dim) - - self.cross_attn_token_to_image = Attention( - embedding_dim, num_heads, downsample_rate=attention_downsample_rate - ) - self.norm2 = nn.LayerNorm(embedding_dim) - - self.mlp = MLPBlock(embedding_dim, mlp_dim, activation) - self.norm3 = nn.LayerNorm(embedding_dim) - - self.norm4 = nn.LayerNorm(embedding_dim) - self.cross_attn_image_to_token = Attention( - embedding_dim, num_heads, downsample_rate=attention_downsample_rate - ) - - self.skip_first_layer_pe = skip_first_layer_pe - - def forward( - self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor, attn_sim: Tensor - ) -> Tuple[Tensor, Tensor]: - # Self attention block - if self.skip_first_layer_pe: - queries = self.self_attn(q=queries, k=queries, v=queries) - else: - q = queries + query_pe - attn_out = self.self_attn(q=q, k=q, v=queries) - queries = queries + attn_out - queries = self.norm1(queries) - - # Cross attention block, tokens attending to image embedding - q = queries + query_pe - k = keys + key_pe - attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys, attn_sim=attn_sim) - queries = queries + attn_out - queries = self.norm2(queries) - - # MLP block - mlp_out = self.mlp(queries) - queries = queries + mlp_out - queries = self.norm3(queries) - - # Cross attention block, image embedding attending to tokens - q = queries + query_pe - k = keys + key_pe - attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) - keys = keys + attn_out - keys = self.norm4(keys) - - return queries, keys - - -class Attention(nn.Module): - """ - An attention layer that allows for downscaling the size of the embedding - after projection to queries, keys, and values. - """ - - def __init__( - self, - embedding_dim: int, - num_heads: int, - downsample_rate: int = 1, - ) -> None: - super().__init__() - self.embedding_dim = embedding_dim - self.internal_dim = embedding_dim // downsample_rate - self.num_heads = num_heads - assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim." 
- - self.q_proj = nn.Linear(embedding_dim, self.internal_dim) - self.k_proj = nn.Linear(embedding_dim, self.internal_dim) - self.v_proj = nn.Linear(embedding_dim, self.internal_dim) - self.out_proj = nn.Linear(self.internal_dim, embedding_dim) - - def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: - b, n, c = x.shape - x = x.reshape(b, n, num_heads, c // num_heads) - return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head - - def _recombine_heads(self, x: Tensor) -> Tensor: - b, n_heads, n_tokens, c_per_head = x.shape - x = x.transpose(1, 2) - return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C - - def forward(self, q: Tensor, k: Tensor, v: Tensor, attn_sim: Tensor = None) -> Tensor: - # Input projections - q = self.q_proj(q) - k = self.k_proj(k) - v = self.v_proj(v) - - # Separate into heads - q = self._separate_heads(q, self.num_heads) - k = self._separate_heads(k, self.num_heads) - v = self._separate_heads(v, self.num_heads) - - # Attention - _, _, _, c_per_head = q.shape - attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens - attn = attn / math.sqrt(c_per_head) - attn = torch.softmax(attn, dim=-1) - - if attn_sim is not None: - attn = attn + attn_sim - attn = torch.softmax(attn, dim=-1) - - # Get output - out = attn @ v - out = self._recombine_heads(out) - out = self.out_proj(out) - - return out diff --git a/spaces/kadirnar/Tune-A-Video/tuneavideo/pipelines/pipeline_tuneavideo.py b/spaces/kadirnar/Tune-A-Video/tuneavideo/pipelines/pipeline_tuneavideo.py deleted file mode 100644 index 87c26785a509f1dcbae3408dacecbe7d23530cf0..0000000000000000000000000000000000000000 --- a/spaces/kadirnar/Tune-A-Video/tuneavideo/pipelines/pipeline_tuneavideo.py +++ /dev/null @@ -1,407 +0,0 @@ -# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py - -import inspect -from typing import Callable, List, Optional, Union -from dataclasses import dataclass - -import numpy as np -import torch - -from diffusers.utils import is_accelerate_available -from packaging import version -from transformers import CLIPTextModel, CLIPTokenizer - -from diffusers.configuration_utils import FrozenDict -from diffusers.models import AutoencoderKL -from diffusers.pipeline_utils import DiffusionPipeline -from diffusers.schedulers import ( - DDIMScheduler, - DPMSolverMultistepScheduler, - EulerAncestralDiscreteScheduler, - EulerDiscreteScheduler, - LMSDiscreteScheduler, - PNDMScheduler, -) -from diffusers.utils import deprecate, logging, BaseOutput - -from einops import rearrange - -from ..models.unet import UNet3DConditionModel - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -@dataclass -class TuneAVideoPipelineOutput(BaseOutput): - videos: Union[torch.Tensor, np.ndarray] - - -class TuneAVideoPipeline(DiffusionPipeline): - _optional_components = [] - - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - tokenizer: CLIPTokenizer, - unet: UNet3DConditionModel, - scheduler: Union[ - DDIMScheduler, - PNDMScheduler, - LMSDiscreteScheduler, - EulerDiscreteScheduler, - EulerAncestralDiscreteScheduler, - DPMSolverMultistepScheduler, - ], - ): - super().__init__() - - if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" - f" should be set to 1 instead of {scheduler.config.steps_offset}. 
Please make sure " - "to update the config accordingly as leaving `steps_offset` might led to incorrect results" - " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," - " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" - " file" - ) - deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(scheduler.config) - new_config["steps_offset"] = 1 - scheduler._internal_dict = FrozenDict(new_config) - - if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." - " `clip_sample` should be set to False in the configuration file. Please make sure to update the" - " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" - " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" - " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" - ) - deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(scheduler.config) - new_config["clip_sample"] = False - scheduler._internal_dict = FrozenDict(new_config) - - is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( - version.parse(unet.config._diffusers_version).base_version - ) < version.parse("0.9.0.dev0") - is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 - if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: - deprecation_message = ( - "The configuration file of the unet has set the default `sample_size` to smaller than" - " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" - " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" - " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" - " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" - " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" - " in the config might lead to incorrect results in future versions. 
If you have downloaded this" - " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" - " the `unet/config.json` file" - ) - deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(unet.config) - new_config["sample_size"] = 64 - unet._internal_dict = FrozenDict(new_config) - - self.register_modules( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - - def enable_vae_slicing(self): - self.vae.enable_slicing() - - def disable_vae_slicing(self): - self.vae.disable_slicing() - - def enable_sequential_cpu_offload(self, gpu_id=0): - if is_accelerate_available(): - from accelerate import cpu_offload - else: - raise ImportError("Please install accelerate via `pip install accelerate`") - - device = torch.device(f"cuda:{gpu_id}") - - for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: - if cpu_offloaded_model is not None: - cpu_offload(cpu_offloaded_model, device) - - - @property - def _execution_device(self): - if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): - return self.device - for module in self.unet.modules(): - if ( - hasattr(module, "_hf_hook") - and hasattr(module._hf_hook, "execution_device") - and module._hf_hook.execution_device is not None - ): - return torch.device(module._hf_hook.execution_device) - return self.device - - def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): - batch_size = len(prompt) if isinstance(prompt, list) else 1 - - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): - removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = text_inputs.attention_mask.to(device) - else: - attention_mask = None - - text_embeddings = self.text_encoder( - text_input_ids.to(device), - attention_mask=attention_mask, - ) - text_embeddings = text_embeddings[0] - - # duplicate text embeddings for each generation per prompt, using mps friendly method - bs_embed, seq_len, _ = text_embeddings.shape - text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) - text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." 
- ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." - ) - else: - uncond_tokens = negative_prompt - - max_length = text_input_ids.shape[-1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="pt", - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = uncond_input.attention_mask.to(device) - else: - attention_mask = None - - uncond_embeddings = self.text_encoder( - uncond_input.input_ids.to(device), - attention_mask=attention_mask, - ) - uncond_embeddings = uncond_embeddings[0] - - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = uncond_embeddings.shape[1] - uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) - uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) - - return text_embeddings - - def decode_latents(self, latents): - video_length = latents.shape[2] - latents = 1 / 0.18215 * latents - latents = rearrange(latents, "b c f h w -> (b f) c h w") - video = self.vae.decode(latents).sample - video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) - video = (video / 2 + 0.5).clamp(0, 1) - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 - video = video.cpu().float().numpy() - return video - - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - def check_inputs(self, prompt, height, width, callback_steps): - if not isinstance(prompt, str) and not isinstance(prompt, list): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if height % 8 != 0 or width % 8 != 0: - raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." 
- ) - - def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None): - shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." - ) - - if latents is None: - rand_device = "cpu" if device.type == "mps" else device - - if isinstance(generator, list): - shape = (1,) + shape[1:] - latents = [ - torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) - for i in range(batch_size) - ] - latents = torch.cat(latents, dim=0).to(device) - else: - latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) - else: - if latents.shape != shape: - raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") - latents = latents.to(device) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - return latents - - @torch.no_grad() - def __call__( - self, - prompt: Union[str, List[str]], - video_length: Optional[int], - height: Optional[int] = None, - width: Optional[int] = None, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_videos_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "tensor", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: Optional[int] = 1, - **kwargs, - ): - # Default height and width to unet - height = height or self.unet.config.sample_size * self.vae_scale_factor - width = width or self.unet.config.sample_size * self.vae_scale_factor - - # Check inputs. Raise error if not correct - self.check_inputs(prompt, height, width, callback_steps) - - # Define call parameters - batch_size = 1 if isinstance(prompt, str) else len(prompt) - device = self._execution_device - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - # Encode input prompt - text_embeddings = self._encode_prompt( - prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt - ) - - # Prepare timesteps - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps = self.scheduler.timesteps - - # Prepare latent variables - num_channels_latents = self.unet.in_channels - latents = self.prepare_latents( - batch_size * num_videos_per_prompt, - num_channels_latents, - video_length, - height, - width, - text_embeddings.dtype, - device, - generator, - latents, - ) - latents_dtype = latents.dtype - - # Prepare extra step kwargs. 
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # predict the noise residual - noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample.to(dtype=latents_dtype) - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - # Post-processing - video = self.decode_latents(latents) - - # Convert to tensor - if output_type == "tensor": - video = torch.from_numpy(video) - - if not return_dict: - return video - - return TuneAVideoPipelineOutput(videos=video) \ No newline at end of file diff --git a/spaces/kainy/rvc_okiba_TTS/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py b/spaces/kainy/rvc_okiba_TTS/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py deleted file mode 100644 index b412ba2814e114ca7bb00b6fd6ef217f63d788a3..0000000000000000000000000000000000000000 --- a/spaces/kainy/rvc_okiba_TTS/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py +++ /dev/null @@ -1,86 +0,0 @@ -from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import pyworld -import numpy as np - - -class HarvestF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - Interpolate the F0 contour (fill in unvoiced frames) - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # this copy may be unnecessary - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def resize_f0(self, x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * target_len, len(source)) / target_len, - np.arange(0, len(source)), - source, - ) - res = np.nan_to_num(target) - return res - - def compute_f0(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.harvest( - wav.astype(np.double), - 
fs=self.sampling_rate, - f0_ceil=self.f0_max, - f0_floor=self.f0_min, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - return self.interpolate_f0(self.resize_f0(f0, p_len))[0] - - def compute_f0_uv(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.harvest( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/spaces/kangvcar/RealChar/realtime_ai_character/database/chroma.py b/spaces/kangvcar/RealChar/realtime_ai_character/database/chroma.py deleted file mode 100644 index c7aaa68ea5f7a3e371372c3a3987e3cbd9477731..0000000000000000000000000000000000000000 --- a/spaces/kangvcar/RealChar/realtime_ai_character/database/chroma.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -from dotenv import load_dotenv -from langchain.vectorstores import Chroma -from langchain.embeddings import OpenAIEmbeddings -from realtime_ai_character.logger import get_logger - -load_dotenv() -logger = get_logger(__name__) - -embedding = OpenAIEmbeddings(openai_api_key=os.getenv("OPENAI_API_KEY")) -if os.getenv('OPENAI_API_TYPE') == 'azure': - embedding = OpenAIEmbeddings(openai_api_key=os.getenv("OPENAI_API_KEY"), deployment=os.getenv( - "OPENAI_API_EMBEDDING_DEPLOYMENT_NAME", "text-embedding-ada-002"), chunk_size=1) - - -def get_chroma(): - chroma = Chroma( - collection_name='llm', - embedding_function=embedding, - persist_directory='/home/user/app/chroma.db' - ) - return chroma diff --git a/spaces/kasun/git-large/app.py b/spaces/kasun/git-large/app.py deleted file mode 100644 index 5a51a20b648c585def753a4729f44be29462ca5a..0000000000000000000000000000000000000000 --- a/spaces/kasun/git-large/app.py +++ /dev/null @@ -1,125 +0,0 @@ -import gradio as gr -from transformers import AutoProcessor, BlipForConditionalGeneration, AutoModelForCausalLM, AutoImageProcessor, VisionEncoderDecoderModel, AutoTokenizer - -# from transformers import AutoProcessor, AutoTokenizer, AutoImageProcessor, AutoModelForCausalLM, BlipForConditionalGeneration, Blip2ForConditionalGeneration, VisionEncoderDecoderModel -import torch -import open_clip - -from huggingface_hub import hf_hub_download - -torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg') -torch.hub.download_url_to_file('https://huggingface.co/datasets/nielsr/textcaps-sample/resolve/main/stop_sign.png', 'stop_sign.png') -torch.hub.download_url_to_file('https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg', 'astronaut.jpg') - -# git_processor_base = AutoProcessor.from_pretrained("microsoft/git-base-coco") -# git_model_base = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco") - -git_processor_large_coco = AutoProcessor.from_pretrained("microsoft/git-large-coco") -git_model_large_coco = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco") - -# git_processor_large_textcaps = AutoProcessor.from_pretrained("microsoft/git-large-r-textcaps") -# git_model_large_textcaps = AutoModelForCausalLM.from_pretrained("microsoft/git-large-r-textcaps") - -# blip_processor_base = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") -# blip_model_base = 
BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base") - -# blip_processor_large = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large") -# blip_model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large") - -# blip2_processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b") -# blip2_model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16) - -# blip2_processor_8_bit = AutoProcessor.from_pretrained("Salesforce/blip2-opt-6.7b") -# blip2_model_8_bit = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-6.7b", device_map="auto", load_in_8bit=True) - -# vitgpt_processor = AutoImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning") -# vitgpt_model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning") -# vitgpt_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning") - -# coca_model, _, coca_transform = open_clip.create_model_and_transforms( -# model_name="coca_ViT-L-14", -# pretrained="mscoco_finetuned_laion2B-s13B-b90k" -# ) - -device = "cuda" if torch.cuda.is_available() else "cpu" - -# git_model_base.to(device) -# blip_model_base.to(device) -git_model_large_coco.to(device) -# git_model_large_textcaps.to(device) -# blip_model_large.to(device) -# vitgpt_model.to(device) -# coca_model.to(device) -# blip2_model.to(device) - -def generate_caption(processor, model, image, tokenizer=None, use_float_16=False): - inputs = processor(images=image, return_tensors="pt").to(device) - - if use_float_16: - inputs = inputs.to(torch.float16) - - generated_ids = model.generate(pixel_values=inputs.pixel_values, max_length=50) - - if tokenizer is not None: - generated_caption = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] - else: - generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - - return generated_caption - - -def generate_caption_coca(model, transform, image): - im = transform(image).unsqueeze(0).to(device) - with torch.no_grad(), torch.cuda.amp.autocast(): - generated = model.generate(im, seq_len=20) - return open_clip.decode(generated[0].detach()).split("<end_of_text>")[0].replace("<start_of_text>", "") - - -def generate_captions(image): - # caption_git_base = generate_caption(git_processor_base, git_model_base, image) - - caption_git_large_coco = generate_caption(git_processor_large_coco, git_model_large_coco, image) - - # caption_git_large_textcaps = generate_caption(git_processor_large_textcaps, git_model_large_textcaps, image) - - # caption_blip_base = generate_caption(blip_processor_base, blip_model_base, image) - - # caption_blip_large = generate_caption(blip_processor_large, blip_model_large, image) - - # caption_vitgpt = generate_caption(vitgpt_processor, vitgpt_model, image, vitgpt_tokenizer) - - # caption_coca = generate_caption_coca(coca_model, coca_transform, image) - - # caption_blip2 = generate_caption(blip2_processor, blip2_model, image, use_float_16=True).strip() - - # caption_blip2_8_bit = generate_caption(blip2_processor_8_bit, blip2_model_8_bit, image, use_float_16=True).strip() - - # return caption_git_large_coco, caption_git_large_textcaps, caption_blip_large, caption_coca, caption_blip2_8_bit - return caption_git_large_coco - - - -examples = [["cats.jpg"], ["stop_sign.png"], ["astronaut.jpg"]] -# outputs = [gr.outputs.Textbox(label="Caption generated by GIT-large fine-tuned on COCO"), gr.outputs.Textbox(label="Caption 
generated by GIT-large fine-tuned on TextCaps"), gr.outputs.Textbox(label="Caption generated by BLIP-large"), gr.outputs.Textbox(label="Caption generated by CoCa"), gr.outputs.Textbox(label="Caption generated by BLIP-2 OPT 6.7b")] -outputs = [ - gr.outputs.Textbox(label="Caption generated by GIT-large fine-tuned on COCO"), - # gr.outputs.Textbox(label="Caption generated by GIT-large fine-tuned on COCO"), - # gr.outputs.Textbox(label="Caption generated by GIT-large fine-tuned on TextCaps"), - # gr.outputs.Textbox(label="Caption generated by BLIP-base"), - # gr.outputs.Textbox(label="Caption generated by BLIP-large"), - # gr.outputs.Textbox(label="Caption generated by vitgpt") - ] - -title = "Interactive demo: comparing image captioning models" -description = "Gradio Demo to compare GIT, BLIP, CoCa, and BLIP-2, 4 state-of-the-art vision+language models. To use it, simply upload your image and click 'submit', or click one of the examples to load them. Read more at the links below." -article = "

<p style='text-align: center'><a href='https://huggingface.co/docs/transformers/main/model_doc/blip' target='_blank'>BLIP docs</a> | <a href='https://huggingface.co/docs/transformers/main/model_doc/git' target='_blank'>GIT docs</a></p>
    " - -interface = gr.Interface(fn=generate_captions, - inputs=gr.inputs.Image(type="pil"), - outputs=outputs, - examples=examples, - title=title, - description=description, - article=article, - enable_queue=True) -interface.launch() \ No newline at end of file diff --git a/spaces/keithhon/Real-Time-Voice-Cloning/encoder/data_objects/speaker_batch.py b/spaces/keithhon/Real-Time-Voice-Cloning/encoder/data_objects/speaker_batch.py deleted file mode 100644 index 56651dba5804a0c59c334e49ac18f8f5a4bfa444..0000000000000000000000000000000000000000 --- a/spaces/keithhon/Real-Time-Voice-Cloning/encoder/data_objects/speaker_batch.py +++ /dev/null @@ -1,12 +0,0 @@ -import numpy as np -from typing import List -from encoder.data_objects.speaker import Speaker - -class SpeakerBatch: - def __init__(self, speakers: List[Speaker], utterances_per_speaker: int, n_frames: int): - self.speakers = speakers - self.partials = {s: s.random_partial(utterances_per_speaker, n_frames) for s in speakers} - - # Array of shape (n_speakers * n_utterances, n_frames, mel_n), e.g. for 3 speakers with - # 4 utterances each of 160 frames of 40 mel coefficients: (12, 160, 40) - self.data = np.array([frames for s in speakers for _, frames, _ in self.partials[s]]) diff --git a/spaces/kepl/gpt/g4f/Provider/Providers/Lockchat.py b/spaces/kepl/gpt/g4f/Provider/Providers/Lockchat.py deleted file mode 100644 index 1bce74035403bf8615e68ccfcc9deb7e0151817a..0000000000000000000000000000000000000000 --- a/spaces/kepl/gpt/g4f/Provider/Providers/Lockchat.py +++ /dev/null @@ -1,32 +0,0 @@ -import requests -import os -import json -from ...typing import sha256, Dict, get_type_hints -url = 'http://supertest.lockchat.app' -model = ['gpt-4', 'gpt-3.5-turbo'] -supports_stream = True -needs_auth = False - -def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs): - - payload = { - "temperature": 0.7, - "messages": messages, - "model": model, - "stream": True, - } - headers = { - "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0", - } - response = requests.post("http://supertest.lockchat.app/v1/chat/completions", - json=payload, headers=headers, stream=True) - for token in response.iter_lines(): - if b'The model: `gpt-4` does not exist' in token: - print('error, retrying...') - _create_completion(model=model, messages=messages, stream=stream, temperature=temperature, **kwargs) - if b"content" in token: - token = json.loads(token.decode('utf-8').split('data: ')[1])['choices'][0]['delta'].get('content') - if token: yield (token) - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/speaker_encoder/visualizations.py b/spaces/kevinwang676/ChatGLM2-SadTalker-VC/speaker_encoder/visualizations.py deleted file mode 100644 index ec00fc64d6e9fda2bb8e613531066ac824df1451..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/speaker_encoder/visualizations.py +++ /dev/null @@ -1,178 +0,0 @@ -from speaker_encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset -from datetime import datetime -from time import perf_counter as timer -import matplotlib.pyplot as plt -import numpy as np -# import webbrowser -import visdom -import umap - -colormap = np.array([ - [76, 255, 
0], - [0, 127, 70], - [255, 0, 0], - [255, 217, 38], - [0, 135, 255], - [165, 0, 165], - [255, 167, 255], - [0, 255, 255], - [255, 96, 38], - [142, 76, 0], - [33, 0, 127], - [0, 0, 0], - [183, 183, 183], -], dtype=float) / 255 - - -class Visualizations: - def __init__(self, env_name=None, update_every=10, server="http://localhost", disabled=False): - # Tracking data - self.last_update_timestamp = timer() - self.update_every = update_every - self.step_times = [] - self.losses = [] - self.eers = [] - print("Updating the visualizations every %d steps." % update_every) - - # If visdom is disabled TODO: use a better paradigm for that - self.disabled = disabled - if self.disabled: - return - - # Set the environment name - now = str(datetime.now().strftime("%d-%m %Hh%M")) - if env_name is None: - self.env_name = now - else: - self.env_name = "%s (%s)" % (env_name, now) - - # Connect to visdom and open the corresponding window in the browser - try: - self.vis = visdom.Visdom(server, env=self.env_name, raise_exceptions=True) - except ConnectionError: - raise Exception("No visdom server detected. Run the command \"visdom\" in your CLI to " - "start it.") - # webbrowser.open("http://localhost:8097/env/" + self.env_name) - - # Create the windows - self.loss_win = None - self.eer_win = None - # self.lr_win = None - self.implementation_win = None - self.projection_win = None - self.implementation_string = "" - - def log_params(self): - if self.disabled: - return - from speaker_encoder import params_data - from speaker_encoder import params_model - param_string = "Model parameters:<br>" - for param_name in (p for p in dir(params_model) if not p.startswith("__")): - value = getattr(params_model, param_name) - param_string += "\t%s: %s<br>" % (param_name, value) - param_string += "Data parameters:<br>" - for param_name in (p for p in dir(params_data) if not p.startswith("__")): - value = getattr(params_data, param_name) - param_string += "\t%s: %s<br>" % (param_name, value) - self.vis.text(param_string, opts={"title": "Parameters"}) - - def log_dataset(self, dataset: SpeakerVerificationDataset): - if self.disabled: - return - dataset_string = "" - dataset_string += "Speakers: %s\n" % len(dataset.speakers) - dataset_string += "\n" + dataset.get_logs() - dataset_string = dataset_string.replace("\n", "<br>") - self.vis.text(dataset_string, opts={"title": "Dataset"}) - - def log_implementation(self, params): - if self.disabled: - return - implementation_string = "" - for param, value in params.items(): - implementation_string += "%s: %s\n" % (param, value) - implementation_string = implementation_string.replace("\n", "<br>") - self.implementation_string = implementation_string - self.implementation_win = self.vis.text( - implementation_string, - opts={"title": "Training implementation"} - ) - - def update(self, loss, eer, step): - # Update the tracking data - now = timer() - self.step_times.append(1000 * (now - self.last_update_timestamp)) - self.last_update_timestamp = now - self.losses.append(loss) - self.eers.append(eer) - print(".", end="") - - # Update the plots every <update_every> steps - if step % self.update_every != 0: - return - time_string = "Step time: mean: %5dms std: %5dms" % \ - (int(np.mean(self.step_times)), int(np.std(self.step_times))) - print("\nStep %6d Loss: %.4f EER: %.4f %s" % - (step, np.mean(self.losses), np.mean(self.eers), time_string)) - if not self.disabled: - self.loss_win = self.vis.line( - [np.mean(self.losses)], - [step], - win=self.loss_win, - update="append" if self.loss_win else None, - opts=dict( - legend=["Avg. loss"], - xlabel="Step", - ylabel="Loss", - title="Loss", - ) - ) - self.eer_win = self.vis.line( - [np.mean(self.eers)], - [step], - win=self.eer_win, - update="append" if self.eer_win else None, - opts=dict( - legend=["Avg. EER"], - xlabel="Step", - ylabel="EER", - title="Equal error rate" - ) - ) - if self.implementation_win is not None: - self.vis.text( - self.implementation_string + ("<b>%s</b>" % time_string), - win=self.implementation_win, - opts={"title": "Training implementation"}, - ) - - # Reset the tracking - self.losses.clear() - self.eers.clear() - self.step_times.clear() - - def draw_projections(self, embeds, utterances_per_speaker, step, out_fpath=None, - max_speakers=10): - max_speakers = min(max_speakers, len(colormap)) - embeds = embeds[:max_speakers * utterances_per_speaker] - - n_speakers = len(embeds) // utterances_per_speaker - ground_truth = np.repeat(np.arange(n_speakers), utterances_per_speaker) - colors = [colormap[i] for i in ground_truth] - - reducer = umap.UMAP() - projected = reducer.fit_transform(embeds) - plt.scatter(projected[:, 0], projected[:, 1], c=colors) - plt.gca().set_aspect("equal", "datalim") - plt.title("UMAP projection (step %d)" % step) - if not self.disabled: - self.projection_win = self.vis.matplot(plt, win=self.projection_win) - if out_fpath is not None: - plt.savefig(out_fpath) - plt.clf() - - def save(self): - if not self.disabled: - self.vis.save([self.env_name]) - \ No newline at end of file diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/speaker_encoder/ckpt/__init__.py b/spaces/kevinwang676/ChatGLM2-SadTalker/speaker_encoder/ckpt/__init__.py deleted file mode 100644 index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker/speaker_encoder/ckpt/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/configs/glint360k_r34.py b/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/configs/glint360k_r34.py deleted file mode 100644 index fda2701758a839a7161d09c25f0ca3d26033baff..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/configs/glint360k_r34.py +++ /dev/null @@ -1,26 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.loss = "cosface" -config.network = "r34" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True 
-config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "/train_tmp/glint360k" -config.num_classes = 360232 -config.num_image = 17091657 -config.num_epoch = 20 -config.warmup_epoch = -1 -config.decay_epoch = [8, 12, 15, 18] -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/kevinwang676/ControlNet-with-GPT-4/app_segmentation.py b/spaces/kevinwang676/ControlNet-with-GPT-4/app_segmentation.py deleted file mode 100644 index 6e4c629728247868cfcfcf41ded105ff9e662279..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ControlNet-with-GPT-4/app_segmentation.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env python - -import gradio as gr - -from settings import ( - DEFAULT_IMAGE_RESOLUTION, - DEFAULT_NUM_IMAGES, - MAX_IMAGE_RESOLUTION, - MAX_NUM_IMAGES, - MAX_SEED, -) -from utils import randomize_seed_fn - - -def create_demo(process): - with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - image = gr.Image() - prompt = gr.Textbox(label="Prompt") - run_button = gr.Button("Run") - with gr.Accordion("Advanced options", open=False): - preprocessor_name = gr.Radio( - label="Preprocessor", choices=["UPerNet", "None"], type="value", value="UPerNet" - ) - num_samples = gr.Slider( - label="Number of images", minimum=1, maximum=MAX_NUM_IMAGES, value=DEFAULT_NUM_IMAGES, step=1 - ) - image_resolution = gr.Slider( - label="Image resolution", - minimum=256, - maximum=MAX_IMAGE_RESOLUTION, - value=DEFAULT_IMAGE_RESOLUTION, - step=256, - ) - preprocess_resolution = gr.Slider( - label="Preprocess resolution", minimum=128, maximum=512, value=512, step=1 - ) - num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1) - guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1) - seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0) - randomize_seed = gr.Checkbox(label="Randomize seed", value=True) - a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed") - n_prompt = gr.Textbox( - label="Negative prompt", - value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality", - ) - with gr.Column(): - result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down") - inputs = [ - image, - prompt, - a_prompt, - n_prompt, - num_samples, - image_resolution, - preprocess_resolution, - num_steps, - guidance_scale, - seed, - preprocessor_name, - ] - prompt.submit( - fn=randomize_seed_fn, - inputs=[seed, randomize_seed], - outputs=seed, - queue=False, - api_name=False, - ).then( - fn=process, - inputs=inputs, - outputs=result, - api_name=False, - ) - run_button.click( - fn=randomize_seed_fn, - inputs=[seed, randomize_seed], - outputs=seed, - queue=False, - api_name=False, - ).then( - fn=process, - inputs=inputs, - outputs=result, - api_name="segmentation", - ) - return demo - - -if __name__ == "__main__": - from model import Model - - model = Model(task_name="segmentation") - demo = create_demo(model.process_segmentation) - demo.queue().launch() diff --git a/spaces/kevinwang676/VoiceChangers/src/facerender/modules/mapping.py b/spaces/kevinwang676/VoiceChangers/src/facerender/modules/mapping.py deleted file mode 100644 index 0e3a1c2d1770996080c08e9daafb346f05d7bcdd..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChangers/src/facerender/modules/mapping.py +++ /dev/null @@ 
-1,47 +0,0 @@ -import numpy as np - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class MappingNet(nn.Module): - def __init__(self, coeff_nc, descriptor_nc, layer, num_kp, num_bins): - super( MappingNet, self).__init__() - - self.layer = layer - nonlinearity = nn.LeakyReLU(0.1) - - self.first = nn.Sequential( - torch.nn.Conv1d(coeff_nc, descriptor_nc, kernel_size=7, padding=0, bias=True)) - - for i in range(layer): - net = nn.Sequential(nonlinearity, - torch.nn.Conv1d(descriptor_nc, descriptor_nc, kernel_size=3, padding=0, dilation=3)) - setattr(self, 'encoder' + str(i), net) - - self.pooling = nn.AdaptiveAvgPool1d(1) - self.output_nc = descriptor_nc - - self.fc_roll = nn.Linear(descriptor_nc, num_bins) - self.fc_pitch = nn.Linear(descriptor_nc, num_bins) - self.fc_yaw = nn.Linear(descriptor_nc, num_bins) - self.fc_t = nn.Linear(descriptor_nc, 3) - self.fc_exp = nn.Linear(descriptor_nc, 3*num_kp) - - def forward(self, input_3dmm): - out = self.first(input_3dmm) - for i in range(self.layer): - model = getattr(self, 'encoder' + str(i)) - out = model(out) + out[:,:,3:-3] - out = self.pooling(out) - out = out.view(out.shape[0], -1) - #print('out:', out.shape) - - yaw = self.fc_yaw(out) - pitch = self.fc_pitch(out) - roll = self.fc_roll(out) - t = self.fc_t(out) - exp = self.fc_exp(out) - - return {'yaw': yaw, 'pitch': pitch, 'roll': roll, 't': t, 'exp': exp} \ No newline at end of file diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/exp/upernet_global_small/config.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/exp/upernet_global_small/config.py deleted file mode 100644 index 01db96bf9b0be531aa0eaf62fee51543712f8670..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/exp/upernet_global_small/config.py +++ /dev/null @@ -1,38 +0,0 @@ -_base_ = [ - '../../configs/_base_/models/upernet_uniformer.py', - '../../configs/_base_/datasets/ade20k.py', - '../../configs/_base_/default_runtime.py', - '../../configs/_base_/schedules/schedule_160k.py' -] -model = dict( - backbone=dict( - type='UniFormer', - embed_dim=[64, 128, 320, 512], - layers=[3, 4, 8, 3], - head_dim=64, - drop_path_rate=0.25, - windows=False, - hybrid=False - ), - decode_head=dict( - in_channels=[64, 128, 320, 512], - num_classes=150 - ), - auxiliary_head=dict( - in_channels=320, - num_classes=150 - )) - -# AdamW optimizer, no weight decay for position embedding & layer norm in backbone -optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01, - paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.), - 'relative_position_bias_table': dict(decay_mult=0.), - 'norm': dict(decay_mult=0.)})) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -data=dict(samples_per_gpu=2) \ No newline at end of file diff --git a/spaces/kkinc/gsdf-Counterfeit-V2.5/app.py b/spaces/kkinc/gsdf-Counterfeit-V2.5/app.py deleted file mode 100644 index 3e61c8452c0bc94ea6cf8e7fd4fab00c30fccba4..0000000000000000000000000000000000000000 --- a/spaces/kkinc/gsdf-Counterfeit-V2.5/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/gsdf/Counterfeit-V2.5").launch() \ No newline at end of file diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/roberta/preprocess_RACE.sh b/spaces/koajoel/PolyFormer/fairseq/examples/roberta/preprocess_RACE.sh deleted file mode 100644 index 
932d2ab6e521fecc7d0297f26a8c43857541ef3b..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/examples/roberta/preprocess_RACE.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -# data should be downloaded and processed with preprocess_RACE.py -if [[ $# -ne 2 ]]; then - echo "Run as following:" - echo "./examples/roberta/preprocess_RACE.sh <race_data_folder> <output_folder>" - exit 1 -fi - -RACE_DATA_FOLDER=$1 -OUT_DATA_FOLDER=$2 - -# download bpe encoder.json, vocabulary and fairseq dictionary -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json' -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe' -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt' - -SPLITS="train dev test-middle test-high" -INPUT_TYPES="input0 input1 input2 input3 input4" -for INPUT_TYPE in $INPUT_TYPES -do - for SPLIT in $SPLITS - do - echo "BPE encoding $SPLIT/$INPUT_TYPE" - python -m examples.roberta.multiprocessing_bpe_encoder \ - --encoder-json encoder.json \ - --vocab-bpe vocab.bpe \ - --inputs "$RACE_DATA_FOLDER/$SPLIT.$INPUT_TYPE" \ - --outputs "$RACE_DATA_FOLDER/$SPLIT.$INPUT_TYPE.bpe" \ - --workers 10 \ - --keep-empty; - - done -done - -for INPUT_TYPE in $INPUT_TYPES - do - LANG="input$INPUT_TYPE" - fairseq-preprocess \ - --only-source \ - --trainpref "$RACE_DATA_FOLDER/train.$INPUT_TYPE.bpe" \ - --validpref "$RACE_DATA_FOLDER/dev.$INPUT_TYPE.bpe" \ - --testpref "$RACE_DATA_FOLDER/test-middle.$INPUT_TYPE.bpe,$RACE_DATA_FOLDER/test-high.$INPUT_TYPE.bpe" \ - --destdir "$OUT_DATA_FOLDER/$INPUT_TYPE" \ - --workers 10 \ - --srcdict dict.txt; -done - -rm -rf "$OUT_DATA_FOLDER/label" -mkdir -p "$OUT_DATA_FOLDER/label" -cp "$RACE_DATA_FOLDER/train.label" "$OUT_DATA_FOLDER/label/" -cp "$RACE_DATA_FOLDER/dev.label" "$OUT_DATA_FOLDER/label/valid.label" -cp "$RACE_DATA_FOLDER/test-middle.label" "$OUT_DATA_FOLDER/label/test.label" -cp "$RACE_DATA_FOLDER/test-high.label" "$OUT_DATA_FOLDER/label/test1.label" diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/attrs/validators.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/attrs/validators.py deleted file mode 100644 index ab2c9b3024714d3b1caeb2f0773a0274dfc10f01..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/attrs/validators.py +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-License-Identifier: MIT - -from attr.validators import * # noqa diff --git a/spaces/leave7/kazunaAI2.0/hubert/__init__ .py b/spaces/leave7/kazunaAI2.0/hubert/__init__ .py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/liangxiaohua/bingo/README.md b/spaces/liangxiaohua/bingo/README.md deleted file mode 100644 index 5d6936218874c647b5d22e13ad4be7edb8936f92..0000000000000000000000000000000000000000 --- a/spaces/liangxiaohua/bingo/README.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: bingo -emoji: 😊 -colorFrom: red -colorTo: red -sdk: docker -license: mit -duplicated_from: hf4all/bingo ---- - -
- -# Bingo - -Bingo, a New Bing that lets you breathe easy. - -A faithful recreation of the main features of the New Bing web UI: it works in mainland China, is compatible with most Microsoft Bing AI features, and can be self-hosted. - -![Github stars](https://badgen.net/github/stars/weaigc/bingo?icon=github&label=stars) -![Github issues](https://img.shields.io/github/issues/weaigc/bingo) -[![docker build](https://github.com/weaigc/bingo/actions/workflows/docker.yml/badge.svg)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![docker hub](https://badgen.net/docker/size/weaigc/bingo?icon=docker&label=image%20size)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![MIT License](https://img.shields.io/badge/license-MIT-97c50f)](https://github.com/weaigc/bingo/blob/main/license) - -For issue reports, please go to https://github.com/weaigc/bingo/issues -
    - - diff --git a/spaces/lithiumice/SadTalker/src/facerender/modules/keypoint_detector.py b/spaces/lithiumice/SadTalker/src/facerender/modules/keypoint_detector.py deleted file mode 100644 index 62a38a962b2f1a4326aac771aced353ec5e22a96..0000000000000000000000000000000000000000 --- a/spaces/lithiumice/SadTalker/src/facerender/modules/keypoint_detector.py +++ /dev/null @@ -1,179 +0,0 @@ -from torch import nn -import torch -import torch.nn.functional as F - -from src.facerender.sync_batchnorm import SynchronizedBatchNorm2d as BatchNorm2d -from src.facerender.modules.util import KPHourglass, make_coordinate_grid, AntiAliasInterpolation2d, ResBottleneck - - -class KPDetector(nn.Module): - """ - Detecting canonical keypoints. Return keypoint position and jacobian near each keypoint. - """ - - def __init__(self, block_expansion, feature_channel, num_kp, image_channel, max_features, reshape_channel, reshape_depth, - num_blocks, temperature, estimate_jacobian=False, scale_factor=1, single_jacobian_map=False): - super(KPDetector, self).__init__() - - self.predictor = KPHourglass(block_expansion, in_features=image_channel, - max_features=max_features, reshape_features=reshape_channel, reshape_depth=reshape_depth, num_blocks=num_blocks) - - # self.kp = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=num_kp, kernel_size=7, padding=3) - self.kp = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=num_kp, kernel_size=3, padding=1) - - if estimate_jacobian: - self.num_jacobian_maps = 1 if single_jacobian_map else num_kp - # self.jacobian = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=9 * self.num_jacobian_maps, kernel_size=7, padding=3) - self.jacobian = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=9 * self.num_jacobian_maps, kernel_size=3, padding=1) - ''' - initial as: - [[1 0 0] - [0 1 0] - [0 0 1]] - ''' - self.jacobian.weight.data.zero_() - self.jacobian.bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0, 0, 0, 1] * self.num_jacobian_maps, dtype=torch.float)) - else: - self.jacobian = None - - self.temperature = temperature - self.scale_factor = scale_factor - if self.scale_factor != 1: - self.down = AntiAliasInterpolation2d(image_channel, self.scale_factor) - - def gaussian2kp(self, heatmap): - """ - Extract the mean from a heatmap - """ - shape = heatmap.shape - heatmap = heatmap.unsqueeze(-1) - grid = make_coordinate_grid(shape[2:], heatmap.type()).unsqueeze_(0).unsqueeze_(0) - value = (heatmap * grid).sum(dim=(2, 3, 4)) - kp = {'value': value} - - return kp - - def forward(self, x): - if self.scale_factor != 1: - x = self.down(x) - - feature_map = self.predictor(x) - prediction = self.kp(feature_map) - - final_shape = prediction.shape - heatmap = prediction.view(final_shape[0], final_shape[1], -1) - heatmap = F.softmax(heatmap / self.temperature, dim=2) - heatmap = heatmap.view(*final_shape) - - out = self.gaussian2kp(heatmap) - - if self.jacobian is not None: - jacobian_map = self.jacobian(feature_map) - jacobian_map = jacobian_map.reshape(final_shape[0], self.num_jacobian_maps, 9, final_shape[2], - final_shape[3], final_shape[4]) - heatmap = heatmap.unsqueeze(2) - - jacobian = heatmap * jacobian_map - jacobian = jacobian.view(final_shape[0], final_shape[1], 9, -1) - jacobian = jacobian.sum(dim=-1) - jacobian = jacobian.view(jacobian.shape[0], jacobian.shape[1], 3, 3) - out['jacobian'] = jacobian - - return out - - -class HEEstimator(nn.Module): - """ - Estimating head pose and expression. 
- """ - - def __init__(self, block_expansion, feature_channel, num_kp, image_channel, max_features, num_bins=66, estimate_jacobian=True): - super(HEEstimator, self).__init__() - - self.conv1 = nn.Conv2d(in_channels=image_channel, out_channels=block_expansion, kernel_size=7, padding=3, stride=2) - self.norm1 = BatchNorm2d(block_expansion, affine=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - self.conv2 = nn.Conv2d(in_channels=block_expansion, out_channels=256, kernel_size=1) - self.norm2 = BatchNorm2d(256, affine=True) - - self.block1 = nn.Sequential() - for i in range(3): - self.block1.add_module('b1_'+ str(i), ResBottleneck(in_features=256, stride=1)) - - self.conv3 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=1) - self.norm3 = BatchNorm2d(512, affine=True) - self.block2 = ResBottleneck(in_features=512, stride=2) - - self.block3 = nn.Sequential() - for i in range(3): - self.block3.add_module('b3_'+ str(i), ResBottleneck(in_features=512, stride=1)) - - self.conv4 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=1) - self.norm4 = BatchNorm2d(1024, affine=True) - self.block4 = ResBottleneck(in_features=1024, stride=2) - - self.block5 = nn.Sequential() - for i in range(5): - self.block5.add_module('b5_'+ str(i), ResBottleneck(in_features=1024, stride=1)) - - self.conv5 = nn.Conv2d(in_channels=1024, out_channels=2048, kernel_size=1) - self.norm5 = BatchNorm2d(2048, affine=True) - self.block6 = ResBottleneck(in_features=2048, stride=2) - - self.block7 = nn.Sequential() - for i in range(2): - self.block7.add_module('b7_'+ str(i), ResBottleneck(in_features=2048, stride=1)) - - self.fc_roll = nn.Linear(2048, num_bins) - self.fc_pitch = nn.Linear(2048, num_bins) - self.fc_yaw = nn.Linear(2048, num_bins) - - self.fc_t = nn.Linear(2048, 3) - - self.fc_exp = nn.Linear(2048, 3*num_kp) - - def forward(self, x): - out = self.conv1(x) - out = self.norm1(out) - out = F.relu(out) - out = self.maxpool(out) - - out = self.conv2(out) - out = self.norm2(out) - out = F.relu(out) - - out = self.block1(out) - - out = self.conv3(out) - out = self.norm3(out) - out = F.relu(out) - out = self.block2(out) - - out = self.block3(out) - - out = self.conv4(out) - out = self.norm4(out) - out = F.relu(out) - out = self.block4(out) - - out = self.block5(out) - - out = self.conv5(out) - out = self.norm5(out) - out = F.relu(out) - out = self.block6(out) - - out = self.block7(out) - - out = F.adaptive_avg_pool2d(out, 1) - out = out.view(out.shape[0], -1) - - yaw = self.fc_roll(out) - pitch = self.fc_pitch(out) - roll = self.fc_yaw(out) - t = self.fc_t(out) - exp = self.fc_exp(out) - - return {'yaw': yaw, 'pitch': pitch, 'roll': roll, 't': t, 'exp': exp} - diff --git a/spaces/ma-xu/LIVE/pydiffvg/parse_svg.py b/spaces/ma-xu/LIVE/pydiffvg/parse_svg.py deleted file mode 100644 index fb1f3fc286074f3cd82b37baffbdd00440b72a8a..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/pydiffvg/parse_svg.py +++ /dev/null @@ -1,583 +0,0 @@ -import torch -import xml.etree.ElementTree as etree -import numpy as np -import diffvg -import os -import pydiffvg -import svgpathtools -import svgpathtools.parser -import re -import warnings -import cssutils -import logging -import matplotlib.colors -cssutils.log.setLevel(logging.ERROR) - -def remove_namespaces(s): - """ - {...} ... -> ... 
- """ - return re.sub('{.*}', '', s) - -def parse_style(s, defs): - style_dict = {} - for e in s.split(';'): - key_value = e.split(':') - if len(key_value) == 2: - key = key_value[0].strip() - value = key_value[1].strip() - if key == 'fill' or key == 'stroke': - # Special case: convert colors into tensor in definitions so - # that different shapes can share the same color - value = parse_color(value, defs) - style_dict[key] = value - return style_dict - -def parse_hex(s): - """ - Hex to tuple - """ - s = s.lstrip('#') - if len(s) == 3: - s = s[0] + s[0] + s[1] + s[1] + s[2] + s[2] - rgb = tuple(int(s[i:i+2], 16) for i in (0, 2, 4)) - # sRGB to RGB - # return torch.pow(torch.tensor([rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0]), 2.2) - return torch.pow(torch.tensor([rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0]), 1.0) - -def parse_int(s): - """ - trim alphabets - """ - return int(float(''.join(i for i in s if (not i.isalpha())))) - -def parse_color(s, defs): - if s is None: - return None - if isinstance(s, torch.Tensor): - return s - s = s.lstrip(' ') - color = torch.tensor([0.0, 0.0, 0.0, 1.0]) - if s[0] == '#': - color[:3] = parse_hex(s) - elif s[:3] == 'url': - # url(#id) - color = defs[s[4:-1].lstrip('#')] - elif s == 'none': - color = None - elif s[:4] == 'rgb(': - rgb = s[4:-1].split(',') - color = torch.tensor([int(rgb[0]) / 255.0, int(rgb[1]) / 255.0, int(rgb[2]) / 255.0, 1.0]) - elif s == 'none': - return None - else: - try : - rgba = matplotlib.colors.to_rgba(s) - color = torch.tensor(rgba) - except ValueError : - warnings.warn('Unknown color command ' + s) - return color - -# https://github.com/mathandy/svgpathtools/blob/7ebc56a831357379ff22216bec07e2c12e8c5bc6/svgpathtools/parser.py -def _parse_transform_substr(transform_substr): - type_str, value_str = transform_substr.split('(') - value_str = value_str.replace(',', ' ') - values = list(map(float, filter(None, value_str.split(' ')))) - - transform = np.identity(3) - if 'matrix' in type_str: - transform[0:2, 0:3] = np.array([values[0:6:2], values[1:6:2]]) - elif 'translate' in transform_substr: - transform[0, 2] = values[0] - if len(values) > 1: - transform[1, 2] = values[1] - elif 'scale' in transform_substr: - x_scale = values[0] - y_scale = values[1] if (len(values) > 1) else x_scale - transform[0, 0] = x_scale - transform[1, 1] = y_scale - elif 'rotate' in transform_substr: - angle = values[0] * np.pi / 180.0 - if len(values) == 3: - offset = values[1:3] - else: - offset = (0, 0) - tf_offset = np.identity(3) - tf_offset[0:2, 2:3] = np.array([[offset[0]], [offset[1]]]) - tf_rotate = np.identity(3) - tf_rotate[0:2, 0:2] = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]) - tf_offset_neg = np.identity(3) - tf_offset_neg[0:2, 2:3] = np.array([[-offset[0]], [-offset[1]]]) - - transform = tf_offset.dot(tf_rotate).dot(tf_offset_neg) - elif 'skewX' in transform_substr: - transform[0, 1] = np.tan(values[0] * np.pi / 180.0) - elif 'skewY' in transform_substr: - transform[1, 0] = np.tan(values[0] * np.pi / 180.0) - else: - # Return an identity matrix if the type of transform is unknown, and warn the user - warnings.warn('Unknown SVG transform type: {0}'.format(type_str)) - return transform - -def parse_transform(transform_str): - """ - Converts a valid SVG transformation string into a 3x3 matrix. 
- If the string is empty or null, this returns a 3x3 identity matrix - """ - if not transform_str: - return np.identity(3) - elif not isinstance(transform_str, str): - raise TypeError('Must provide a string to parse') - - total_transform = np.identity(3) - transform_substrs = transform_str.split(')')[:-1] # Skip the last element, because it should be empty - for substr in transform_substrs: - total_transform = total_transform.dot(_parse_transform_substr(substr)) - - return torch.from_numpy(total_transform).type(torch.float32) - -def parse_linear_gradient(node, transform, defs): - begin = torch.tensor([0.0, 0.0]) - end = torch.tensor([0.0, 0.0]) - offsets = [] - stop_colors = [] - # Inherit from parent - for key in node.attrib: - if remove_namespaces(key) == 'href': - value = node.attrib[key] - parent = defs[value.lstrip('#')] - begin = parent.begin - end = parent.end - offsets = parent.offsets - stop_colors = parent.stop_colors - - for attrib in node.attrib: - attrib = remove_namespaces(attrib) - if attrib == 'x1': - begin[0] = float(node.attrib['x1']) - elif attrib == 'y1': - begin[1] = float(node.attrib['y1']) - elif attrib == 'x2': - end[0] = float(node.attrib['x2']) - elif attrib == 'y2': - end[1] = float(node.attrib['y2']) - elif attrib == 'gradientTransform': - transform = transform @ parse_transform(node.attrib['gradientTransform']) - - begin = transform @ torch.cat((begin, torch.ones([1]))) - begin = begin / begin[2] - begin = begin[:2] - end = transform @ torch.cat((end, torch.ones([1]))) - end = end / end[2] - end = end[:2] - - for child in node: - tag = remove_namespaces(child.tag) - if tag == 'stop': - offset = float(child.attrib['offset']) - color = [0.0, 0.0, 0.0, 1.0] - if 'stop-color' in child.attrib: - c = parse_color(child.attrib['stop-color'], defs) - color[:3] = [c[0], c[1], c[2]] - if 'stop-opacity' in child.attrib: - color[3] = float(child.attrib['stop-opacity']) - if 'style' in child.attrib: - style = parse_style(child.attrib['style'], defs) - if 'stop-color' in style: - c = parse_color(style['stop-color'], defs) - color[:3] = [c[0], c[1], c[2]] - if 'stop-opacity' in style: - color[3] = float(style['stop-opacity']) - offsets.append(offset) - stop_colors.append(color) - if isinstance(offsets, list): - offsets = torch.tensor(offsets) - if isinstance(stop_colors, list): - stop_colors = torch.tensor(stop_colors) - - return pydiffvg.LinearGradient(begin, end, offsets, stop_colors) - - -def parse_radial_gradient(node, transform, defs): - begin = torch.tensor([0.0, 0.0]) - end = torch.tensor([0.0, 0.0]) - center = torch.tensor([0.0, 0.0]) - radius = torch.tensor([0.0, 0.0]) - offsets = [] - stop_colors = [] - # Inherit from parent - for key in node.attrib: - if remove_namespaces(key) == 'href': - value = node.attrib[key] - parent = defs[value.lstrip('#')] - begin = parent.begin - end = parent.end - offsets = parent.offsets - stop_colors = parent.stop_colors - - for attrib in node.attrib: - attrib = remove_namespaces(attrib) - if attrib == 'cx': - center[0] = float(node.attrib['cx']) - elif attrib == 'cy': - center[1] = float(node.attrib['cy']) - elif attrib == 'fx': - radius[0] = float(node.attrib['fx']) - elif attrib == 'fy': - radius[1] = float(node.attrib['fy']) - elif attrib == 'fr': - radius[0] = float(node.attrib['fr']) - radius[1] = float(node.attrib['fr']) - elif attrib == 'gradientTransform': - transform = transform @ parse_transform(node.attrib['gradientTransform']) - - # TODO: this is incorrect - center = transform @ torch.cat((center, torch.ones([1]))) - center 
= center / center[2] - center = center[:2] - - for child in node: - tag = remove_namespaces(child.tag) - if tag == 'stop': - offset = float(child.attrib['offset']) - color = [0.0, 0.0, 0.0, 1.0] - if 'stop-color' in child.attrib: - c = parse_color(child.attrib['stop-color'], defs) - color[:3] = [c[0], c[1], c[2]] - if 'stop-opacity' in child.attrib: - color[3] = float(child.attrib['stop-opacity']) - if 'style' in child.attrib: - style = parse_style(child.attrib['style'], defs) - if 'stop-color' in style: - c = parse_color(style['stop-color'], defs) - color[:3] = [c[0], c[1], c[2]] - if 'stop-opacity' in style: - color[3] = float(style['stop-opacity']) - offsets.append(offset) - stop_colors.append(color) - if isinstance(offsets, list): - offsets = torch.tensor(offsets) - if isinstance(stop_colors, list): - stop_colors = torch.tensor(stop_colors) - - return pydiffvg.RadialGradient(begin, end, offsets, stop_colors) - -def parse_stylesheet(node, transform, defs): - # collect CSS classes - sheet = cssutils.parseString(node.text) - for rule in sheet: - if hasattr(rule, 'selectorText') and hasattr(rule, 'style'): - name = rule.selectorText - if len(name) >= 2 and name[0] == '.': - defs[name[1:]] = parse_style(rule.style.getCssText(), defs) - return defs - -def parse_defs(node, transform, defs): - for child in node: - tag = remove_namespaces(child.tag) - if tag == 'linearGradient': - if 'id' in child.attrib: - defs[child.attrib['id']] = parse_linear_gradient(child, transform, defs) - elif tag == 'radialGradient': - if 'id' in child.attrib: - defs[child.attrib['id']] = parse_radial_gradient(child, transform, defs) - elif tag == 'style': - defs = parse_stylesheet(child, transform, defs) - return defs - -def parse_common_attrib(node, transform, fill_color, defs): - attribs = {} - if 'class' in node.attrib: - attribs.update(defs[node.attrib['class']]) - attribs.update(node.attrib) - - name = '' - if 'id' in node.attrib: - name = node.attrib['id'] - - stroke_color = None - stroke_width = torch.tensor(0.5) - use_even_odd_rule = False - - new_transform = transform - if 'transform' in attribs: - new_transform = transform @ parse_transform(attribs['transform']) - if 'fill' in attribs: - fill_color = parse_color(attribs['fill'], defs) - fill_opacity = 1.0 - if 'fill-opacity' in attribs: - fill_opacity *= float(attribs['fill-opacity']) - if 'opacity' in attribs: - fill_opacity *= float(attribs['opacity']) - # Ignore opacity if the color is a gradient - if isinstance(fill_color, torch.Tensor): - fill_color[3] = fill_opacity - - if 'fill-rule' in attribs: - if attribs['fill-rule'] == "evenodd": - use_even_odd_rule = True - elif attribs['fill-rule'] == "nonzero": - use_even_odd_rule = False - else: - warnings.warn('Unknown fill-rule: {}'.format(attribs['fill-rule'])) - - if 'stroke' in attribs: - stroke_color = parse_color(attribs['stroke'], defs) - - if 'stroke-width' in attribs: - stroke_width = attribs['stroke-width'] - if stroke_width[-2:] == 'px': - stroke_width = stroke_width[:-2] - stroke_width = torch.tensor(float(stroke_width) / 2.0) - - if 'style' in attribs: - style = parse_style(attribs['style'], defs) - if 'fill' in style: - fill_color = parse_color(style['fill'], defs) - fill_opacity = 1.0 - if 'fill-opacity' in style: - fill_opacity *= float(style['fill-opacity']) - if 'opacity' in style: - fill_opacity *= float(style['opacity']) - if 'fill-rule' in style: - if style['fill-rule'] == "evenodd": - use_even_odd_rule = True - elif style['fill-rule'] == "nonzero": - use_even_odd_rule = False - else: - 
warnings.warn('Unknown fill-rule: {}'.format(style['fill-rule'])) - # Ignore opacity if the color is a gradient - if isinstance(fill_color, torch.Tensor): - fill_color[3] = fill_opacity - if 'stroke' in style: - if style['stroke'] != 'none': - stroke_color = parse_color(style['stroke'], defs) - # Ignore opacity if the color is a gradient - if isinstance(stroke_color, torch.Tensor): - if 'stroke-opacity' in style: - stroke_color[3] = float(style['stroke-opacity']) - if 'opacity' in style: - stroke_color[3] *= float(style['opacity']) - if 'stroke-width' in style: - stroke_width = style['stroke-width'] - if stroke_width[-2:] == 'px': - stroke_width = stroke_width[:-2] - stroke_width = torch.tensor(float(stroke_width) / 2.0) - - if isinstance(fill_color, pydiffvg.LinearGradient): - fill_color.begin = new_transform @ torch.cat((fill_color.begin, torch.ones([1]))) - fill_color.begin = fill_color.begin / fill_color.begin[2] - fill_color.begin = fill_color.begin[:2] - fill_color.end = new_transform @ torch.cat((fill_color.end, torch.ones([1]))) - fill_color.end = fill_color.end / fill_color.end[2] - fill_color.end = fill_color.end[:2] - if isinstance(stroke_color, pydiffvg.LinearGradient): - stroke_color.begin = new_transform @ torch.cat((stroke_color.begin, torch.ones([1]))) - stroke_color.begin = stroke_color.begin / stroke_color.begin[2] - stroke_color.begin = stroke_color.begin[:2] - stroke_color.end = new_transform @ torch.cat((stroke_color.end, torch.ones([1]))) - stroke_color.end = stroke_color.end / stroke_color.end[2] - stroke_color.end = stroke_color.end[:2] - if 'filter' in style: - print('*** WARNING ***: Ignoring filter for path with id "{}"'.format(name)) - - return new_transform, fill_color, stroke_color, stroke_width, use_even_odd_rule - -def is_shape(tag): - return tag == 'path' or tag == 'polygon' or tag == 'line' or tag == 'circle' or tag == 'rect' - -def parse_shape(node, transform, fill_color, shapes, shape_groups, defs): - tag = remove_namespaces(node.tag) - new_transform, new_fill_color, stroke_color, stroke_width, use_even_odd_rule = \ - parse_common_attrib(node, transform, fill_color, defs) - if tag == 'path': - d = node.attrib['d'] - name = '' - if 'id' in node.attrib: - name = node.attrib['id'] - force_closing = new_fill_color is not None - paths = pydiffvg.from_svg_path(d, new_transform, force_closing) - for idx, path in enumerate(paths): - assert(path.points.shape[1] == 2) - path.stroke_width = stroke_width - path.source_id = name - path.id = "{}-{}".format(name,idx) if len(paths)>1 else name - prev_shapes_size = len(shapes) - shapes = shapes + paths - shape_ids = torch.tensor(list(range(prev_shapes_size, len(shapes)))) - shape_groups.append(pydiffvg.ShapeGroup(\ - shape_ids = shape_ids, - fill_color = new_fill_color, - stroke_color = stroke_color, - use_even_odd_rule = use_even_odd_rule, - id = name)) - elif tag == 'polygon': - name = '' - if 'id' in node.attrib: - name = node.attrib['id'] - force_closing = new_fill_color is not None - pts = node.attrib['points'].strip() - pts = pts.split(' ') - # import ipdb; ipdb.set_trace() - pts = [[float(y) for y in re.split(',| ', x)] for x in pts if x] - pts = torch.tensor(pts, dtype=torch.float32).view(-1, 2) - polygon = pydiffvg.Polygon(pts, force_closing) - polygon.stroke_width = stroke_width - shape_ids = torch.tensor([len(shapes)]) - shapes.append(polygon) - shape_groups.append(pydiffvg.ShapeGroup(\ - shape_ids = shape_ids, - fill_color = new_fill_color, - stroke_color = stroke_color, - use_even_odd_rule = 
use_even_odd_rule, - shape_to_canvas = new_transform, - id = name)) - elif tag == 'line': - x1 = float(node.attrib['x1']) - y1 = float(node.attrib['y1']) - x2 = float(node.attrib['x2']) - y2 = float(node.attrib['y2']) - p1 = torch.tensor([x1, y1]) - p2 = torch.tensor([x2, y2]) - points = torch.stack((p1, p2)) - line = pydiffvg.Polygon(points, False) - line.stroke_width = stroke_width - shape_ids = torch.tensor([len(shapes)]) - shapes.append(line) - shape_groups.append(pydiffvg.ShapeGroup(\ - shape_ids = shape_ids, - fill_color = new_fill_color, - stroke_color = stroke_color, - use_even_odd_rule = use_even_odd_rule, - shape_to_canvas = new_transform)) - elif tag == 'circle': - radius = float(node.attrib['r']) - cx = float(node.attrib['cx']) - cy = float(node.attrib['cy']) - name = '' - if 'id' in node.attrib: - name = node.attrib['id'] - center = torch.tensor([cx, cy]) - circle = pydiffvg.Circle(radius = torch.tensor(radius), - center = center) - circle.stroke_width = stroke_width - shape_ids = torch.tensor([len(shapes)]) - shapes.append(circle) - shape_groups.append(pydiffvg.ShapeGroup(\ - shape_ids = shape_ids, - fill_color = new_fill_color, - stroke_color = stroke_color, - use_even_odd_rule = use_even_odd_rule, - shape_to_canvas = new_transform)) - elif tag == 'ellipse': - rx = float(node.attrib['rx']) - ry = float(node.attrib['ry']) - cx = float(node.attrib['cx']) - cy = float(node.attrib['cy']) - name = '' - if 'id' in node.attrib: - name = node.attrib['id'] - center = torch.tensor([cx, cy]) - ellipse = pydiffvg.Ellipse(radius = torch.tensor([rx, ry]), - center = center) - ellipse.stroke_width = stroke_width - shape_ids = torch.tensor([len(shapes)]) - shapes.append(ellipse) - shape_groups.append(pydiffvg.ShapeGroup(\ - shape_ids = shape_ids, - fill_color = new_fill_color, - stroke_color = stroke_color, - use_even_odd_rule = use_even_odd_rule, - shape_to_canvas = new_transform)) - elif tag == 'rect': - x = 0.0 - y = 0.0 - if 'x' in node.attrib: - x = float(node.attrib['x']) - if 'y' in node.attrib: - y = float(node.attrib['y']) - w = float(node.attrib['width']) - h = float(node.attrib['height']) - p_min = torch.tensor([x, y]) - p_max = torch.tensor([x + w, y + h]) - rect = pydiffvg.Rect(p_min = p_min, p_max = p_max) - rect.stroke_width = stroke_width - shape_ids = torch.tensor([len(shapes)]) - shapes.append(rect) - shape_groups.append(pydiffvg.ShapeGroup(\ - shape_ids = shape_ids, - fill_color = new_fill_color, - stroke_color = stroke_color, - use_even_odd_rule = use_even_odd_rule, - shape_to_canvas = new_transform)) - return shapes, shape_groups - -def parse_group(node, transform, fill_color, shapes, shape_groups, defs): - if 'transform' in node.attrib: - transform = transform @ parse_transform(node.attrib['transform']) - if 'fill' in node.attrib: - fill_color = parse_color(node.attrib['fill'], defs) - for child in node: - tag = remove_namespaces(child.tag) - if is_shape(tag): - shapes, shape_groups = parse_shape(\ - child, transform, fill_color, shapes, shape_groups, defs) - elif tag == 'g': - shapes, shape_groups = parse_group(\ - child, transform, fill_color, shapes, shape_groups, defs) - return shapes, shape_groups - -def parse_scene(node): - canvas_width = -1 - canvas_height = -1 - defs = {} - shapes = [] - shape_groups = [] - fill_color = torch.tensor([0.0, 0.0, 0.0, 1.0]) - transform = torch.eye(3) - if 'viewBox' in node.attrib: - view_box_array = node.attrib['viewBox'].split() - canvas_width = parse_int(view_box_array[2]) - canvas_height = parse_int(view_box_array[3]) - else: - if 
'width' in node.attrib: - canvas_width = parse_int(node.attrib['width']) - else: - print('Warning: Can\'t find canvas width.') - if 'height' in node.attrib: - canvas_height = parse_int(node.attrib['height']) - else: - print('Warning: Can\'t find canvas height.') - for child in node: - tag = remove_namespaces(child.tag) - if tag == 'defs': - defs = parse_defs(child, transform, defs) - elif tag == 'style': - defs = parse_stylesheet(child, transform, defs) - elif tag == 'linearGradient': - if 'id' in child.attrib: - defs[child.attrib['id']] = parse_linear_gradient(child, transform, defs) - elif tag == 'radialGradient': - if 'id' in child.attrib: - defs[child.attrib['id']] = parse_radial_gradient(child, transform, defs) - elif is_shape(tag): - shapes, shape_groups = parse_shape(\ - child, transform, fill_color, shapes, shape_groups, defs) - elif tag == 'g': - shapes, shape_groups = parse_group(\ - child, transform, fill_color, shapes, shape_groups, defs) - return canvas_width, canvas_height, shapes, shape_groups - -def svg_to_scene(filename): - """ - Load from a SVG file and convert to PyTorch tensors. - """ - - tree = etree.parse(filename) - root = tree.getroot() - cwd = os.getcwd() - if (os.path.dirname(filename) != ''): - os.chdir(os.path.dirname(filename)) - ret = parse_scene(root) - os.chdir(cwd) - return ret diff --git a/spaces/ma-xu/LIVE/thrust/thrust/detail/complex/catrigf.h b/spaces/ma-xu/LIVE/thrust/thrust/detail/complex/catrigf.h deleted file mode 100644 index aa924717a7c30e380dcbaf8fe9d1a69b52c4f27e..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/detail/complex/catrigf.h +++ /dev/null @@ -1,500 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * Copyright 2013 Filipe RNC Maia - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/*- - * Copyright (c) 2012 Stephen Montgomery-Smith - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -/* - * Adapted from FreeBSD by Filipe Maia : - * freebsd/lib/msun/src/catrig.c - */ - -#pragma once - -#include -#include -#include -#include -#include - -namespace thrust{ -namespace detail{ -namespace complex{ - -using thrust::complex; - -__host__ __device__ inline - complex clog_for_large_values(complex z); - -/* - * The algorithm is very close to that in "Implementing the complex arcsine - * and arccosine functions using exception handling" by T. E. Hull, Thomas F. - * Fairgrieve, and Ping Tak Peter Tang, published in ACM Transactions on - * Mathematical Software, Volume 23 Issue 3, 1997, Pages 299-335, - * http://dl.acm.org/citation.cfm?id=275324. - * - * See catrig.c for complete comments. - * - * XXX comments were removed automatically, and even short ones on the right - * of statements were removed (all of them), contrary to normal style. Only - * a few comments on the right of declarations remain. - */ - -__host__ __device__ -inline float -f(float a, float b, float hypot_a_b) -{ - if (b < 0.0f) - return ((hypot_a_b - b) / 2.0f); - if (b == 0.0f) - return (a / 2.0f); - return (a * a / (hypot_a_b + b) / 2.0f); -} - -/* - * All the hard work is contained in this function. - * x and y are assumed positive or zero, and less than RECIP_EPSILON. - * Upon return: - * rx = Re(casinh(z)) = -Im(cacos(y + I*x)). - * B_is_usable is set to 1 if the value of B is usable. - * If B_is_usable is set to 0, sqrt_A2my2 = sqrt(A*A - y*y), and new_y = y. - * If returning sqrt_A2my2 has potential to result in an underflow, it is - * rescaled, and new_y is similarly rescaled. - */ -__host__ __device__ -inline void -do_hard_work(float x, float y, float *rx, int *B_is_usable, float *B, - float *sqrt_A2my2, float *new_y) -{ - float R, S, A; /* A, B, R, and S are as in Hull et al. */ - float Am1, Amy; /* A-1, A-y. 
*/ - const float A_crossover = 10; /* Hull et al suggest 1.5, but 10 works better */ - const float FOUR_SQRT_MIN = 4.336808689942017736029811e-19f;; /* =0x1p-61; >= 4 * sqrt(FLT_MIN) */ - const float B_crossover = 0.6417f; /* suggested by Hull et al */ - R = hypotf(x, y + 1); - S = hypotf(x, y - 1); - - A = (R + S) / 2; - if (A < 1) - A = 1; - - if (A < A_crossover) { - if (y == 1 && x < FLT_EPSILON * FLT_EPSILON / 128) { - *rx = sqrtf(x); - } else if (x >= FLT_EPSILON * fabsf(y - 1)) { - Am1 = f(x, 1 + y, R) + f(x, 1 - y, S); - *rx = log1pf(Am1 + sqrtf(Am1 * (A + 1))); - } else if (y < 1) { - *rx = x / sqrtf((1 - y) * (1 + y)); - } else { - *rx = log1pf((y - 1) + sqrtf((y - 1) * (y + 1))); - } - } else { - *rx = logf(A + sqrtf(A * A - 1)); - } - - *new_y = y; - - if (y < FOUR_SQRT_MIN) { - *B_is_usable = 0; - *sqrt_A2my2 = A * (2 / FLT_EPSILON); - *new_y = y * (2 / FLT_EPSILON); - return; - } - - *B = y / A; - *B_is_usable = 1; - - if (*B > B_crossover) { - *B_is_usable = 0; - if (y == 1 && x < FLT_EPSILON / 128) { - *sqrt_A2my2 = sqrtf(x) * sqrtf((A + y) / 2); - } else if (x >= FLT_EPSILON * fabsf(y - 1)) { - Amy = f(x, y + 1, R) + f(x, y - 1, S); - *sqrt_A2my2 = sqrtf(Amy * (A + y)); - } else if (y > 1) { - *sqrt_A2my2 = x * (4 / FLT_EPSILON / FLT_EPSILON) * y / - sqrtf((y + 1) * (y - 1)); - *new_y = y * (4 / FLT_EPSILON / FLT_EPSILON); - } else { - *sqrt_A2my2 = sqrtf((1 - y) * (1 + y)); - } - } - -} - -__host__ __device__ inline -complex -casinhf(complex z) -{ - float x, y, ax, ay, rx, ry, B, sqrt_A2my2, new_y; - int B_is_usable; - complex w; - const float RECIP_EPSILON = 1.0 / FLT_EPSILON; - const float m_ln2 = 6.9314718055994531e-1f; /* 0x162e42fefa39ef.0p-53 */ - x = z.real(); - y = z.imag(); - ax = fabsf(x); - ay = fabsf(y); - - if (isnan(x) || isnan(y)) { - if (isinf(x)) - return (complex(x, y + y)); - if (isinf(y)) - return (complex(y, x + x)); - if (y == 0) - return (complex(x + x, y)); - return (complex(x + 0.0f + (y + 0), x + 0.0f + (y + 0))); - } - - if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) { - if (signbit(x) == 0) - w = clog_for_large_values(z) + m_ln2; - else - w = clog_for_large_values(-z) + m_ln2; - return (complex(copysignf(w.real(), x), - copysignf(w.imag(), y))); - } - - if (x == 0 && y == 0) - return (z); - - raise_inexact(); - - const float SQRT_6_EPSILON = 8.4572793338e-4f; /* 0xddb3d7.0p-34 */ - if (ax < SQRT_6_EPSILON / 4 && ay < SQRT_6_EPSILON / 4) - return (z); - - do_hard_work(ax, ay, &rx, &B_is_usable, &B, &sqrt_A2my2, &new_y); - if (B_is_usable) - ry = asinf(B); - else - ry = atan2f(new_y, sqrt_A2my2); - return (complex(copysignf(rx, x), copysignf(ry, y))); -} - -__host__ __device__ inline -complex casinf(complex z) -{ - complex w = casinhf(complex(z.imag(), z.real())); - - return (complex(w.imag(), w.real())); -} - -__host__ __device__ inline -complex cacosf(complex z) -{ - float x, y, ax, ay, rx, ry, B, sqrt_A2mx2, new_x; - int sx, sy; - int B_is_usable; - complex w; - const float pio2_hi = 1.5707963267948966e0f; /* 0x1921fb54442d18.0p-52 */ - const volatile float pio2_lo = 6.1232339957367659e-17f; /* 0x11a62633145c07.0p-106 */ - const float m_ln2 = 6.9314718055994531e-1f; /* 0x162e42fefa39ef.0p-53 */ - - x = z.real(); - y = z.imag(); - sx = signbit(x); - sy = signbit(y); - ax = fabsf(x); - ay = fabsf(y); - - if (isnan(x) || isnan(y)) { - if (isinf(x)) - return (complex(y + y, -infinity())); - if (isinf(y)) - return (complex(x + x, -y)); - if (x == 0) - return (complex(pio2_hi + pio2_lo, y + y)); - return (complex(x + 0.0f + (y + 0), x + 0.0f + (y + 
0))); - } - - const float RECIP_EPSILON = 1.0 / FLT_EPSILON; - if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) { - w = clog_for_large_values(z); - rx = fabsf(w.imag()); - ry = w.real() + m_ln2; - if (sy == 0) - ry = -ry; - return (complex(rx, ry)); - } - - if (x == 1 && y == 0) - return (complex(0, -y)); - - raise_inexact(); - - const float SQRT_6_EPSILON = 8.4572793338e-4f; /* 0xddb3d7.0p-34 */ - if (ax < SQRT_6_EPSILON / 4 && ay < SQRT_6_EPSILON / 4) - return (complex(pio2_hi - (x - pio2_lo), -y)); - - do_hard_work(ay, ax, &ry, &B_is_usable, &B, &sqrt_A2mx2, &new_x); - if (B_is_usable) { - if (sx == 0) - rx = acosf(B); - else - rx = acosf(-B); - } else { - if (sx == 0) - rx = atan2f(sqrt_A2mx2, new_x); - else - rx = atan2f(sqrt_A2mx2, -new_x); - } - if (sy == 0) - ry = -ry; - return (complex(rx, ry)); -} - -__host__ __device__ inline -complex cacoshf(complex z) -{ - complex w; - float rx, ry; - - w = cacosf(z); - rx = w.real(); - ry = w.imag(); - /* cacosh(NaN + I*NaN) = NaN + I*NaN */ - if (isnan(rx) && isnan(ry)) - return (complex(ry, rx)); - /* cacosh(NaN + I*+-Inf) = +Inf + I*NaN */ - /* cacosh(+-Inf + I*NaN) = +Inf + I*NaN */ - if (isnan(rx)) - return (complex(fabsf(ry), rx)); - /* cacosh(0 + I*NaN) = NaN + I*NaN */ - if (isnan(ry)) - return (complex(ry, ry)); - return (complex(fabsf(ry), copysignf(rx, z.imag()))); -} - - /* - * Optimized version of clog() for |z| finite and larger than ~RECIP_EPSILON. - */ -__host__ __device__ inline -complex clog_for_large_values(complex z) -{ - float x, y; - float ax, ay, t; - const float m_e = 2.7182818284590452e0f; /* 0x15bf0a8b145769.0p-51 */ - - x = z.real(); - y = z.imag(); - ax = fabsf(x); - ay = fabsf(y); - if (ax < ay) { - t = ax; - ax = ay; - ay = t; - } - - if (ax > FLT_MAX / 2) - return (complex(logf(hypotf(x / m_e, y / m_e)) + 1, - atan2f(y, x))); - - const float QUARTER_SQRT_MAX = 2.3058430092136939520000000e+18f; /* = 0x1p61; <= sqrt(FLT_MAX) / 4 */ - const float SQRT_MIN = 1.084202172485504434007453e-19f; /* 0x1p-63; >= sqrt(FLT_MIN) */ - if (ax > QUARTER_SQRT_MAX || ay < SQRT_MIN) - return (complex(logf(hypotf(x, y)), atan2f(y, x))); - - return (complex(logf(ax * ax + ay * ay) / 2, atan2f(y, x))); -} - -/* - * ================= - * | catanh, catan | - * ================= - */ - -/* - * sum_squares(x,y) = x*x + y*y (or just x*x if y*y would underflow). - * Assumes x*x and y*y will not overflow. - * Assumes x and y are finite. - * Assumes y is non-negative. - * Assumes fabsf(x) >= FLT_EPSILON. - */ -__host__ __device__ -inline float sum_squares(float x, float y) -{ - const float SQRT_MIN = 1.084202172485504434007453e-19f; /* 0x1p-63; >= sqrt(FLT_MIN) */ - /* Avoid underflow when y is small. 
*/ - if (y < SQRT_MIN) - return (x * x); - - return (x * x + y * y); -} - -__host__ __device__ -inline float real_part_reciprocal(float x, float y) -{ - float scale; - uint32_t hx, hy; - int32_t ix, iy; - - get_float_word(hx, x); - ix = hx & 0x7f800000; - get_float_word(hy, y); - iy = hy & 0x7f800000; - //#define BIAS (FLT_MAX_EXP - 1) - const int BIAS = FLT_MAX_EXP - 1; - //#define CUTOFF (FLT_MANT_DIG / 2 + 1) - const int CUTOFF = (FLT_MANT_DIG / 2 + 1); - if (ix - iy >= CUTOFF << 23 || isinf(x)) - return (1 / x); - if (iy - ix >= CUTOFF << 23) - return (x / y / y); - if (ix <= (BIAS + FLT_MAX_EXP / 2 - CUTOFF) << 23) - return (x / (x * x + y * y)); - set_float_word(scale, 0x7f800000 - ix); - x *= scale; - y *= scale; - return (x / (x * x + y * y) * scale); -} - -#if THRUST_CPP_DIALECT >= 2011 || THRUST_HOST_COMPILER != THRUST_HOST_COMPILER_MSVC -__host__ __device__ inline -complex catanhf(complex z) -{ - float x, y, ax, ay, rx, ry; - const volatile float pio2_lo = 6.1232339957367659e-17f; /* 0x11a62633145c07.0p-106 */ - const float pio2_hi = 1.5707963267948966e0f;/* 0x1921fb54442d18.0p-52 */ - - - x = z.real(); - y = z.imag(); - ax = fabsf(x); - ay = fabsf(y); - - - if (y == 0 && ax <= 1) - return (complex(atanhf(x), y)); - - if (x == 0) - return (complex(x, atanf(y))); - - if (isnan(x) || isnan(y)) { - if (isinf(x)) - return (complex(copysignf(0, x), y + y)); - if (isinf(y)) - return (complex(copysignf(0, x), - copysignf(pio2_hi + pio2_lo, y))); - return (complex(x + 0.0f + (y + 0.0f), x + 0.0f + (y + 0.0f))); - } - - const float RECIP_EPSILON = 1.0f / FLT_EPSILON; - if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) - return (complex(real_part_reciprocal(x, y), - copysignf(pio2_hi + pio2_lo, y))); - - const float SQRT_3_EPSILON = 5.9801995673e-4f; /* 0x9cc471.0p-34 */ - if (ax < SQRT_3_EPSILON / 2 && ay < SQRT_3_EPSILON / 2) { - raise_inexact(); - return (z); - } - - const float m_ln2 = 6.9314718056e-1f; /* 0xb17218.0p-24 */ - if (ax == 1 && ay < FLT_EPSILON) - rx = (m_ln2 - logf(ay)) / 2; - else - rx = log1pf(4 * ax / sum_squares(ax - 1, ay)) / 4; - - if (ax == 1) - ry = atan2f(2, -ay) / 2; - else if (ay < FLT_EPSILON) - ry = atan2f(2 * ay, (1 - ax) * (1 + ax)) / 2; - else - ry = atan2f(2 * ay, (1 - ax) * (1 + ax) - ay * ay) / 2; - - return (complex(copysignf(rx, x), copysignf(ry, y))); -} - -__host__ __device__ inline -complexcatanf(complex z){ - complex w = catanhf(complex(z.imag(), z.real())); - return (complex(w.imag(), w.real())); -} -#endif - -} // namespace complex - -} // namespace detail - - -template <> -__host__ __device__ -inline complex acos(const complex& z){ - return detail::complex::cacosf(z); -} - -template <> -__host__ __device__ -inline complex asin(const complex& z){ - return detail::complex::casinf(z); -} - -#if THRUST_CPP_DIALECT >= 2011 || THRUST_HOST_COMPILER != THRUST_HOST_COMPILER_MSVC -template <> -__host__ __device__ -inline complex atan(const complex& z){ - return detail::complex::catanf(z); -} -#endif - -template <> -__host__ __device__ -inline complex acosh(const complex& z){ - return detail::complex::cacoshf(z); -} - - -template <> -__host__ __device__ -inline complex asinh(const complex& z){ - return detail::complex::casinhf(z); -} - -#if THRUST_CPP_DIALECT >= 2011 || THRUST_HOST_COMPILER != THRUST_HOST_COMPILER_MSVC -template <> -__host__ __device__ -inline complex atanh(const complex& z){ - return detail::complex::catanhf(z); -} -#endif - -} // namespace thrust diff --git a/spaces/ma-xu/LIVE/thrust/thrust/mr/validator.h 
b/spaces/ma-xu/LIVE/thrust/thrust/mr/validator.h deleted file mode 100644 index 9376ae870b5f6017ef9d27084d580d448fe53e75..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/mr/validator.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2018 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include "detail/config.h" -#include "memory_resource.h" - -namespace thrust -{ -namespace mr -{ - -template <typename MR> -struct validator -{ -#if THRUST_CPP_DIALECT >= 2011 - static_assert( - std::is_base_of<memory_resource<typename MR::pointer>, MR>::value, - "a type used as a memory resource must derive from memory_resource" - ); -#endif -}; - -template <typename T, typename U> -struct validator2 : private validator<T>, private validator<U> -{ -}; - -template <typename T> -struct validator2<T, T> : private validator<T> -{ -}; - -} // end mr -} // end thrust - diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/omp/detail/scan.h b/spaces/ma-xu/LIVE/thrust/thrust/system/omp/detail/scan.h deleted file mode 100644 index f47dbbc3087c613f36de65f704505340bb8a85b0..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/omp/detail/scan.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include - -// this system inherits scan -#include - diff --git a/spaces/macaodha/batdetect2/bat_detect/train/audio_dataloader.py b/spaces/macaodha/batdetect2/bat_detect/train/audio_dataloader.py deleted file mode 100644 index a36ec0b1b5c746057e80c1b569eb1354f9314333..0000000000000000000000000000000000000000 --- a/spaces/macaodha/batdetect2/bat_detect/train/audio_dataloader.py +++ /dev/null @@ -1,407 +0,0 @@ -import torch -import random -import numpy as np -import copy -import librosa -import torch.nn.functional as F -import torchaudio -import os - -import sys -sys.path.append(os.path.join('..', '..')) -import bat_detect.utils.audio_utils as au - - -def generate_gt_heatmaps(spec_op_shape, sampling_rate, ann, params): - # spec may be resized on input into the network - num_classes = len(params['class_names']) - op_height = spec_op_shape[0] - op_width = spec_op_shape[1] - freq_per_bin = (params['max_freq'] - params['min_freq']) / op_height - - # start and end times - x_pos_start = au.time_to_x_coords(ann['start_times'], sampling_rate, - params['fft_win_length'], params['fft_overlap']) - x_pos_start = (params['resize_factor']*x_pos_start).astype(np.int) - x_pos_end = au.time_to_x_coords(ann['end_times'], sampling_rate, - params['fft_win_length'], params['fft_overlap']) - x_pos_end = (params['resize_factor']*x_pos_end).astype(np.int) - - # location on y axis i.e. frequency - y_pos_low = (ann['low_freqs'] - params['min_freq']) / freq_per_bin - y_pos_low = (op_height - y_pos_low).astype(np.int) - y_pos_high = (ann['high_freqs'] - params['min_freq']) / freq_per_bin - y_pos_high = (op_height - y_pos_high).astype(np.int) - bb_widths = x_pos_end - x_pos_start - bb_heights = (y_pos_low - y_pos_high) - - valid_inds = np.where((x_pos_start >= 0) & (x_pos_start < op_width) & - (y_pos_low >= 0) & (y_pos_low < (op_height-1)))[0] - - ann_aug = {} - ann_aug['x_inds'] = x_pos_start[valid_inds] - ann_aug['y_inds'] = y_pos_low[valid_inds] - keys = ['start_times', 'end_times', 'high_freqs', 'low_freqs', 'class_ids', 'individual_ids'] - for kk in keys: - ann_aug[kk] = ann[kk][valid_inds] - - # if the number of calls is only 1, then it is unique - # TODO would be better if we found these unique calls at the merging stage - if len(ann_aug['individual_ids']) == 1: - ann_aug['individual_ids'][0] = 0 - - y_2d_det = np.zeros((1, op_height, op_width), dtype=np.float32) - y_2d_size = np.zeros((2, op_height, op_width), dtype=np.float32) - # num classes and "background" class - y_2d_classes = np.zeros((num_classes+1, op_height, op_width), dtype=np.float32) - - # create 2D ground truth heatmaps - for ii in valid_inds: - draw_gaussian(y_2d_det[0,:], (x_pos_start[ii], y_pos_low[ii]), params['target_sigma']) - #draw_gaussian(y_2d_det[0,:], (x_pos_start[ii], y_pos_low[ii]), params['target_sigma'], params['target_sigma']*2) - y_2d_size[0, y_pos_low[ii], x_pos_start[ii]] = bb_widths[ii] - y_2d_size[1, y_pos_low[ii], x_pos_start[ii]] = bb_heights[ii] - - cls_id = ann['class_ids'][ii] - if cls_id > -1: - draw_gaussian(y_2d_classes[cls_id, :], (x_pos_start[ii], y_pos_low[ii]), params['target_sigma']) - #draw_gaussian(y_2d_classes[cls_id, :], (x_pos_start[ii], y_pos_low[ii]), params['target_sigma'], params['target_sigma']*2) - - # be careful as this will have a 1.0 places where we have event but dont know gt class - # this will be masked in training anyway - y_2d_classes[num_classes, :] = 1.0 - y_2d_classes.sum(0) - y_2d_classes = y_2d_classes / y_2d_classes.sum(0)[np.newaxis, ...] 
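- # Illustrative sanity check of the normalization above (hypothetical numbers, - # not taken from any dataset): with num_classes = 2, a pixel whose two class - # Gaussians contribute 0.6 and 0.2 gets a background value of 1.0 - 0.8 = 0.2, - # and dividing by the channel-wise sum leaves that pixel's class vector - # summing to 1.0, i.e. a soft per-cell class distribution.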
- y_2d_classes[np.isnan(y_2d_classes)] = 0.0 - - return y_2d_det, y_2d_size, y_2d_classes, ann_aug - - -def draw_gaussian(heatmap, center, sigmax, sigmay=None): - # center is (x, y) - # this edits the heatmap inplace - - if sigmay is None: - sigmay = sigmax - tmp_size = np.maximum(sigmax, sigmay) * 3 - mu_x = int(center[0] + 0.5) - mu_y = int(center[1] + 0.5) - w, h = heatmap.shape[0], heatmap.shape[1] - ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)] - br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)] - - if ul[0] >= h or ul[1] >= w or br[0] < 0 or br[1] < 0: - return False - - size = 2 * tmp_size + 1 - x = np.arange(0, size, 1, np.float32) - y = x[:, np.newaxis] - x0 = y0 = size // 2 - #g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2)) - g = np.exp(- ((x - x0) ** 2)/(2 * sigmax ** 2) - ((y - y0) ** 2)/(2 * sigmay ** 2)) - g_x = max(0, -ul[0]), min(br[0], h) - ul[0] - g_y = max(0, -ul[1]), min(br[1], w) - ul[1] - img_x = max(0, ul[0]), min(br[0], h) - img_y = max(0, ul[1]), min(br[1], w) - heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]] = np.maximum( - heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]], - g[g_y[0]:g_y[1], g_x[0]:g_x[1]]) - return True - - -def pad_aray(ip_array, pad_size): - return np.hstack((ip_array, np.ones(pad_size, dtype=np.int)*-1)) - - -def warp_spec_aug(spec, ann, return_spec_for_viz, params): - # This is messy - # Augment spectrogram by randomly stretch and squeezing - # NOTE this also changes the start and stop time in place - - # not taking care of spec for viz - if return_spec_for_viz: - assert False - - delta = params['stretch_squeeze_delta'] - op_size = (spec.shape[1], spec.shape[2]) - resize_fract_r = np.random.rand()*delta*2 - delta + 1.0 - resize_amt = int(spec.shape[2]*resize_fract_r) - if resize_amt >= spec.shape[2]: - spec_r = torch.cat((spec, torch.zeros((1, spec.shape[1], resize_amt-spec.shape[2]), dtype=spec.dtype)), 2) - else: - spec_r = spec[:, :, :resize_amt] - spec = F.interpolate(spec_r.unsqueeze(0), size=op_size, mode='bilinear', align_corners=False).squeeze(0) - ann['start_times'] *= (1.0/resize_fract_r) - ann['end_times'] *= (1.0/resize_fract_r) - return spec - - -def mask_time_aug(spec, params): - # Mask out a random block of time - repeat up to 3 times - # SpecAugment: A Simple Data Augmentation Methodfor Automatic Speech Recognition - fm = torchaudio.transforms.TimeMasking(int(spec.shape[1]*params['mask_max_time_perc'])) - for ii in range(np.random.randint(1, 4)): - spec = fm(spec) - return spec - - -def mask_freq_aug(spec, params): - # Mask out a random frequncy range - repeat up to 3 times - # SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition - fm = torchaudio.transforms.FrequencyMasking(int(spec.shape[1]*params['mask_max_freq_perc'])) - for ii in range(np.random.randint(1, 4)): - spec = fm(spec) - return spec - - -def scale_vol_aug(spec, params): - return spec * np.random.random()*params['spec_amp_scaling'] - - -def echo_aug(audio, sampling_rate, params): - sample_offset = int(params['echo_max_delay']*np.random.random()*sampling_rate) + 1 - audio[:-sample_offset] += np.random.random()*audio[sample_offset:] - return audio - - -def resample_aug(audio, sampling_rate, params): - sampling_rate_old = sampling_rate - sampling_rate = np.random.choice(params['aug_sampling_rates']) - audio = librosa.resample(audio, sampling_rate_old, sampling_rate, res_type='polyphase') - - audio = au.pad_audio(audio, sampling_rate, params['fft_win_length'], - params['fft_overlap'], params['resize_factor'], - 
params['spec_divide_factor'], params['spec_train_width']) - duration = audio.shape[0] / float(sampling_rate) - return audio, sampling_rate, duration - - -def resample_audio(num_samples, sampling_rate, audio2, sampling_rate2): - if sampling_rate != sampling_rate2: - audio2 = librosa.resample(audio2, sampling_rate2, sampling_rate, res_type='polyphase') - sampling_rate2 = sampling_rate - if audio2.shape[0] < num_samples: - audio2 = np.hstack((audio2, np.zeros((num_samples-audio2.shape[0]), dtype=audio2.dtype))) - elif audio2.shape[0] > num_samples: - audio2 = audio2[:num_samples] - return audio2, sampling_rate2 - - -def combine_audio_aug(audio, sampling_rate, ann, audio2, sampling_rate2, ann2): - - # resample so they are the same - audio2, sampling_rate2 = resample_audio(audio.shape[0], sampling_rate, audio2, sampling_rate2) - - # # set mean and std to be the same - # audio2 = (audio2 - audio2.mean()) - # audio2 = (audio2/audio2.std())*audio.std() - # audio2 = audio2 + audio.mean() - - if ann['annotated'] and (ann2['annotated']) and \ - (sampling_rate2 == sampling_rate) and (audio.shape[0] == audio2.shape[0]): - comb_weight = 0.3 + np.random.random()*0.4 - audio = comb_weight*audio + (1-comb_weight)*audio2 - inds = np.argsort(np.hstack((ann['start_times'], ann2['start_times']))) - for kk in ann.keys(): - - # when combining calls from different files, assume they come from different individuals - if kk == 'individual_ids': - if (ann[kk]>-1).sum() > 0: - ann2[kk][ann2[kk]>-1] += np.max(ann[kk][ann[kk]>-1]) + 1 - - if (kk != 'class_id_file') and (kk != 'annotated'): - ann[kk] = np.hstack((ann[kk], ann2[kk]))[inds] - - return audio, ann - - -class AudioLoader(torch.utils.data.Dataset): - def __init__(self, data_anns_ip, params, dataset_name=None, is_train=False): - - self.data_anns = [] - self.is_train = is_train - self.params = params - self.return_spec_for_viz = False - - for ii in range(len(data_anns_ip)): - dd = copy.deepcopy(data_anns_ip[ii]) - - # filter out unused annotation here - filtered_annotations = [] - for ii, aa in enumerate(dd['annotation']): - - if 'individual' in aa.keys(): - aa['individual'] = int(aa['individual']) - - # if only one call labeled it has to be from the same individual - if len(dd['annotation']) == 1: - aa['individual'] = 0 - - # convert class name into class label - if aa['class'] in self.params['class_names']: - aa['class_id'] = self.params['class_names'].index(aa['class']) - else: - aa['class_id'] = -1 - - if aa['class'] not in self.params['classes_to_ignore']: - filtered_annotations.append(aa) - - dd['annotation'] = filtered_annotations - dd['start_times'] = np.array([aa['start_time'] for aa in dd['annotation']]) - dd['end_times'] = np.array([aa['end_time'] for aa in dd['annotation']]) - dd['high_freqs'] = np.array([float(aa['high_freq']) for aa in dd['annotation']]) - dd['low_freqs'] = np.array([float(aa['low_freq']) for aa in dd['annotation']]) - dd['class_ids'] = np.array([aa['class_id'] for aa in dd['annotation']]).astype(np.int) - dd['individual_ids'] = np.array([aa['individual'] for aa in dd['annotation']]).astype(np.int) - - # file level class name - dd['class_id_file'] = -1 - if 'class_name' in dd.keys(): - if dd['class_name'] in self.params['class_names']: - dd['class_id_file'] = self.params['class_names'].index(dd['class_name']) - - self.data_anns.append(dd) - - ann_cnt = [len(aa['annotation']) for aa in self.data_anns] - self.max_num_anns = 2*np.max(ann_cnt) # x2 because we may be combining files during training - - print('\n') - if dataset_name is 
not None: - print('Dataset : ' + dataset_name) - if self.is_train: - print('Split type : train') - else: - print('Split type : test') - print('Num files : ' + str(len(self.data_anns))) - print('Num calls : ' + str(np.sum(ann_cnt))) - - - def get_file_and_anns(self, index=None): - - # if no file specified, choose random one - if index == None: - index = np.random.randint(0, len(self.data_anns)) - - audio_file = self.data_anns[index]['file_path'] - sampling_rate, audio_raw = au.load_audio_file(audio_file, self.data_anns[index]['time_exp'], - self.params['target_samp_rate'], self.params['scale_raw_audio']) - - # copy annotation - ann = {} - ann['annotated'] = self.data_anns[index]['annotated'] - ann['class_id_file'] = self.data_anns[index]['class_id_file'] - keys = ['start_times', 'end_times', 'high_freqs', 'low_freqs', 'class_ids', 'individual_ids'] - for kk in keys: - ann[kk] = self.data_anns[index][kk].copy() - - # if train then grab a random crop - if self.is_train: - nfft = int(self.params['fft_win_length']*sampling_rate) - noverlap = int(self.params['fft_overlap']*nfft) - length_samples = self.params['spec_train_width']*(nfft - noverlap) + noverlap - - if audio_raw.shape[0] - length_samples > 0: - sample_crop = np.random.randint(audio_raw.shape[0] - length_samples) - else: - sample_crop = 0 - audio_raw = audio_raw[sample_crop:sample_crop+length_samples] - ann['start_times'] = ann['start_times'] - sample_crop/float(sampling_rate) - ann['end_times'] = ann['end_times'] - sample_crop/float(sampling_rate) - - # pad audio - if self.is_train: - op_spec_target_size = self.params['spec_train_width'] - else: - op_spec_target_size = None - audio_raw = au.pad_audio(audio_raw, sampling_rate, self.params['fft_win_length'], - self.params['fft_overlap'], self.params['resize_factor'], - self.params['spec_divide_factor'], op_spec_target_size) - duration = audio_raw.shape[0] / float(sampling_rate) - - # sort based on time - inds = np.argsort(ann['start_times']) - for kk in ann.keys(): - if (kk != 'class_id_file') and (kk != 'annotated'): - ann[kk] = ann[kk][inds] - - return audio_raw, sampling_rate, duration, ann - - - def __getitem__(self, index): - - # load audio file - audio, sampling_rate, duration, ann = self.get_file_and_anns(index) - - # augment on raw audio - if self.is_train and self.params['augment_at_train']: - # augment - combine with random audio file - if self.params['augment_at_train_combine'] and np.random.random() < self.params['aug_prob']: - audio2, sampling_rate2, duration2, ann2 = self.get_file_and_anns() - audio, ann = combine_audio_aug(audio, sampling_rate, ann, audio2, sampling_rate2, ann2) - - # simulate echo by adding delayed copy of the file - if np.random.random() < self.params['aug_prob']: - audio = echo_aug(audio, sampling_rate, self.params) - - # resample the audio - #if np.random.random() < self.params['aug_prob']: - # audio, sampling_rate, duration = resample_aug(audio, sampling_rate, self.params) - - # create spectrogram - spec, spec_for_viz = au.generate_spectrogram(audio, sampling_rate, self.params, self.return_spec_for_viz) - rsf = self.params['resize_factor'] - spec_op_shape = (int(self.params['spec_height']*rsf), int(spec.shape[1]*rsf)) - - # resize the spec - spec = torch.from_numpy(spec).unsqueeze(0).unsqueeze(0) - spec = F.interpolate(spec, size=spec_op_shape, mode='bilinear', align_corners=False).squeeze(0) - - # augment spectrogram - if self.is_train and self.params['augment_at_train']: - - if np.random.random() < self.params['aug_prob']: - spec = 
scale_vol_aug(spec, self.params) - - if np.random.random() < self.params['aug_prob']: - spec = warp_spec_aug(spec, ann, self.return_spec_for_viz, self.params) - - if np.random.random() < self.params['aug_prob']: - spec = mask_time_aug(spec, self.params) - - if np.random.random() < self.params['aug_prob']: - spec = mask_freq_aug(spec, self.params) - - outputs = {} - outputs['spec'] = spec - if self.return_spec_for_viz: - outputs['spec_for_viz'] = torch.from_numpy(spec_for_viz).unsqueeze(0) - - # create ground truth heatmaps - outputs['y_2d_det'], outputs['y_2d_size'], outputs['y_2d_classes'], ann_aug =\ - generate_gt_heatmaps(spec_op_shape, sampling_rate, ann, self.params) - - # hack to get around requirement that all vectors are the same length in - # the output batch - pad_size = self.max_num_anns-len(ann_aug['individual_ids']) - outputs['is_valid'] = pad_aray(np.ones(len(ann_aug['individual_ids'])), pad_size) - keys = ['class_ids', 'individual_ids', 'x_inds', 'y_inds', - 'start_times', 'end_times', 'low_freqs', 'high_freqs'] - for kk in keys: - outputs[kk] = pad_aray(ann_aug[kk], pad_size) - - # convert to pytorch - for kk in outputs.keys(): - if type(outputs[kk]) != torch.Tensor: - outputs[kk] = torch.from_numpy(outputs[kk]) - - # scalars - outputs['class_id_file'] = ann['class_id_file'] - outputs['annotated'] = ann['annotated'] - outputs['duration'] = duration - outputs['sampling_rate'] = sampling_rate - outputs['file_id'] = index - - return outputs - - - def __len__(self): - return len(self.data_anns) diff --git a/spaces/marcusj83/MusicGenbruh/audiocraft/__init__.py b/spaces/marcusj83/MusicGenbruh/audiocraft/__init__.py deleted file mode 100644 index 1759733cc109fa348c3f764c5939b5b609521cb3..0000000000000000000000000000000000000000 --- a/spaces/marcusj83/MusicGenbruh/audiocraft/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# flake8: noqa -from . import data, modules, models - -__version__ = '0.0.1' diff --git a/spaces/marioboy/neil-breen/encoder/params_model.py b/spaces/marioboy/neil-breen/encoder/params_model.py deleted file mode 100644 index 3e356472fb5a27f370cb3920976a11d12a76c1b7..0000000000000000000000000000000000000000 --- a/spaces/marioboy/neil-breen/encoder/params_model.py +++ /dev/null @@ -1,11 +0,0 @@ - -## Model parameters -model_hidden_size = 256 -model_embedding_size = 256 -model_num_layers = 3 - - -## Training parameters -learning_rate_init = 1e-4 -speakers_per_batch = 64 -utterances_per_speaker = 10 diff --git a/spaces/marlenezw/audio-driven-animations/MakeItTalk/thirdparty/face_of_art/predict_landmarks.py b/spaces/marlenezw/audio-driven-animations/MakeItTalk/thirdparty/face_of_art/predict_landmarks.py deleted file mode 100644 index 60366134292f572e167361a6de127b5c56dd7870..0000000000000000000000000000000000000000 --- a/spaces/marlenezw/audio-driven-animations/MakeItTalk/thirdparty/face_of_art/predict_landmarks.py +++ /dev/null @@ -1,73 +0,0 @@ -from menpo_functions import * -from deep_heatmaps_model_fusion_net import DeepHeatmapsModel -from scipy.misc import imsave - -# *************** define parameters and paths *************** - -data_dir = '~/AF_dataset2/' -test_data = 'Fernand_Leger' # subdirectory containing portraits for landmark detection (under data_dir) - -use_gt_bb = False # use ground truth bounding box to crop images. 
if False, use face detector bounding box (relevant -# for challenging, common, full & training sets only) - -out_dir = 'out_pred_landmarks' # directory for saving predicted landmarks -if not os.path.exists(out_dir): - os.mkdir(out_dir) - -model_path = '~/model_foa/deep_heatmaps-60000' # model for estimation stage -pdm_path = 'pdm_clm_models/pdm_models/' # models for correction stage -clm_path = 'pdm_clm_models/clm_models/g_t_all' # model for tuning stage - -outline_tune = False # if true use tuning stage on eyebrows+jaw, else use tuning stage on jaw only -# (see paper for details) - -save_cropped_imgs = False # save input images in their cropped version to out_dir. - -map_landmarks_to_original_image = True # if True, landmark predictions will be mapped to match original -# input image size. otherwise the predicted landmarks will match the cropped version (256x256) of the images - -# *************** load images and model *************** - -# load images - -bb_dir = os.path.join(data_dir, 'Bounding_Boxes') -bb_dictionary = load_bb_dictionary(bb_dir, mode='TEST', test_data=test_data) -if use_gt_bb: - bb_type = 'gt' -else: - bb_type = 'init' - -img_list = load_menpo_image_list( - img_dir=data_dir, test_data=test_data, train_crop_dir=data_dir, img_dir_ns=data_dir, bb_type=bb_type, - bb_dictionary=bb_dictionary, mode='TEST', return_transform=map_landmarks_to_original_image) - -# load model -heatmap_model = DeepHeatmapsModel( - mode='TEST', img_path=data_dir, test_model_path=model_path, test_data=test_data, menpo_verbose=False) - - -# *************** predict landmarks *************** -print ("\npredicting landmarks for: "+os.path.join(data_dir, test_data)) -print ("\nsaving landmarks to: "+out_dir) -for i, img in enumerate(img_list): - if i == 0: - reuse = None - else: - reuse = True - - preds = heatmap_model.get_landmark_predictions(img_list=[img], pdm_models_dir=pdm_path, clm_model_path=clm_path, - reuse=reuse, map_to_input_size=map_landmarks_to_original_image) - - if map_landmarks_to_original_image: - img = img[0] - - if outline_tune: - pred_lms = preds['ECpTp_out'] - else: - pred_lms = preds['ECpTp_jaw'] - - mio.export_landmark_file(PointCloud(pred_lms[0]), os.path.join(out_dir, img.path.stem + '.pts'), - overwrite=True) - if save_cropped_imgs: - imsave(os.path.join(out_dir, img.path.stem + '.png'), img.pixels_with_channels_at_back()) -print ("\nDONE!") diff --git a/spaces/matthoffner/AudioCraft_Plus/audiocraft/metrics/__init__.py b/spaces/matthoffner/AudioCraft_Plus/audiocraft/metrics/__init__.py deleted file mode 100644 index 3474bdc4f1c88b21904d2a21ba077c93a8a70c8b..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/AudioCraft_Plus/audiocraft/metrics/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""Metrics like CLAP score, FAD, KLD, Visqol, Chroma similarity, etc. 
-""" -# flake8: noqa -from .clap_consistency import CLAPTextConsistencyMetric, TextConsistencyMetric -from .chroma_cosinesim import ChromaCosineSimilarityMetric -from .fad import FrechetAudioDistanceMetric -from .kld import KLDivergenceMetric, PasstKLDivergenceMetric -from .rvm import RelativeVolumeMel -from .visqol import ViSQOL diff --git a/spaces/mediaparty2023/test-autotrain/README.md b/spaces/mediaparty2023/test-autotrain/README.md deleted file mode 100644 index c7ab71c4628df2cd75631d1df02456e675689e70..0000000000000000000000000000000000000000 --- a/spaces/mediaparty2023/test-autotrain/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: AutoTrain Advanced -emoji: 🚀 -colorFrom: blue -colorTo: green -sdk: docker -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/merve/anonymization/public/measuring-diversity/script.js b/spaces/merve/anonymization/public/measuring-diversity/script.js deleted file mode 100644 index 002fb32c0d0ee11cf292109725ebda6a2a4b57a4..0000000000000000000000000000000000000000 --- a/spaces/merve/anonymization/public/measuring-diversity/script.js +++ /dev/null @@ -1,360 +0,0 @@ -// Seeded random number generator -window.random = new Math.seedrandom('aaaa') -window.randomIndex = new Math.seedrandom('7b') - -window.numRows = 20 -window.shapes = window.shapes || d3.range(21).map(i => randomShape(i, random)) - -window.random2 = new Math.seedrandom('7') -// window.columnShapes = window.columnShapes || d3.range(window.numRows).map(i => d3.range(10).map(i =>randomShape(i, random2))) -window.columnShapes = d3.range(window.numRows).map(i => d3.range(10).map(i =>randomShape(i, random2, true))) - -console.log(window.random3) -function randomShape(i, random, colTargets){ - var color2fill = { - green: '#5A9F8A', - orange: '#DF831F', - blue: '#80BAD4', - } - - var randomItem = function(arr) { - const index = Math.abs(random.int32()) % arr.length - return arr[index] - } - - var color = randomItem(d3.keys(color2fill)) - var size = randomItem(['small', 'large']) - var shape = randomItem(['circle', 'square', 'triangle']) - - if (colTargets && (i == 4 || i == 5)){ - color = 'green' - } - if (colTargets && (i == 4 || i == 15)){ - size = 'small' - } - if (colTargets && (i == 3 || i == 5)){ - shape = 'triangle' - } - - var displayIndex = randomIndex() - - return { - i, - displayIndex, - color, - fill: color2fill[color], - dFill: d3.color(color2fill[color]).darker(1), - size, - sizeVal: size == 'large' ? 1 : .4, - shape, - } -} - -var metrics = [ - { - str: 'Greens', - key: 'green', - field: 'color', - target: .3 - }, - { - str: 'Dot', - key: 'triangle', - field: 'shape', - target: .35 - }, - { - str: 'Smalls', - key: 'small', - field: 'size', - target: .60 - }, -] -window.metrics1 = metrics.map(d => ({...d})) -metrics1[2].target = .5 -window.metrics2 = metrics1.map(d => ({...d})) -metrics2[0].target = 1 - -metrics.forEach(d => { - d.scoreScale = d3.scaleLinear().domain([0, d.target, 1]).range([0, 1, 0]) -}) - - -var pctFmt = d3.format('.0%') -function addMetrics(metrics, {active, topSel, isSmall}){ - var metricSel = topSel - .st({textAlign: 'center'}) - .appendMany('div', metrics) - .st({textAlign: 'center', width: 200, display: 'inline-block'}) - - var width = 120 - - var svg = metricSel.append('svg') - .at({width: 120, height: 100}) - .append('g') - .translate([.5, 40.5]) - - if (isSmall){ - svg.translate((d, i) => [i ? 
-20.5 : 20.5, 40.5]) - } - - - var xScale = d3.scaleLinear().rangeRound([0, width]) - - var topText = svg.append('text') - .at({y: -20, fontWeight: 500, textAnchor: 'middle', x: width/2}) - - svg.append('path') - .at({d: 'M 0 0 H ' + width, stroke: '#000'}) - - var topTick = svg.append('path') - .at({d: 'M 0 0 V -12.5', stroke: '#000', strokeWidth: 3}) - - - var actualSel = svg.append('g').st({fill: highlightColor}) - - actualSel.append('path') - .at({d: 'M 0 0 V 12.5', stroke: highlightColor, strokeWidth: 3}) - - var actualPct = actualSel.append('text') - .translate(30, 1).at({textAnchor: 'middle'}).st({fontWeight: 300}) - - var actualScore = actualSel.append('text') - .translate(50, 1).at({textAnchor: 'middle'}).st({fontWeight: 300}) - - return () => { - var pcts = metrics.map(d => active.percents[d.key] || 0) - - topText.text(d => (d.str + ' Target: ').replace('s ', ' ') + pctFmt(d.target)) - - topTick.translate(d => xScale(d.target), 0) - actualSel.translate((d, i) => xScale(pcts[i]), 0) - - actualPct.text((d, i) => 'Actual: ' + pctFmt(pcts[i])) - actualScore.text((d, i) => 'Difference: ' + pctFmt(Math.abs(d.target - pcts[i]))) - } -} - - -function scoreActive(active){ - var numActive = d3.sum(active) - return metrics.map(m => { - var v = d3.sum(active, (d, i) => active[i] && shapes[i][m.field] == m.key) - return Math.abs(m.target - v/numActive); - // return m.scoreScale(v/numActive || 0) - }) -} - -var measures = [ - { - str: 'Utilitarian', - display_text: 'Minimize Mean Difference', - ranking_display_text: 'Mean Difference', - fn: s => d3.mean(s)*100, - ppFn: s => d3.format('.2%')(d3.mean(s)), - format: s => 'mean(' + s.map(d => d + '%').join(', ') + ')' - }, - { - str: 'Egalitarian', - display_text: 'Minimize Max Difference', - ranking_display_text: 'Max Difference', - fn: s => { - var srt = _.sortBy(s).map(d => Math.round(d*100)).reverse() - - return srt[0]*100000000 + srt[1]*10000 + srt[2] - }, - ppFn: s => { - var srt = _.sortBy(s).map(d => Math.round(d*100)).reverse() - - return srt[0] + '%' - }, - format: s => 'max(' + s.map(d => d + '%').join(', ') + ')' - } -] -measures2 = measures.map(d => ({...d})) - - -var randomActive = d3.range(10000).map(d => { - var active = shapes.map(d => random() < .3) - - if (d == 0) active = '111111111111101011100'.split('').map(d => +d) - - active.score = scoreActive(active) - measures.forEach(d => { - active[d.str] = d.fn(active.score) - }) - - return active -}) - -function addMetricBestButton(metricIndex, {active, sel, render}){ - var measureSel = sel - .append('div').st({textAlign: 'center', marginTop: 20, marginBottom: -20}) - .append('div.measure').st({width: 200, lineHeight: '1.8em', display: 'inline-block'}) - .html('Show Best') - .on('click', d => { - - // console.log(active) - var pcts = metrics.map(d => active.percents[d.key] || 0) - if (pcts[metricIndex] == metrics[metricIndex].target) return - - var nextActive = _.minBy(randomActive, a => a.score[metricIndex]) - active.forEach((d, i) => active[i] = nextActive[i]) - - measureSel.classed('active', e => e == d) - render() - }) -} - -function addMeasures(measures, {active, sel, render}){ - var measureSel = sel.selectAll('div.measure-container') - - measureSel - .append('div.measure') - .st({width: 200, lineHeight: '1.8em', display: 'inline-block', textAlign: 'center', }) - .html((d, i) => i ? 'Show the set where the highest difference is the smallest' : 'Show the set with
    lowest mean difference') - .html('Show Best') - .on('click', d => { - - var nextActive = _.minBy(randomActive, a => a[d.str]) - active.forEach((d, i) => active[i] = nextActive[i]) - - measureSel.classed('active', e => e == d) - render() - }) - - -} - -function addTotalMetrics(metrics, measures, {active, sel, render}){ - var metricSel = sel.classed('bot', 1).st({textAlign: 'center'}) - .appendMany('div.measure-container', measures) - .append('div', measures) - .st({textAlign: 'center', display: 'inline-block'}) - - - var headlineSel = metricSel.append('div') - var calcSel = metricSel.append('div')//.st({color: highlightColor}) - - return () => { - - measures.forEach(d => { - d.scores = scoreActive(active) - - d.score = Math.round(d.fn(d.scores)*100)/100 - if (d.ppFn) d.score = d.ppFn(d.scores) - }) - - headlineSel.st({fontWeight: 600}) - .text(d => d.ranking_display_text + ': ' + d.score) - - calcSel.text(d => { - var roundedScores = d.scores.map(s => Math.round(s * 100)) - - return d.format(roundedScores) - }) - } -} - - -window.shapeRandom = new Math.seedrandom('aaf') -var defaultActive = shapes.map(d => shapeRandom() < .4) -drawShape('all-shapes') - -drawShape('pick-green', ({active, topSel, sel, render}) => { - active.forEach((d, i) => active[i] = defaultActive[i]) - addMetricBestButton(0, {active, sel, render}) - return addMetrics(metrics.filter(d => d.key == 'green'), {active, topSel}) -}) - -drawShape('pick-triangle', ({active, topSel, sel, render}) => { - active.forEach((d, i) => active[i] = defaultActive[i]) - addMetricBestButton(1, {active, sel, render}) - return addMetrics(metrics.filter(d => d.key == 'triangle'), {active, topSel}) -}) - -drawShape('pick-metric', grid => { - grid.active.forEach((d, i) => grid.active[i] = defaultActive[i]) - - var metricRender = addMetrics(metrics, grid) - var totalMetricRender = addTotalMetrics(metrics, measures, grid) - addMeasures(measures, grid) - - return () => { - metricRender() - totalMetricRender() - } -}) - - -function drawShape(id, initFn=d => e => e){ - var active = shapes.map(d => true) - - var sel = d3.select('#' + id).html('') - - var s = 110 - - var topSel = sel.append('div.top') - var shapeSel = sel.appendMany('div.shape', _.sortBy(shapes, d => d.displayIndex)) - .st({width: s, height: s}) - .on('click', d => { - active[d.i] = !active[d.i] - render() - }) - - shapeSel.append('svg') - .at({width: s, height: s}) - .append('g').translate([s/2, s/2]) - .each(function(d){ - if (d.shape == 'square' || true){ - var rs = Math.round(d.sizeVal*s/3.5) - var shapeSel = d3.select(this).append('rect') - .at({x: -rs, y: -rs, width: rs*2, height: rs*2}) - } else if (d.shape == 'circle'){ - var shapeSel = d3.select(this).append('circle') - .at({r: d.sizeVal*s/3}) - } else if (d.shape == 'triangle'){ - var rs = Math.round(d.sizeVal*s/2.9) - var shapeSel = d3.select(this).append('path') - .translate(rs*Math.pow(3,1/2)/10, 1) - .at({d: [ - 'M', 0, -rs, - 'L', -rs*Math.pow(3,1/2)/2, rs/2, - 'L', +rs*Math.pow(3,1/2)/2, rs/2, - 'Z' - ].join(' ')}) - } - - if (d.shape == 'triangle'){ - d3.select(this).append('circle') - .at({r: 4, fill: '#fff', stroke: '#000', strokeWidth: 1}) - } - - shapeSel.at({fill: d.fill, stroke: d.dFill, strokeWidth: 2}) - }) - - var customRender = initFn({active, topSel, sel, render}) - - shapes.render = render - function render(){ - shapeSel.classed('active', d => active[d.i]) - // console.log(active.map(d => +d).join('')) - - active.percents = {} - active.shapes = shapes.filter(d => active[d.i]) - - d3.nestBy(active.shapes, 
d => d.color).forEach(d => { - active.percents[d.key] = d.length/active.shapes.length - }) - d3.nestBy(active.shapes, d => d.size).forEach(d => { - active.percents[d.key] = d.length/active.shapes.length - }) - d3.nestBy(active.shapes, d => d.shape).forEach(d => { - active.percents[d.key] = d.length/active.shapes.length - }) - - - customRender() - } - render() -} \ No newline at end of file diff --git a/spaces/merve/measuring-fairness/README.md b/spaces/merve/measuring-fairness/README.md deleted file mode 100644 index 9e6a457186df4a7073476609a28f4cf2fcf9d32d..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: measuring-fairness -emoji: 🪄 -colorFrom: green -colorTo: purple -sdk: static -pinned: false -license: apache-2.0 -app_file: public/measuring-fairness/index.html ---- diff --git a/spaces/merve/uncertainty-calibration/public/third_party/umap.js b/spaces/merve/uncertainty-calibration/public/third_party/umap.js deleted file mode 100644 index 13bb989b285114e7a79d0a213422997c19a3c2f0..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/public/third_party/umap.js +++ /dev/null @@ -1,6864 +0,0 @@ -// https://github.com/pair-code/umap-js Copyright 2019 Google -(function webpackUniversalModuleDefinition(root, factory) { - if(typeof exports === 'object' && typeof module === 'object') - module.exports = factory(); - else if(typeof define === 'function' && define.amd) - define([], factory); - else { - var a = factory(); - for(var i in a) (typeof exports === 'object' ? exports : root)[i] = a[i]; - } -})(window, function() { -return /******/ (function(modules) { // webpackBootstrap -/******/ // The module cache -/******/ var installedModules = {}; -/******/ -/******/ // The require function -/******/ function __webpack_require__(moduleId) { -/******/ -/******/ // Check if module is in cache -/******/ if(installedModules[moduleId]) { -/******/ return installedModules[moduleId].exports; -/******/ } -/******/ // Create a new module (and put it into the cache) -/******/ var module = installedModules[moduleId] = { -/******/ i: moduleId, -/******/ l: false, -/******/ exports: {} -/******/ }; -/******/ -/******/ // Execute the module function -/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); -/******/ -/******/ // Flag the module as loaded -/******/ module.l = true; -/******/ -/******/ // Return the exports of the module -/******/ return module.exports; -/******/ } -/******/ -/******/ -/******/ // expose the modules object (__webpack_modules__) -/******/ __webpack_require__.m = modules; -/******/ -/******/ // expose the module cache -/******/ __webpack_require__.c = installedModules; -/******/ -/******/ // define getter function for harmony exports -/******/ __webpack_require__.d = function(exports, name, getter) { -/******/ if(!__webpack_require__.o(exports, name)) { -/******/ Object.defineProperty(exports, name, { enumerable: true, get: getter }); -/******/ } -/******/ }; -/******/ -/******/ // define __esModule on exports -/******/ __webpack_require__.r = function(exports) { -/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) { -/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' }); -/******/ } -/******/ Object.defineProperty(exports, '__esModule', { value: true }); -/******/ }; -/******/ -/******/ // create a fake namespace object -/******/ // mode & 1: value is a module id, require it -/******/ // mode & 
2: merge all properties of value into the ns -/******/ // mode & 4: return value when already ns object -/******/ // mode & 8|1: behave like require -/******/ __webpack_require__.t = function(value, mode) { -/******/ if(mode & 1) value = __webpack_require__(value); -/******/ if(mode & 8) return value; -/******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value; -/******/ var ns = Object.create(null); -/******/ __webpack_require__.r(ns); -/******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value }); -/******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key)); -/******/ return ns; -/******/ }; -/******/ -/******/ // getDefaultExport function for compatibility with non-harmony modules -/******/ __webpack_require__.n = function(module) { -/******/ var getter = module && module.__esModule ? -/******/ function getDefault() { return module['default']; } : -/******/ function getModuleExports() { return module; }; -/******/ __webpack_require__.d(getter, 'a', getter); -/******/ return getter; -/******/ }; -/******/ -/******/ // Object.prototype.hasOwnProperty.call -/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; -/******/ -/******/ // __webpack_public_path__ -/******/ __webpack_require__.p = ""; -/******/ -/******/ -/******/ // Load entry module and return exports -/******/ return __webpack_require__(__webpack_require__.s = 5); -/******/ }) -/************************************************************************/ -/******/ ([ -/* 0 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -const toString = Object.prototype.toString; - -function isAnyArray(object) { - return toString.call(object).endsWith('Array]'); -} - -module.exports = isAnyArray; - - -/***/ }), -/* 1 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - -var __values = (this && this.__values) || function (o) { - var m = typeof Symbol === "function" && o[Symbol.iterator], i = 0; - if (m) return m.call(o); - return { - next: function () { - if (o && i >= o.length) o = void 0; - return { value: o && o[i++], done: !o }; - } - }; -}; -Object.defineProperty(exports, "__esModule", { value: true }); -function tauRandInt(n, random) { - return Math.floor(random() * n); -} -exports.tauRandInt = tauRandInt; -function tauRand(random) { - return random(); -} -exports.tauRand = tauRand; -function norm(vec) { - var e_1, _a; - var result = 0; - try { - for (var vec_1 = __values(vec), vec_1_1 = vec_1.next(); !vec_1_1.done; vec_1_1 = vec_1.next()) { - var item = vec_1_1.value; - result += Math.pow(item, 2); - } - } - catch (e_1_1) { e_1 = { error: e_1_1 }; } - finally { - try { - if (vec_1_1 && !vec_1_1.done && (_a = vec_1.return)) _a.call(vec_1); - } - finally { if (e_1) throw e_1.error; } - } - return Math.sqrt(result); -} -exports.norm = norm; -function empty(n) { - var output = []; - for (var i = 0; i < n; i++) { - output.push(undefined); - } - return output; -} -exports.empty = empty; -function range(n) { - return empty(n).map(function (_, i) { return i; }); -} -exports.range = range; -function filled(n, v) { - return empty(n).map(function () { return v; }); -} -exports.filled = filled; -function zeros(n) { - return filled(n, 0); -} -exports.zeros = zeros; -function ones(n) { - return filled(n, 1); -} -exports.ones = ones; -function linear(a, b, len) { - return 
empty(len).map(function (_, i) { - return a + i * ((b - a) / (len - 1)); - }); -} -exports.linear = linear; -function sum(input) { - return input.reduce(function (sum, val) { return sum + val; }); -} -exports.sum = sum; -function mean(input) { - return sum(input) / input.length; -} -exports.mean = mean; -function max(input) { - var max = 0; - for (var i = 0; i < input.length; i++) { - max = input[i] > max ? input[i] : max; - } - return max; -} -exports.max = max; -function max2d(input) { - var max = 0; - for (var i = 0; i < input.length; i++) { - for (var j = 0; j < input[i].length; j++) { - max = input[i][j] > max ? input[i][j] : max; - } - } - return max; -} -exports.max2d = max2d; -function rejectionSample(nSamples, poolSize, random) { - var result = zeros(nSamples); - for (var i = 0; i < nSamples; i++) { - var rejectSample = true; - while (rejectSample) { - var j = tauRandInt(poolSize, random); - var broken = false; - for (var k = 0; k < i; k++) { - if (j === result[k]) { - broken = true; - break; - } - } - if (!broken) { - rejectSample = false; - } - result[i] = j; - } - } - return result; -} -exports.rejectionSample = rejectionSample; -function reshape2d(x, a, b) { - var rows = []; - var count = 0; - var index = 0; - if (x.length !== a * b) { - throw new Error('Array dimensions must match input length.'); - } - for (var i = 0; i < a; i++) { - var col = []; - for (var j = 0; j < b; j++) { - col.push(x[index]); - index += 1; - } - rows.push(col); - count += 1; - } - return rows; -} -exports.reshape2d = reshape2d; - - -/***/ }), -/* 2 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - -var __importStar = (this && this.__importStar) || function (mod) { - if (mod && mod.__esModule) return mod; - var result = {}; - if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k]; - result["default"] = mod; - return result; -}; -Object.defineProperty(exports, "__esModule", { value: true }); -var utils = __importStar(__webpack_require__(1)); -function makeHeap(nPoints, size) { - var makeArrays = function (fillValue) { - return utils.empty(nPoints).map(function () { - return utils.filled(size, fillValue); - }); - }; - var heap = []; - heap.push(makeArrays(-1)); - heap.push(makeArrays(Infinity)); - heap.push(makeArrays(0)); - return heap; -} -exports.makeHeap = makeHeap; -function rejectionSample(nSamples, poolSize, random) { - var result = utils.zeros(nSamples); - for (var i = 0; i < nSamples; i++) { - var rejectSample = true; - var j = 0; - while (rejectSample) { - j = utils.tauRandInt(poolSize, random); - var broken = false; - for (var k = 0; k < i; k++) { - if (j === result[k]) { - broken = true; - break; - } - } - if (!broken) - rejectSample = false; - } - result[i] = j; - } - return result; -} -exports.rejectionSample = rejectionSample; -function heapPush(heap, row, weight, index, flag) { - row = Math.floor(row); - var indices = heap[0][row]; - var weights = heap[1][row]; - var isNew = heap[2][row]; - if (weight >= weights[0]) { - return 0; - } - for (var i = 0; i < indices.length; i++) { - if (index === indices[i]) { - return 0; - } - } - return uncheckedHeapPush(heap, row, weight, index, flag); -} -exports.heapPush = heapPush; -function uncheckedHeapPush(heap, row, weight, index, flag) { - var indices = heap[0][row]; - var weights = heap[1][row]; - var isNew = heap[2][row]; - if (weight >= weights[0]) { - return 0; - } - weights[0] = weight; - indices[0] = index; - isNew[0] = flag; - var i = 0; - var iSwap = 0; - while (true) { 
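- // The indices/weights/isNew arrays form an implicit binary max-heap keyed - // on distance: the children of node i live at 2*i + 1 and 2*i + 2. The - // freshly replaced root is swapped toward its larger child until the heap - // property is restored.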
- var ic1 = 2 * i + 1; - var ic2 = ic1 + 1; - var heapShape2 = heap[0][0].length; - if (ic1 >= heapShape2) { - break; - } - else if (ic2 >= heapShape2) { - if (weights[ic1] > weight) { - iSwap = ic1; - } - else { - break; - } - } - else if (weights[ic1] >= weights[ic2]) { - if (weight < weights[ic1]) { - iSwap = ic1; - } - else { - break; - } - } - else { - if (weight < weights[ic2]) { - iSwap = ic2; - } - else { - break; - } - } - weights[i] = weights[iSwap]; - indices[i] = indices[iSwap]; - isNew[i] = isNew[iSwap]; - i = iSwap; - } - weights[i] = weight; - indices[i] = index; - isNew[i] = flag; - return 1; -} -exports.uncheckedHeapPush = uncheckedHeapPush; -function buildCandidates(currentGraph, nVertices, nNeighbors, maxCandidates, random) { - var candidateNeighbors = makeHeap(nVertices, maxCandidates); - for (var i = 0; i < nVertices; i++) { - for (var j = 0; j < nNeighbors; j++) { - if (currentGraph[0][i][j] < 0) { - continue; - } - var idx = currentGraph[0][i][j]; - var isn = currentGraph[2][i][j]; - var d = utils.tauRand(random); - heapPush(candidateNeighbors, i, d, idx, isn); - heapPush(candidateNeighbors, idx, d, i, isn); - currentGraph[2][i][j] = 0; - } - } - return candidateNeighbors; -} -exports.buildCandidates = buildCandidates; -function deheapSort(heap) { - var indices = heap[0]; - var weights = heap[1]; - for (var i = 0; i < indices.length; i++) { - var indHeap = indices[i]; - var distHeap = weights[i]; - for (var j = 0; j < indHeap.length - 1; j++) { - var indHeapIndex = indHeap.length - j - 1; - var distHeapIndex = distHeap.length - j - 1; - var temp1 = indHeap[0]; - indHeap[0] = indHeap[indHeapIndex]; - indHeap[indHeapIndex] = temp1; - var temp2 = distHeap[0]; - distHeap[0] = distHeap[distHeapIndex]; - distHeap[distHeapIndex] = temp2; - siftDown(distHeap, indHeap, distHeapIndex, 0); - } - } - return { indices: indices, weights: weights }; -} -exports.deheapSort = deheapSort; -function siftDown(heap1, heap2, ceiling, elt) { - while (elt * 2 + 1 < ceiling) { - var leftChild = elt * 2 + 1; - var rightChild = leftChild + 1; - var swap = elt; - if (heap1[swap] < heap1[leftChild]) { - swap = leftChild; - } - if (rightChild < ceiling && heap1[swap] < heap1[rightChild]) { - swap = rightChild; - } - if (swap === elt) { - break; - } - else { - var temp1 = heap1[elt]; - heap1[elt] = heap1[swap]; - heap1[swap] = temp1; - var temp2 = heap2[elt]; - heap2[elt] = heap2[swap]; - heap2[swap] = temp2; - elt = swap; - } - } -} -function smallestFlagged(heap, row) { - var ind = heap[0][row]; - var dist = heap[1][row]; - var flag = heap[2][row]; - var minDist = Infinity; - var resultIndex = -1; - for (var i = 0; i < ind.length; i++) { - if (flag[i] === 1 && dist[i] < minDist) { - minDist = dist[i]; - resultIndex = i; - } - } - if (resultIndex >= 0) { - flag[resultIndex] = 0; - return Math.floor(ind[resultIndex]); - } - else { - return -1; - } -} -exports.smallestFlagged = smallestFlagged; - - -/***/ }), -/* 3 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - -var __read = (this && this.__read) || function (o, n) { - var m = typeof Symbol === "function" && o[Symbol.iterator]; - if (!m) return o; - var i = m.call(o), r, ar = [], e; - try { - while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); - } - catch (error) { e = { error: error }; } - finally { - try { - if (r && !r.done && (m = i["return"])) m.call(i); - } - finally { if (e) throw e.error; } - } - return ar; -}; -var __spread = (this && this.__spread) || function () { - for (var ar = 
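`heapPush` and `uncheckedHeapPush` keep the current worst distance at the root so a better candidate can evict it in O(log size). An illustrative standalone restatement of that invariant, not the bundle's API:

```ts
// Keep the k smallest distances: reject anything not better than the root
// (the current worst), otherwise replace the root and sift it down.
function boundedPush(weights: number[], indices: number[], w: number, idx: number): boolean {
  if (w >= weights[0]) return false;
  weights[0] = w;
  indices[0] = idx;
  let i = 0;
  while (true) {
    const left = 2 * i + 1;
    const right = left + 1;
    let swap = i;
    if (left < weights.length && weights[left] > weights[swap]) swap = left;
    if (right < weights.length && weights[right] > weights[swap]) swap = right;
    if (swap === i) break;
    [weights[i], weights[swap]] = [weights[swap], weights[i]];
    [indices[i], indices[swap]] = [indices[swap], indices[i]];
    i = swap;
  }
  return true;
}

const w = [Infinity, Infinity, Infinity];
const idx = [-1, -1, -1];
boundedPush(w, idx, 0.3, 7); // accepted: 0.3 beats the current worst
console.log(w[0]);           // Infinity (the worst remaining value stays at the root)
```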
[], i = 0; i < arguments.length; i++) ar = ar.concat(__read(arguments[i])); - return ar; -}; -var __values = (this && this.__values) || function (o) { - var m = typeof Symbol === "function" && o[Symbol.iterator], i = 0; - if (m) return m.call(o); - return { - next: function () { - if (o && i >= o.length) o = void 0; - return { value: o && o[i++], done: !o }; - } - }; -}; -var __importStar = (this && this.__importStar) || function (mod) { - if (mod && mod.__esModule) return mod; - var result = {}; - if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k]; - result["default"] = mod; - return result; -}; -Object.defineProperty(exports, "__esModule", { value: true }); -var _a; -var utils = __importStar(__webpack_require__(1)); -var SparseMatrix = (function () { - function SparseMatrix(rows, cols, values, dims) { - this.entries = new Map(); - this.nRows = 0; - this.nCols = 0; - this.rows = __spread(rows); - this.cols = __spread(cols); - this.values = __spread(values); - for (var i = 0; i < values.length; i++) { - var key = this.makeKey(this.rows[i], this.cols[i]); - this.entries.set(key, i); - } - this.nRows = dims[0]; - this.nCols = dims[1]; - } - SparseMatrix.prototype.makeKey = function (row, col) { - return row + ":" + col; - }; - SparseMatrix.prototype.checkDims = function (row, col) { - var withinBounds = row < this.nRows && col < this.nCols; - if (!withinBounds) { - throw new Error('array index out of bounds'); - } - }; - SparseMatrix.prototype.set = function (row, col, value) { - this.checkDims(row, col); - var key = this.makeKey(row, col); - if (!this.entries.has(key)) { - this.rows.push(row); - this.cols.push(col); - this.values.push(value); - this.entries.set(key, this.values.length - 1); - } - else { - var index = this.entries.get(key); - this.values[index] = value; - } - }; - SparseMatrix.prototype.get = function (row, col, defaultValue) { - if (defaultValue === void 0) { defaultValue = 0; } - this.checkDims(row, col); - var key = this.makeKey(row, col); - if (this.entries.has(key)) { - var index = this.entries.get(key); - return this.values[index]; - } - else { - return defaultValue; - } - }; - SparseMatrix.prototype.getDims = function () { - return [this.nRows, this.nCols]; - }; - SparseMatrix.prototype.getRows = function () { - return __spread(this.rows); - }; - SparseMatrix.prototype.getCols = function () { - return __spread(this.cols); - }; - SparseMatrix.prototype.getValues = function () { - return __spread(this.values); - }; - SparseMatrix.prototype.forEach = function (fn) { - for (var i = 0; i < this.values.length; i++) { - fn(this.values[i], this.rows[i], this.cols[i]); - } - }; - SparseMatrix.prototype.map = function (fn) { - var vals = []; - for (var i = 0; i < this.values.length; i++) { - vals.push(fn(this.values[i], this.rows[i], this.cols[i])); - } - var dims = [this.nRows, this.nCols]; - return new SparseMatrix(this.rows, this.cols, vals, dims); - }; - SparseMatrix.prototype.toArray = function () { - var _this = this; - var rows = utils.empty(this.nRows); - var output = rows.map(function () { - return utils.zeros(_this.nCols); - }); - for (var i = 0; i < this.values.length; i++) { - output[this.rows[i]][this.cols[i]] = this.values[i]; - } - return output; - }; - return SparseMatrix; -}()); -exports.SparseMatrix = SparseMatrix; -function transpose(matrix) { - var cols = []; - var rows = []; - var vals = []; - matrix.forEach(function (value, row, col) { - cols.push(row); - rows.push(col); - vals.push(value); - }); - var dims = 
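The `SparseMatrix` above is a coordinate-list (COO) store with a `row:col` string key for constant-time lookup. A trimmed sketch of the same idea (hypothetical `CooMatrix`, bounds checks omitted):

```ts
// Parallel rows/cols/values arrays plus a "row:col" -> array-index map,
// mirroring the class above.
class CooMatrix {
  private entries = new Map<string, number>();
  private rows: number[] = [];
  private cols: number[] = [];
  private values: number[] = [];
  constructor(public nRows: number, public nCols: number) {}

  set(row: number, col: number, value: number): void {
    const key = `${row}:${col}`;
    const i = this.entries.get(key);
    if (i === undefined) {
      this.entries.set(key, this.values.length);
      this.rows.push(row);
      this.cols.push(col);
      this.values.push(value);
    } else {
      this.values[i] = value;
    }
  }

  get(row: number, col: number, fallback = 0): number {
    const i = this.entries.get(`${row}:${col}`);
    return i === undefined ? fallback : this.values[i];
  }
}

const m = new CooMatrix(3, 3);
m.set(0, 2, 0.5);
console.log(m.get(0, 2), m.get(1, 1)); // 0.5 0
```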
[matrix.nCols, matrix.nRows]; - return new SparseMatrix(rows, cols, vals, dims); -} -exports.transpose = transpose; -function identity(size) { - var _a = __read(size, 1), rows = _a[0]; - var matrix = new SparseMatrix([], [], [], size); - for (var i = 0; i < rows; i++) { - matrix.set(i, i, 1); - } - return matrix; -} -exports.identity = identity; -function pairwiseMultiply(a, b) { - return elementWise(a, b, function (x, y) { return x * y; }); -} -exports.pairwiseMultiply = pairwiseMultiply; -function add(a, b) { - return elementWise(a, b, function (x, y) { return x + y; }); -} -exports.add = add; -function subtract(a, b) { - return elementWise(a, b, function (x, y) { return x - y; }); -} -exports.subtract = subtract; -function maximum(a, b) { - return elementWise(a, b, function (x, y) { return (x > y ? x : y); }); -} -exports.maximum = maximum; -function multiplyScalar(a, scalar) { - return a.map(function (value) { - return value * scalar; - }); -} -exports.multiplyScalar = multiplyScalar; -function eliminateZeros(m) { - var zeroIndices = new Set(); - var values = m.getValues(); - var rows = m.getRows(); - var cols = m.getCols(); - for (var i = 0; i < values.length; i++) { - if (values[i] === 0) { - zeroIndices.add(i); - } - } - var removeByZeroIndex = function (_, index) { return !zeroIndices.has(index); }; - var nextValues = values.filter(removeByZeroIndex); - var nextRows = rows.filter(removeByZeroIndex); - var nextCols = cols.filter(removeByZeroIndex); - return new SparseMatrix(nextRows, nextCols, nextValues, m.getDims()); -} -exports.eliminateZeros = eliminateZeros; -function normalize(m, normType) { - if (normType === void 0) { normType = "l2"; } - var e_1, _a; - var normFn = normFns[normType]; - var colsByRow = new Map(); - m.forEach(function (_, row, col) { - var cols = colsByRow.get(row) || []; - cols.push(col); - colsByRow.set(row, cols); - }); - var nextMatrix = new SparseMatrix([], [], [], m.getDims()); - var _loop_1 = function (row) { - var cols = colsByRow.get(row).sort(); - var vals = cols.map(function (col) { return m.get(row, col); }); - var norm = normFn(vals); - for (var i = 0; i < norm.length; i++) { - nextMatrix.set(row, cols[i], norm[i]); - } - }; - try { - for (var _b = __values(colsByRow.keys()), _c = _b.next(); !_c.done; _c = _b.next()) { - var row = _c.value; - _loop_1(row); - } - } - catch (e_1_1) { e_1 = { error: e_1_1 }; } - finally { - try { - if (_c && !_c.done && (_a = _b.return)) _a.call(_b); - } - finally { if (e_1) throw e_1.error; } - } - return nextMatrix; -} -exports.normalize = normalize; -var normFns = (_a = {}, - _a["max"] = function (xs) { - var max = -Infinity; - for (var i = 0; i < xs.length; i++) { - max = xs[i] > max ? 
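`add`, `subtract` and `maximum` all reduce to one pattern: apply the operator over the union of both matrices' non-zero coordinates, treating missing entries as zero. Sketched over plain maps for illustration:

```ts
// Visit A's coordinates first, then any coordinate only present in B;
// missing values default to zero, as in elementWise above.
function elementWiseSketch(
  a: Map<string, number>,
  b: Map<string, number>,
  op: (x: number, y: number) => number,
): Map<string, number> {
  const out = new Map<string, number>();
  for (const [key, va] of a) out.set(key, op(va, b.get(key) ?? 0));
  for (const [key, vb] of b) {
    if (!out.has(key)) out.set(key, op(a.get(key) ?? 0, vb));
  }
  return out;
}

const A = new Map([['0:1', 2]]);
const B = new Map([['0:1', 3], ['2:0', 5]]);
console.log(elementWiseSketch(A, B, (x, y) => x + y)); // '0:1' => 5, '2:0' => 5
```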
xs[i] : max; - } - return xs.map(function (x) { return x / max; }); - }, - _a["l1"] = function (xs) { - var sum = 0; - for (var i = 0; i < xs.length; i++) { - sum += xs[i]; - } - return xs.map(function (x) { return x / sum; }); - }, - _a["l2"] = function (xs) { - var sum = 0; - for (var i = 0; i < xs.length; i++) { - sum += Math.pow(xs[i], 2); - } - return xs.map(function (x) { return Math.sqrt(Math.pow(x, 2) / sum); }); - }, - _a); -function elementWise(a, b, op) { - var visited = new Set(); - var rows = []; - var cols = []; - var vals = []; - var operate = function (row, col) { - rows.push(row); - cols.push(col); - var nextValue = op(a.get(row, col), b.get(row, col)); - vals.push(nextValue); - }; - var valuesA = a.getValues(); - var rowsA = a.getRows(); - var colsA = a.getCols(); - for (var i = 0; i < valuesA.length; i++) { - var row = rowsA[i]; - var col = colsA[i]; - var key = row + ":" + col; - visited.add(key); - operate(row, col); - } - var valuesB = b.getValues(); - var rowsB = b.getRows(); - var colsB = b.getCols(); - for (var i = 0; i < valuesB.length; i++) { - var row = rowsB[i]; - var col = colsB[i]; - var key = row + ":" + col; - if (visited.has(key)) - continue; - operate(row, col); - } - var dims = [a.nRows, a.nCols]; - return new SparseMatrix(rows, cols, vals, dims); -} -function getCSR(x) { - var entries = []; - x.forEach(function (value, row, col) { - entries.push({ value: value, row: row, col: col }); - }); - entries.sort(function (a, b) { - if (a.row === b.row) { - return a.col - b.col; - } - else { - return a.row - b.row; - } - }); - var indices = []; - var values = []; - var indptr = []; - var currentRow = -1; - for (var i = 0; i < entries.length; i++) { - var _a = entries[i], row = _a.row, col = _a.col, value = _a.value; - if (row !== currentRow) { - currentRow = row; - indptr.push(i); - } - indices.push(col); - values.push(value); - } - return { indices: indices, values: values, indptr: indptr }; -} -exports.getCSR = getCSR; - - -/***/ }), -/* 4 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - -var __read = (this && this.__read) || function (o, n) { - var m = typeof Symbol === "function" && o[Symbol.iterator]; - if (!m) return o; - var i = m.call(o), r, ar = [], e; - try { - while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); - } - catch (error) { e = { error: error }; } - finally { - try { - if (r && !r.done && (m = i["return"])) m.call(i); - } - finally { if (e) throw e.error; } - } - return ar; -}; -var __spread = (this && this.__spread) || function () { - for (var ar = [], i = 0; i < arguments.length; i++) ar = ar.concat(__read(arguments[i])); - return ar; -}; -var __values = (this && this.__values) || function (o) { - var m = typeof Symbol === "function" && o[Symbol.iterator], i = 0; - if (m) return m.call(o); - return { - next: function () { - if (o && i >= o.length) o = void 0; - return { value: o && o[i++], done: !o }; - } - }; -}; -var __importStar = (this && this.__importStar) || function (mod) { - if (mod && mod.__esModule) return mod; - var result = {}; - if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k]; - result["default"] = mod; - return result; -}; -Object.defineProperty(exports, "__esModule", { value: true }); -var utils = __importStar(__webpack_require__(1)); -var FlatTree = (function () { - function FlatTree(hyperplanes, offsets, children, indices) { - this.hyperplanes = hyperplanes; - this.offsets = offsets; - this.children = children; - 
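`getCSR` yields the classic compressed-sparse-row triple: column indices and values sorted by (row, col), plus `indptr` marking where each occupied row starts. A compact restatement, assuming as the code above does that no row is entirely empty:

```ts
// Sort by row, then column; record the start offset of each new row.
function toCSR(entries: { row: number; col: number; value: number }[]) {
  entries.sort((a, b) => (a.row === b.row ? a.col - b.col : a.row - b.row));
  const indices: number[] = [];
  const values: number[] = [];
  const indptr: number[] = [];
  let currentRow = -1;
  for (let i = 0; i < entries.length; i++) {
    if (entries[i].row !== currentRow) {
      currentRow = entries[i].row;
      indptr.push(i);
    }
    indices.push(entries[i].col);
    values.push(entries[i].value);
  }
  return { indices, values, indptr };
}

const csr = toCSR([
  { row: 1, col: 0, value: 5 },
  { row: 0, col: 2, value: 3 },
]);
console.log(csr); // { indices: [2, 0], values: [3, 5], indptr: [0, 1] }
```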
this.indices = indices; - } - return FlatTree; -}()); -exports.FlatTree = FlatTree; -function makeForest(data, nNeighbors, nTrees, random) { - var leafSize = Math.max(10, nNeighbors); - var trees = utils - .range(nTrees) - .map(function (_, i) { return makeTree(data, leafSize, i, random); }); - var forest = trees.map(function (tree) { return flattenTree(tree, leafSize); }); - return forest; -} -exports.makeForest = makeForest; -function makeTree(data, leafSize, n, random) { - if (leafSize === void 0) { leafSize = 30; } - var indices = utils.range(data.length); - var tree = makeEuclideanTree(data, indices, leafSize, n, random); - return tree; -} -function makeEuclideanTree(data, indices, leafSize, q, random) { - if (leafSize === void 0) { leafSize = 30; } - if (indices.length > leafSize) { - var splitResults = euclideanRandomProjectionSplit(data, indices, random); - var indicesLeft = splitResults.indicesLeft, indicesRight = splitResults.indicesRight, hyperplane = splitResults.hyperplane, offset = splitResults.offset; - var leftChild = makeEuclideanTree(data, indicesLeft, leafSize, q + 1, random); - var rightChild = makeEuclideanTree(data, indicesRight, leafSize, q + 1, random); - var node = { leftChild: leftChild, rightChild: rightChild, isLeaf: false, hyperplane: hyperplane, offset: offset }; - return node; - } - else { - var node = { indices: indices, isLeaf: true }; - return node; - } -} -function euclideanRandomProjectionSplit(data, indices, random) { - var dim = data[0].length; - var leftIndex = utils.tauRandInt(indices.length, random); - var rightIndex = utils.tauRandInt(indices.length, random); - rightIndex += leftIndex === rightIndex ? 1 : 0; - rightIndex = rightIndex % indices.length; - var left = indices[leftIndex]; - var right = indices[rightIndex]; - var hyperplaneOffset = 0; - var hyperplaneVector = utils.zeros(dim); - for (var i = 0; i < hyperplaneVector.length; i++) { - hyperplaneVector[i] = data[left][i] - data[right][i]; - hyperplaneOffset -= - (hyperplaneVector[i] * (data[left][i] + data[right][i])) / 2.0; - } - var nLeft = 0; - var nRight = 0; - var side = utils.zeros(indices.length); - for (var i = 0; i < indices.length; i++) { - var margin = hyperplaneOffset; - for (var d = 0; d < dim; d++) { - margin += hyperplaneVector[d] * data[indices[i]][d]; - } - if (margin === 0) { - side[i] = utils.tauRandInt(2, random); - if (side[i] === 0) { - nLeft += 1; - } - else { - nRight += 1; - } - } - else if (margin > 0) { - side[i] = 0; - nLeft += 1; - } - else { - side[i] = 1; - nRight += 1; - } - } - var indicesLeft = utils.zeros(nLeft); - var indicesRight = utils.zeros(nRight); - nLeft = 0; - nRight = 0; - for (var i in utils.range(side.length)) { - if (side[i] === 0) { - indicesLeft[nLeft] = indices[i]; - nLeft += 1; - } - else { - indicesRight[nRight] = indices[i]; - nRight += 1; - } - } - return { - indicesLeft: indicesLeft, - indicesRight: indicesRight, - hyperplane: hyperplaneVector, - offset: hyperplaneOffset, - }; -} -function flattenTree(tree, leafSize) { - var nNodes = numNodes(tree); - var nLeaves = numLeaves(tree); - var hyperplanes = utils - .range(nNodes) - .map(function () { return utils.zeros(tree.hyperplane.length); }); - var offsets = utils.zeros(nNodes); - var children = utils.range(nNodes).map(function () { return [-1, -1]; }); - var indices = utils - .range(nLeaves) - .map(function () { return utils.range(leafSize).map(function () { return -1; }); }); - recursiveFlatten(tree, hyperplanes, offsets, children, indices, 0, 0); - return new FlatTree(hyperplanes, 
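The split above picks two random points, takes the hyperplane normal to their difference through the midpoint, and assigns each index by the sign of its margin. The same construction in isolation (hypothetical helper names):

```ts
// Hyperplane perpendicular to the segment between two sampled points,
// passing through its midpoint.
function splitPlane(left: number[], right: number[]) {
  const hyperplane = left.map((x, i) => x - right[i]);
  let offset = 0;
  for (let i = 0; i < hyperplane.length; i++) {
    offset -= (hyperplane[i] * (left[i] + right[i])) / 2;
  }
  return { hyperplane, offset };
}

// The sign of the margin picks a side; the real code breaks
// margin === 0 ties with a random coin flip.
function side(point: number[], hyperplane: number[], offset: number): 0 | 1 {
  let margin = offset;
  for (let d = 0; d < point.length; d++) margin += hyperplane[d] * point[d];
  return margin > 0 ? 0 : 1;
}

const { hyperplane, offset } = splitPlane([0, 0], [2, 0]);
console.log(side([-1, 5], hyperplane, offset)); // 0 (same side as [0, 0])
```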
offsets, children, indices); -} -function recursiveFlatten(tree, hyperplanes, offsets, children, indices, nodeNum, leafNum) { - var _a; - if (tree.isLeaf) { - children[nodeNum][0] = -leafNum; - (_a = indices[leafNum]).splice.apply(_a, __spread([0, tree.indices.length], tree.indices)); - leafNum += 1; - return { nodeNum: nodeNum, leafNum: leafNum }; - } - else { - hyperplanes[nodeNum] = tree.hyperplane; - offsets[nodeNum] = tree.offset; - children[nodeNum][0] = nodeNum + 1; - var oldNodeNum = nodeNum; - var res = recursiveFlatten(tree.leftChild, hyperplanes, offsets, children, indices, nodeNum + 1, leafNum); - nodeNum = res.nodeNum; - leafNum = res.leafNum; - children[oldNodeNum][1] = nodeNum + 1; - res = recursiveFlatten(tree.rightChild, hyperplanes, offsets, children, indices, nodeNum + 1, leafNum); - return { nodeNum: res.nodeNum, leafNum: res.leafNum }; - } -} -function numNodes(tree) { - if (tree.isLeaf) { - return 1; - } - else { - return 1 + numNodes(tree.leftChild) + numNodes(tree.rightChild); - } -} -function numLeaves(tree) { - if (tree.isLeaf) { - return 1; - } - else { - return numLeaves(tree.leftChild) + numLeaves(tree.rightChild); - } -} -function makeLeafArray(rpForest) { - var e_1, _a; - if (rpForest.length > 0) { - var output = []; - try { - for (var rpForest_1 = __values(rpForest), rpForest_1_1 = rpForest_1.next(); !rpForest_1_1.done; rpForest_1_1 = rpForest_1.next()) { - var tree = rpForest_1_1.value; - output.push.apply(output, __spread(tree.indices)); - } - } - catch (e_1_1) { e_1 = { error: e_1_1 }; } - finally { - try { - if (rpForest_1_1 && !rpForest_1_1.done && (_a = rpForest_1.return)) _a.call(rpForest_1); - } - finally { if (e_1) throw e_1.error; } - } - return output; - } - else { - return [[-1]]; - } -} -exports.makeLeafArray = makeLeafArray; -function selectSide(hyperplane, offset, point, random) { - var margin = offset; - for (var d = 0; d < point.length; d++) { - margin += hyperplane[d] * point[d]; - } - if (margin === 0) { - var side = utils.tauRandInt(2, random); - return side; - } - else if (margin > 0) { - return 0; - } - else { - return 1; - } -} -function searchFlatTree(point, tree, random) { - var node = 0; - while (tree.children[node][0] > 0) { - var side = selectSide(tree.hyperplanes[node], tree.offsets[node], point, random); - if (side === 0) { - node = tree.children[node][0]; - } - else { - node = tree.children[node][1]; - } - } - var index = -1 * tree.children[node][0]; - return tree.indices[index]; -} -exports.searchFlatTree = searchFlatTree; - - -/***/ }), -/* 5 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - -Object.defineProperty(exports, "__esModule", { value: true }); -var umap_1 = __webpack_require__(6); -exports.UMAP = umap_1.UMAP; - - -/***/ }), -/* 6 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - -var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { - return new (P || (P = Promise))(function (resolve, reject) { - function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } - function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } - function step(result) { result.done ? 
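`searchFlatTree` walks the flattened arrays directly: `children[node][0] > 0` marks an internal node, while a non-positive entry is the negated index into the leaf table. Schematically (the `FlatTreeLike` shape is assumed from the constructor above):

```ts
interface FlatTreeLike {
  hyperplanes: number[][];
  offsets: number[];
  children: number[][];
  indices: number[][];
}

// Descend by margin sign until a leaf is reached, then return its stored
// candidate indices (margin ties are randomized in the bundle).
function searchSketch(point: number[], tree: FlatTreeLike): number[] {
  let node = 0;
  while (tree.children[node][0] > 0) {
    let margin = tree.offsets[node];
    for (let d = 0; d < point.length; d++) {
      margin += tree.hyperplanes[node][d] * point[d];
    }
    node = margin > 0 ? tree.children[node][0] : tree.children[node][1];
  }
  return tree.indices[-tree.children[node][0]];
}
```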
resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); } - step((generator = generator.apply(thisArg, _arguments || [])).next()); - }); -}; -var __generator = (this && this.__generator) || function (thisArg, body) { - var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g; - return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g; - function verb(n) { return function (v) { return step([n, v]); }; } - function step(op) { - if (f) throw new TypeError("Generator is already executing."); - while (_) try { - if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t; - if (y = 0, t) op = [op[0] & 2, t.value]; - switch (op[0]) { - case 0: case 1: t = op; break; - case 4: _.label++; return { value: op[1], done: false }; - case 5: _.label++; y = op[1]; op = [0]; continue; - case 7: op = _.ops.pop(); _.trys.pop(); continue; - default: - if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; } - if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; } - if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; } - if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; } - if (t[2]) _.ops.pop(); - _.trys.pop(); continue; - } - op = body.call(thisArg, _); - } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; } - if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true }; - } -}; -var __read = (this && this.__read) || function (o, n) { - var m = typeof Symbol === "function" && o[Symbol.iterator]; - if (!m) return o; - var i = m.call(o), r, ar = [], e; - try { - while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); - } - catch (error) { e = { error: error }; } - finally { - try { - if (r && !r.done && (m = i["return"])) m.call(i); - } - finally { if (e) throw e.error; } - } - return ar; -}; -var __spread = (this && this.__spread) || function () { - for (var ar = [], i = 0; i < arguments.length; i++) ar = ar.concat(__read(arguments[i])); - return ar; -}; -var __importStar = (this && this.__importStar) || function (mod) { - if (mod && mod.__esModule) return mod; - var result = {}; - if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k]; - result["default"] = mod; - return result; -}; -var __importDefault = (this && this.__importDefault) || function (mod) { - return (mod && mod.__esModule) ? 
mod : { "default": mod }; -}; -Object.defineProperty(exports, "__esModule", { value: true }); -var heap = __importStar(__webpack_require__(2)); -var matrix = __importStar(__webpack_require__(3)); -var nnDescent = __importStar(__webpack_require__(7)); -var tree = __importStar(__webpack_require__(4)); -var utils = __importStar(__webpack_require__(1)); -var ml_levenberg_marquardt_1 = __importDefault(__webpack_require__(8)); -var SMOOTH_K_TOLERANCE = 1e-5; -var MIN_K_DIST_SCALE = 1e-3; -var UMAP = (function () { - function UMAP(params) { - if (params === void 0) { params = {}; } - var _this = this; - this.learningRate = 1.0; - this.localConnectivity = 1.0; - this.minDist = 0.1; - this.nComponents = 2; - this.nEpochs = 0; - this.nNeighbors = 15; - this.negativeSampleRate = 5; - this.random = Math.random; - this.repulsionStrength = 1.0; - this.setOpMixRatio = 1.0; - this.spread = 1.0; - this.transformQueueSize = 4.0; - this.targetMetric = "categorical"; - this.targetWeight = 0.5; - this.targetNNeighbors = this.nNeighbors; - this.distanceFn = euclidean; - this.isInitialized = false; - this.rpForest = []; - this.embedding = []; - this.optimizationState = new OptimizationState(); - var setParam = function (key) { - if (params[key] !== undefined) - _this[key] = params[key]; - }; - setParam('distanceFn'); - setParam('learningRate'); - setParam('localConnectivity'); - setParam('minDist'); - setParam('nComponents'); - setParam('nEpochs'); - setParam('nNeighbors'); - setParam('negativeSampleRate'); - setParam('random'); - setParam('repulsionStrength'); - setParam('setOpMixRatio'); - setParam('spread'); - setParam('transformQueueSize'); - } - UMAP.prototype.fit = function (X) { - this.initializeFit(X); - this.optimizeLayout(); - return this.embedding; - }; - UMAP.prototype.fitAsync = function (X, callback) { - if (callback === void 0) { callback = function () { return true; }; } - return __awaiter(this, void 0, void 0, function () { - return __generator(this, function (_a) { - switch (_a.label) { - case 0: - this.initializeFit(X); - return [4, this.optimizeLayoutAsync(callback)]; - case 1: - _a.sent(); - return [2, this.embedding]; - } - }); - }); - }; - UMAP.prototype.setSupervisedProjection = function (Y, params) { - if (params === void 0) { params = {}; } - this.Y = Y; - this.targetMetric = params.targetMetric || this.targetMetric; - this.targetWeight = params.targetWeight || this.targetWeight; - this.targetNNeighbors = params.targetNNeighbors || this.targetNNeighbors; - }; - UMAP.prototype.setPrecomputedKNN = function (knnIndices, knnDistances) { - this.knnIndices = knnIndices; - this.knnDistances = knnDistances; - }; - UMAP.prototype.initializeFit = function (X) { - if (this.X === X && this.isInitialized) { - return this.getNEpochs(); - } - this.X = X; - if (!this.knnIndices && !this.knnDistances) { - var knnResults = this.nearestNeighbors(X); - this.knnIndices = knnResults.knnIndices; - this.knnDistances = knnResults.knnDistances; - } - this.graph = this.fuzzySimplicialSet(X, this.nNeighbors, this.setOpMixRatio); - this.makeSearchFns(); - this.searchGraph = this.makeSearchGraph(X); - this.processGraphForSupervisedProjection(); - var _a = this.initializeSimplicialSetEmbedding(), head = _a.head, tail = _a.tail, epochsPerSample = _a.epochsPerSample; - this.optimizationState.head = head; - this.optimizationState.tail = tail; - this.optimizationState.epochsPerSample = epochsPerSample; - this.initializeOptimization(); - this.prepareForOptimizationLoop(); - this.isInitialized = true; - return 
this.getNEpochs(); - }; - UMAP.prototype.makeSearchFns = function () { - var _a = nnDescent.makeInitializations(this.distanceFn), initFromTree = _a.initFromTree, initFromRandom = _a.initFromRandom; - this.initFromTree = initFromTree; - this.initFromRandom = initFromRandom; - this.search = nnDescent.makeInitializedNNSearch(this.distanceFn); - }; - UMAP.prototype.makeSearchGraph = function (X) { - var knnIndices = this.knnIndices; - var knnDistances = this.knnDistances; - var dims = [X.length, X.length]; - var searchGraph = new matrix.SparseMatrix([], [], [], dims); - for (var i = 0; i < knnIndices.length; i++) { - var knn = knnIndices[i]; - var distances = knnDistances[i]; - for (var j = 0; j < knn.length; j++) { - var neighbor = knn[j]; - var distance = distances[j]; - if (distance > 0) { - searchGraph.set(i, neighbor, distance); - } - } - } - var transpose = matrix.transpose(searchGraph); - return matrix.maximum(searchGraph, transpose); - }; - UMAP.prototype.transform = function (toTransform) { - var _this = this; - var rawData = this.X; - if (rawData === undefined || rawData.length === 0) { - throw new Error('No data has been fit.'); - } - var nNeighbors = Math.floor(this.nNeighbors * this.transformQueueSize); - var init = nnDescent.initializeSearch(this.rpForest, rawData, toTransform, nNeighbors, this.initFromRandom, this.initFromTree, this.random); - var result = this.search(rawData, this.searchGraph, init, toTransform); - var _a = heap.deheapSort(result), indices = _a.indices, distances = _a.weights; - indices = indices.map(function (x) { return x.slice(0, _this.nNeighbors); }); - distances = distances.map(function (x) { return x.slice(0, _this.nNeighbors); }); - var adjustedLocalConnectivity = Math.max(0, this.localConnectivity - 1); - var _b = this.smoothKNNDistance(distances, this.nNeighbors, adjustedLocalConnectivity), sigmas = _b.sigmas, rhos = _b.rhos; - var _c = this.computeMembershipStrengths(indices, distances, sigmas, rhos), rows = _c.rows, cols = _c.cols, vals = _c.vals; - var size = [toTransform.length, rawData.length]; - var graph = new matrix.SparseMatrix(rows, cols, vals, size); - var normed = matrix.normalize(graph, "l1"); - var csrMatrix = matrix.getCSR(normed); - var nPoints = toTransform.length; - var eIndices = utils.reshape2d(csrMatrix.indices, nPoints, this.nNeighbors); - var eWeights = utils.reshape2d(csrMatrix.values, nPoints, this.nNeighbors); - var embedding = initTransform(eIndices, eWeights, this.embedding); - var nEpochs = this.nEpochs - ? this.nEpochs / 3 - : graph.nRows <= 10000 - ? 100 - : 30; - var graphMax = graph - .getValues() - .reduce(function (max, val) { return (val > max ? val : max); }, 0); - graph = graph.map(function (value) { return (value < graphMax / nEpochs ? 
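`transform` reuses a finished fit to place unseen points, so the pattern is fit once, project many. A hedged sketch with made-up points, kept above `nNeighbors * transformQueueSize` so the random initialization can sample enough distinct indices:

```ts
import { UMAP } from 'umap-js';

// Ten training points: more than nNeighbors * transformQueueSize = 8.
const train = [
  [0.00, 0.05], [0.05, 0.00], [0.10, 0.10], [0.05, 0.10], [0.10, 0.00],
  [1.00, 0.95], [0.95, 1.00], [1.05, 1.05], [0.95, 1.05], [1.00, 1.10],
];

const model = new UMAP({ nNeighbors: 2 });
model.fit(train);

// Unseen points are embedded against the stored search graph; no refit.
const projected = model.transform([[0.05, 0.15], [1.05, 0.95]]);
console.log(projected.length); // 2
```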
0 : value); }); - graph = matrix.eliminateZeros(graph); - var epochsPerSample = this.makeEpochsPerSample(graph.getValues(), nEpochs); - var head = graph.getRows(); - var tail = graph.getCols(); - this.assignOptimizationStateParameters({ - headEmbedding: embedding, - tailEmbedding: this.embedding, - head: head, - tail: tail, - currentEpoch: 0, - nEpochs: nEpochs, - nVertices: graph.getDims()[1], - epochsPerSample: epochsPerSample, - }); - this.prepareForOptimizationLoop(); - return this.optimizeLayout(); - }; - UMAP.prototype.processGraphForSupervisedProjection = function () { - var _a = this, Y = _a.Y, X = _a.X; - if (Y) { - if (Y.length !== X.length) { - throw new Error('Length of X and y must be equal'); - } - if (this.targetMetric === "categorical") { - var lt = this.targetWeight < 1.0; - var farDist = lt ? 2.5 * (1.0 / (1.0 - this.targetWeight)) : 1.0e12; - this.graph = this.categoricalSimplicialSetIntersection(this.graph, Y, farDist); - } - } - }; - UMAP.prototype.step = function () { - var currentEpoch = this.optimizationState.currentEpoch; - if (currentEpoch < this.getNEpochs()) { - this.optimizeLayoutStep(currentEpoch); - } - return this.optimizationState.currentEpoch; - }; - UMAP.prototype.getEmbedding = function () { - return this.embedding; - }; - UMAP.prototype.nearestNeighbors = function (X) { - var _a = this, distanceFn = _a.distanceFn, nNeighbors = _a.nNeighbors; - var log2 = function (n) { return Math.log(n) / Math.log(2); }; - var metricNNDescent = nnDescent.makeNNDescent(distanceFn, this.random); - var round = function (n) { - return n === 0.5 ? 0 : Math.round(n); - }; - var nTrees = 5 + Math.floor(round(Math.pow(X.length, 0.5) / 20.0)); - var nIters = Math.max(5, Math.floor(Math.round(log2(X.length)))); - this.rpForest = tree.makeForest(X, nNeighbors, nTrees, this.random); - var leafArray = tree.makeLeafArray(this.rpForest); - var _b = metricNNDescent(X, leafArray, nNeighbors, nIters), indices = _b.indices, weights = _b.weights; - return { knnIndices: indices, knnDistances: weights }; - }; - UMAP.prototype.fuzzySimplicialSet = function (X, nNeighbors, setOpMixRatio) { - if (setOpMixRatio === void 0) { setOpMixRatio = 1.0; } - var _a = this, _b = _a.knnIndices, knnIndices = _b === void 0 ? [] : _b, _c = _a.knnDistances, knnDistances = _c === void 0 ? 
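`initializeFit`, `step` and `getEmbedding` also allow driving the optimizer one epoch at a time, e.g. to animate convergence. A sketch with hypothetical toy points:

```ts
import { UMAP } from 'umap-js';

const points = [
  [0, 0, 1], [0, 1, 0], [1, 0, 0],
  [5, 5, 6], [5, 6, 5], [6, 5, 5],
];

// initializeFit builds the graph and returns the epoch budget; step()
// advances exactly one epoch, which is handy for rendering progress.
const u = new UMAP({ nNeighbors: 2 });
const nEpochs = u.initializeFit(points);
for (let epoch = 0; epoch < nEpochs; epoch++) {
  u.step();
}
console.log(u.getEmbedding().length); // 6
```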
[] : _c, localConnectivity = _a.localConnectivity; - var _d = this.smoothKNNDistance(knnDistances, nNeighbors, localConnectivity), sigmas = _d.sigmas, rhos = _d.rhos; - var _e = this.computeMembershipStrengths(knnIndices, knnDistances, sigmas, rhos), rows = _e.rows, cols = _e.cols, vals = _e.vals; - var size = [X.length, X.length]; - var sparseMatrix = new matrix.SparseMatrix(rows, cols, vals, size); - var transpose = matrix.transpose(sparseMatrix); - var prodMatrix = matrix.pairwiseMultiply(sparseMatrix, transpose); - var a = matrix.subtract(matrix.add(sparseMatrix, transpose), prodMatrix); - var b = matrix.multiplyScalar(a, setOpMixRatio); - var c = matrix.multiplyScalar(prodMatrix, 1.0 - setOpMixRatio); - var result = matrix.add(b, c); - return result; - }; - UMAP.prototype.categoricalSimplicialSetIntersection = function (simplicialSet, target, farDist, unknownDist) { - if (unknownDist === void 0) { unknownDist = 1.0; } - var intersection = fastIntersection(simplicialSet, target, unknownDist, farDist); - intersection = matrix.eliminateZeros(intersection); - return resetLocalConnectivity(intersection); - }; - UMAP.prototype.smoothKNNDistance = function (distances, k, localConnectivity, nIter, bandwidth) { - if (localConnectivity === void 0) { localConnectivity = 1.0; } - if (nIter === void 0) { nIter = 64; } - if (bandwidth === void 0) { bandwidth = 1.0; } - var target = (Math.log(k) / Math.log(2)) * bandwidth; - var rho = utils.zeros(distances.length); - var result = utils.zeros(distances.length); - for (var i = 0; i < distances.length; i++) { - var lo = 0.0; - var hi = Infinity; - var mid = 1.0; - var ithDistances = distances[i]; - var nonZeroDists = ithDistances.filter(function (d) { return d > 0.0; }); - if (nonZeroDists.length >= localConnectivity) { - var index = Math.floor(localConnectivity); - var interpolation = localConnectivity - index; - if (index > 0) { - rho[i] = nonZeroDists[index - 1]; - if (interpolation > SMOOTH_K_TOLERANCE) { - rho[i] += - interpolation * (nonZeroDists[index] - nonZeroDists[index - 1]); - } - } - else { - rho[i] = interpolation * nonZeroDists[0]; - } - } - else if (nonZeroDists.length > 0) { - rho[i] = utils.max(nonZeroDists); - } - for (var n = 0; n < nIter; n++) { - var psum = 0.0; - for (var j = 1; j < distances[i].length; j++) { - var d = distances[i][j] - rho[i]; - if (d > 0) { - psum += Math.exp(-(d / mid)); - } - else { - psum += 1.0; - } - } - if (Math.abs(psum - target) < SMOOTH_K_TOLERANCE) { - break; - } - if (psum > target) { - hi = mid; - mid = (lo + hi) / 2.0; - } - else { - lo = mid; - if (hi === Infinity) { - mid *= 2; - } - else { - mid = (lo + hi) / 2.0; - } - } - } - result[i] = mid; - if (rho[i] > 0.0) { - var meanIthDistances = utils.mean(ithDistances); - if (result[i] < MIN_K_DIST_SCALE * meanIthDistances) { - result[i] = MIN_K_DIST_SCALE * meanIthDistances; - } - } - else { - var meanDistances = utils.mean(distances.map(utils.mean)); - if (result[i] < MIN_K_DIST_SCALE * meanDistances) { - result[i] = MIN_K_DIST_SCALE * meanDistances; - } - } - } - return { sigmas: result, rhos: rho }; - }; - UMAP.prototype.computeMembershipStrengths = function (knnIndices, knnDistances, sigmas, rhos) { - var nSamples = knnIndices.length; - var nNeighbors = knnIndices[0].length; - var rows = utils.zeros(nSamples * nNeighbors); - var cols = utils.zeros(nSamples * nNeighbors); - var vals = utils.zeros(nSamples * nNeighbors); - for (var i = 0; i < nSamples; i++) { - for (var j = 0; j < nNeighbors; j++) { - var val = 0; - if (knnIndices[i][j] === -1) 
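Per entry, `fuzzySimplicialSet` symmetrizes the directed membership matrix with fuzzy set algebra, interpolating between fuzzy union and fuzzy intersection via `setOpMixRatio`. The scalar core of that step:

```ts
// With membership strengths a = A[i][j] and b = A[j][i]: fuzzy union is
// a + b - a*b, fuzzy intersection is a*b, and setOpMixRatio blends them.
function combineMemberships(a: number, b: number, setOpMixRatio: number): number {
  const union = a + b - a * b;
  const intersection = a * b;
  return setOpMixRatio * union + (1 - setOpMixRatio) * intersection;
}

console.log(combineMemberships(0.8, 0.5, 1.0)); // 0.9 (pure union, the default)
console.log(combineMemberships(0.8, 0.5, 0.0)); // 0.4 (pure intersection)
```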
{ - continue; - } - if (knnIndices[i][j] === i) { - val = 0.0; - } - else if (knnDistances[i][j] - rhos[i] <= 0.0) { - val = 1.0; - } - else { - val = Math.exp(-((knnDistances[i][j] - rhos[i]) / sigmas[i])); - } - rows[i * nNeighbors + j] = i; - cols[i * nNeighbors + j] = knnIndices[i][j]; - vals[i * nNeighbors + j] = val; - } - } - return { rows: rows, cols: cols, vals: vals }; - }; - UMAP.prototype.initializeSimplicialSetEmbedding = function () { - var _this = this; - var nEpochs = this.getNEpochs(); - var nComponents = this.nComponents; - var graphValues = this.graph.getValues(); - var graphMax = 0; - for (var i = 0; i < graphValues.length; i++) { - var value = graphValues[i]; - if (graphMax < graphValues[i]) { - graphMax = value; - } - } - var graph = this.graph.map(function (value) { - if (value < graphMax / nEpochs) { - return 0; - } - else { - return value; - } - }); - this.embedding = utils.zeros(graph.nRows).map(function () { - return utils.zeros(nComponents).map(function () { - return utils.tauRand(_this.random) * 20 + -10; - }); - }); - var weights = []; - var head = []; - var tail = []; - for (var i = 0; i < graph.nRows; i++) { - for (var j = 0; j < graph.nCols; j++) { - var value = graph.get(i, j); - if (value) { - weights.push(value); - tail.push(i); - head.push(j); - } - } - } - var epochsPerSample = this.makeEpochsPerSample(weights, nEpochs); - return { head: head, tail: tail, epochsPerSample: epochsPerSample }; - }; - UMAP.prototype.makeEpochsPerSample = function (weights, nEpochs) { - var result = utils.filled(weights.length, -1.0); - var max = utils.max(weights); - var nSamples = weights.map(function (w) { return (w / max) * nEpochs; }); - nSamples.forEach(function (n, i) { - if (n > 0) - result[i] = nEpochs / nSamples[i]; - }); - return result; - }; - UMAP.prototype.assignOptimizationStateParameters = function (state) { - Object.assign(this.optimizationState, state); - }; - UMAP.prototype.prepareForOptimizationLoop = function () { - var _a = this, repulsionStrength = _a.repulsionStrength, learningRate = _a.learningRate, negativeSampleRate = _a.negativeSampleRate; - var _b = this.optimizationState, epochsPerSample = _b.epochsPerSample, headEmbedding = _b.headEmbedding, tailEmbedding = _b.tailEmbedding; - var dim = headEmbedding[0].length; - var moveOther = headEmbedding.length === tailEmbedding.length; - var epochsPerNegativeSample = epochsPerSample.map(function (e) { return e / negativeSampleRate; }); - var epochOfNextNegativeSample = __spread(epochsPerNegativeSample); - var epochOfNextSample = __spread(epochsPerSample); - this.assignOptimizationStateParameters({ - epochOfNextSample: epochOfNextSample, - epochOfNextNegativeSample: epochOfNextNegativeSample, - epochsPerNegativeSample: epochsPerNegativeSample, - moveOther: moveOther, - initialAlpha: learningRate, - alpha: learningRate, - gamma: repulsionStrength, - dim: dim, - }); - }; - UMAP.prototype.initializeOptimization = function () { - var headEmbedding = this.embedding; - var tailEmbedding = this.embedding; - var _a = this.optimizationState, head = _a.head, tail = _a.tail, epochsPerSample = _a.epochsPerSample; - var nEpochs = this.getNEpochs(); - var nVertices = this.graph.nCols; - var _b = findABParams(this.spread, this.minDist), a = _b.a, b = _b.b; - this.assignOptimizationStateParameters({ - headEmbedding: headEmbedding, - tailEmbedding: tailEmbedding, - head: head, - tail: tail, - epochsPerSample: epochsPerSample, - a: a, - b: b, - nEpochs: nEpochs, - nVertices: nVertices, - }); - }; - 
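`makeEpochsPerSample` converts edge weights into a sampling schedule: the strongest edge is updated every epoch, an edge at half that weight every second epoch, and so on. Restated and checked on small numbers:

```ts
// result[i] = nEpochs / ((w_i / max) * nEpochs) = max / w_i;
// unsampled (zero-weight) edges keep the sentinel -1, as above.
function epochsPerSample(weights: number[], nEpochs: number): number[] {
  const max = Math.max(...weights);
  const nSamples = weights.map(w => (w / max) * nEpochs);
  return nSamples.map(n => (n > 0 ? nEpochs / n : -1));
}

console.log(epochsPerSample([1, 0.5, 0.25], 200)); // [1, 2, 4]
```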
UMAP.prototype.optimizeLayoutStep = function (n) { - var optimizationState = this.optimizationState; - var head = optimizationState.head, tail = optimizationState.tail, headEmbedding = optimizationState.headEmbedding, tailEmbedding = optimizationState.tailEmbedding, epochsPerSample = optimizationState.epochsPerSample, epochOfNextSample = optimizationState.epochOfNextSample, epochOfNextNegativeSample = optimizationState.epochOfNextNegativeSample, epochsPerNegativeSample = optimizationState.epochsPerNegativeSample, moveOther = optimizationState.moveOther, initialAlpha = optimizationState.initialAlpha, alpha = optimizationState.alpha, gamma = optimizationState.gamma, a = optimizationState.a, b = optimizationState.b, dim = optimizationState.dim, nEpochs = optimizationState.nEpochs, nVertices = optimizationState.nVertices; - var clipValue = 4.0; - for (var i = 0; i < epochsPerSample.length; i++) { - if (epochOfNextSample[i] > n) { - continue; - } - var j = head[i]; - var k = tail[i]; - var current = headEmbedding[j]; - var other = tailEmbedding[k]; - var distSquared = rDist(current, other); - var gradCoeff = 0; - if (distSquared > 0) { - gradCoeff = -2.0 * a * b * Math.pow(distSquared, b - 1.0); - gradCoeff /= a * Math.pow(distSquared, b) + 1.0; - } - for (var d = 0; d < dim; d++) { - var gradD = clip(gradCoeff * (current[d] - other[d]), clipValue); - current[d] += gradD * alpha; - if (moveOther) { - other[d] += -gradD * alpha; - } - } - epochOfNextSample[i] += epochsPerSample[i]; - var nNegSamples = Math.floor((n - epochOfNextNegativeSample[i]) / epochsPerNegativeSample[i]); - for (var p = 0; p < nNegSamples; p++) { - var k_1 = utils.tauRandInt(nVertices, this.random); - var other_1 = tailEmbedding[k_1]; - var distSquared_1 = rDist(current, other_1); - var gradCoeff_1 = 0.0; - if (distSquared_1 > 0.0) { - gradCoeff_1 = 2.0 * gamma * b; - gradCoeff_1 /= - (0.001 + distSquared_1) * (a * Math.pow(distSquared_1, b) + 1); - } - else if (j === k_1) { - continue; - } - for (var d = 0; d < dim; d++) { - var gradD = 4.0; - if (gradCoeff_1 > 0.0) { - gradD = clip(gradCoeff_1 * (current[d] - other_1[d]), clipValue); - } - current[d] += gradD * alpha; - } - } - epochOfNextNegativeSample[i] += nNegSamples * epochsPerNegativeSample[i]; - } - optimizationState.alpha = initialAlpha * (1.0 - n / nEpochs); - optimizationState.currentEpoch += 1; - return headEmbedding; - }; - UMAP.prototype.optimizeLayoutAsync = function (epochCallback) { - var _this = this; - if (epochCallback === void 0) { epochCallback = function () { return true; }; } - return new Promise(function (resolve, reject) { - var step = function () { return __awaiter(_this, void 0, void 0, function () { - var _a, nEpochs, currentEpoch, epochCompleted, shouldStop, isFinished; - return __generator(this, function (_b) { - try { - _a = this.optimizationState, nEpochs = _a.nEpochs, currentEpoch = _a.currentEpoch; - this.embedding = this.optimizeLayoutStep(currentEpoch); - epochCompleted = this.optimizationState.currentEpoch; - shouldStop = epochCallback(epochCompleted) === false; - isFinished = epochCompleted === nEpochs; - if (!shouldStop && !isFinished) { - step(); - } - else { - return [2, resolve(isFinished)]; - } - } - catch (err) { - reject(err); - } - return [2]; - }); - }); }; - step(); - }); - }; - UMAP.prototype.optimizeLayout = function (epochCallback) { - if (epochCallback === void 0) { epochCallback = function () { return true; }; } - var isFinished = false; - var embedding = []; - while (!isFinished) { - var _a = this.optimizationState, 
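The inner loop above applies two closed-form gradient coefficients of the low-dimensional curve 1/(1 + a·d^(2b)), with per-coordinate clipping to ±4. The coefficients in isolation (the constants from `OptimizationState` are used for the spot check):

```ts
// Attraction along a sampled edge (pulls connected points together).
function attractiveGradCoeff(distSq: number, a: number, b: number): number {
  if (distSq <= 0) return 0;
  return (-2 * a * b * Math.pow(distSq, b - 1)) / (a * Math.pow(distSq, b) + 1);
}

// Repulsion against a negative sample (pushes random points apart); the
// 0.001 term guards against division by zero at tiny distances.
function repulsiveGradCoeff(distSq: number, a: number, b: number, gamma: number): number {
  return (2 * gamma * b) / ((0.001 + distSq) * (a * Math.pow(distSq, b) + 1));
}

console.log(attractiveGradCoeff(1, 1.5769, 0.8951) < 0);    // true: moves closer
console.log(repulsiveGradCoeff(1, 1.5769, 0.8951, 1) > 0);  // true: moves apart
```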
nEpochs = _a.nEpochs, currentEpoch = _a.currentEpoch; - embedding = this.optimizeLayoutStep(currentEpoch); - var epochCompleted = this.optimizationState.currentEpoch; - var shouldStop = epochCallback(epochCompleted) === false; - isFinished = epochCompleted === nEpochs || shouldStop; - } - return embedding; - }; - UMAP.prototype.getNEpochs = function () { - var graph = this.graph; - if (this.nEpochs > 0) { - return this.nEpochs; - } - var length = graph.nRows; - if (length <= 2500) { - return 500; - } - else if (length <= 5000) { - return 400; - } - else if (length <= 7500) { - return 300; - } - else { - return 200; - } - }; - return UMAP; -}()); -exports.UMAP = UMAP; -function euclidean(x, y) { - var result = 0; - for (var i = 0; i < x.length; i++) { - result += Math.pow((x[i] - y[i]), 2); - } - return Math.sqrt(result); -} -exports.euclidean = euclidean; -function cosine(x, y) { - var result = 0.0; - var normX = 0.0; - var normY = 0.0; - for (var i = 0; i < x.length; i++) { - result += x[i] * y[i]; - normX += Math.pow(x[i], 2); - normY += Math.pow(y[i], 2); - } - if (normX === 0 && normY === 0) { - return 0; - } - else if (normX === 0 || normY === 0) { - return 1.0; - } - else { - return 1.0 - result / Math.sqrt(normX * normY); - } -} -exports.cosine = cosine; -var OptimizationState = (function () { - function OptimizationState() { - this.currentEpoch = 0; - this.headEmbedding = []; - this.tailEmbedding = []; - this.head = []; - this.tail = []; - this.epochsPerSample = []; - this.epochOfNextSample = []; - this.epochOfNextNegativeSample = []; - this.epochsPerNegativeSample = []; - this.moveOther = true; - this.initialAlpha = 1.0; - this.alpha = 1.0; - this.gamma = 1.0; - this.a = 1.5769434603113077; - this.b = 0.8950608779109733; - this.dim = 2; - this.nEpochs = 500; - this.nVertices = 0; - } - return OptimizationState; -}()); -function clip(x, clipValue) { - if (x > clipValue) - return clipValue; - else if (x < -clipValue) - return -clipValue; - else - return x; -} -function rDist(x, y) { - var result = 0.0; - for (var i = 0; i < x.length; i++) { - result += Math.pow(x[i] - y[i], 2); - } - return result; -} -function findABParams(spread, minDist) { - var curve = function (_a) { - var _b = __read(_a, 2), a = _b[0], b = _b[1]; - return function (x) { - return 1.0 / (1.0 + a * Math.pow(x, (2 * b))); - }; - }; - var xv = utils - .linear(0, spread * 3, 300) - .map(function (val) { return (val < minDist ? 1.0 : val); }); - var yv = utils.zeros(xv.length).map(function (val, index) { - var gte = xv[index] >= minDist; - return gte ? 
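`euclidean` and `cosine` are the two bundled metrics; any function with this shape can be supplied as `distanceFn` in the constructor params. Equivalent sketches:

```ts
// Euclidean distance over plain number arrays.
const euclideanSketch = (x: number[], y: number[]): number =>
  Math.sqrt(x.reduce((s, xi, i) => s + (xi - y[i]) ** 2, 0));

// Cosine distance = 1 - cosine similarity; the bundle additionally
// special-cases zero-norm vectors, omitted here.
const cosineSketch = (x: number[], y: number[]): number => {
  let dot = 0, nx = 0, ny = 0;
  for (let i = 0; i < x.length; i++) {
    dot += x[i] * y[i];
    nx += x[i] ** 2;
    ny += y[i] ** 2;
  }
  return 1 - dot / Math.sqrt(nx * ny);
};

console.log(euclideanSketch([0, 0], [3, 4])); // 5
console.log(cosineSketch([1, 0], [0, 1]));    // 1
```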
Math.exp(-(xv[index] - minDist) / spread) : val; - }); - var initialValues = [0.5, 0.5]; - var data = { x: xv, y: yv }; - var options = { - damping: 1.5, - initialValues: initialValues, - gradientDifference: 10e-2, - maxIterations: 100, - errorTolerance: 10e-3, - }; - var parameterValues = ml_levenberg_marquardt_1.default(data, curve, options).parameterValues; - var _a = __read(parameterValues, 2), a = _a[0], b = _a[1]; - return { a: a, b: b }; -} -exports.findABParams = findABParams; -function fastIntersection(graph, target, unknownDist, farDist) { - if (unknownDist === void 0) { unknownDist = 1.0; } - if (farDist === void 0) { farDist = 5.0; } - return graph.map(function (value, row, col) { - if (target[row] === -1 || target[col] === -1) { - return value * Math.exp(-unknownDist); - } - else if (target[row] !== target[col]) { - return value * Math.exp(-farDist); - } - else { - return value; - } - }); -} -exports.fastIntersection = fastIntersection; -function resetLocalConnectivity(simplicialSet) { - simplicialSet = matrix.normalize(simplicialSet, "max"); - var transpose = matrix.transpose(simplicialSet); - var prodMatrix = matrix.pairwiseMultiply(transpose, simplicialSet); - simplicialSet = matrix.add(simplicialSet, matrix.subtract(transpose, prodMatrix)); - return matrix.eliminateZeros(simplicialSet); -} -exports.resetLocalConnectivity = resetLocalConnectivity; -function initTransform(indices, weights, embedding) { - var result = utils - .zeros(indices.length) - .map(function (z) { return utils.zeros(embedding[0].length); }); - for (var i = 0; i < indices.length; i++) { - for (var j = 0; j < indices[0].length; j++) { - for (var d = 0; d < embedding[0].length; d++) { - var a = indices[i][j]; - result[i][d] += weights[i][j] * embedding[a][d]; - } - } - } - return result; -} -exports.initTransform = initTransform; - - -/***/ }), -/* 7 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - -var __values = (this && this.__values) || function (o) { - var m = typeof Symbol === "function" && o[Symbol.iterator], i = 0; - if (m) return m.call(o); - return { - next: function () { - if (o && i >= o.length) o = void 0; - return { value: o && o[i++], done: !o }; - } - }; -}; -var __importStar = (this && this.__importStar) || function (mod) { - if (mod && mod.__esModule) return mod; - var result = {}; - if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k]; - result["default"] = mod; - return result; -}; -Object.defineProperty(exports, "__esModule", { value: true }); -var heap = __importStar(__webpack_require__(2)); -var matrix = __importStar(__webpack_require__(3)); -var tree = __importStar(__webpack_require__(4)); -var utils = __importStar(__webpack_require__(1)); -function makeNNDescent(distanceFn, random) { - return function nNDescent(data, leafArray, nNeighbors, nIters, maxCandidates, delta, rho, rpTreeInit) { - if (nIters === void 0) { nIters = 10; } - if (maxCandidates === void 0) { maxCandidates = 50; } - if (delta === void 0) { delta = 0.001; } - if (rho === void 0) { rho = 0.5; } - if (rpTreeInit === void 0) { rpTreeInit = true; } - var nVertices = data.length; - var currentGraph = heap.makeHeap(data.length, nNeighbors); - for (var i = 0; i < data.length; i++) { - var indices = heap.rejectionSample(nNeighbors, data.length, random); - for (var j = 0; j < indices.length; j++) { - var d = distanceFn(data[i], data[indices[j]]); - heap.heapPush(currentGraph, i, d, indices[j], 1); - heap.heapPush(currentGraph, indices[j], d, i, 1); - 
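`findABParams` fits a and b so that the embedding curve matches an exponential that plateaus at 1 inside `minDist`; with the defaults spread = 1 and minDist = 0.1 the fit reproduces the constants pre-seeded in `OptimizationState` (a ≈ 1.5769, b ≈ 0.8951). The two curves being matched, for a rough numerical comparison:

```ts
// Embedding curve: 1 / (1 + a * x^(2b)).
const curve = (a: number, b: number) => (x: number) =>
  1 / (1 + a * Math.pow(x, 2 * b));

// Target: flat at 1 inside minDist, exponential decay beyond it.
const target = (spread: number, minDist: number) => (x: number) =>
  x < minDist ? 1 : Math.exp(-(x - minDist) / spread);

const fitted = curve(1.5769, 0.8951);
const wanted = target(1, 0.1);
for (const x of [0.05, 0.5, 1.5]) {
  console.log(x, fitted(x).toFixed(3), wanted(x).toFixed(3)); // close agreement
}
```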
} - } - if (rpTreeInit) { - for (var n = 0; n < leafArray.length; n++) { - for (var i = 0; i < leafArray[n].length; i++) { - if (leafArray[n][i] < 0) { - break; - } - for (var j = i + 1; j < leafArray[n].length; j++) { - if (leafArray[n][j] < 0) { - break; - } - var d = distanceFn(data[leafArray[n][i]], data[leafArray[n][j]]); - heap.heapPush(currentGraph, leafArray[n][i], d, leafArray[n][j], 1); - heap.heapPush(currentGraph, leafArray[n][j], d, leafArray[n][i], 1); - } - } - } - } - for (var n = 0; n < nIters; n++) { - var candidateNeighbors = heap.buildCandidates(currentGraph, nVertices, nNeighbors, maxCandidates, random); - var c = 0; - for (var i = 0; i < nVertices; i++) { - for (var j = 0; j < maxCandidates; j++) { - var p = Math.floor(candidateNeighbors[0][i][j]); - if (p < 0 || utils.tauRand(random) < rho) { - continue; - } - for (var k = 0; k < maxCandidates; k++) { - var q = Math.floor(candidateNeighbors[0][i][k]); - var cj = candidateNeighbors[2][i][j]; - var ck = candidateNeighbors[2][i][k]; - if (q < 0 || (!cj && !ck)) { - continue; - } - var d = distanceFn(data[p], data[q]); - c += heap.heapPush(currentGraph, p, d, q, 1); - c += heap.heapPush(currentGraph, q, d, p, 1); - } - } - } - if (c <= delta * nNeighbors * data.length) { - break; - } - } - var sorted = heap.deheapSort(currentGraph); - return sorted; - }; -} -exports.makeNNDescent = makeNNDescent; -function makeInitializations(distanceFn) { - function initFromRandom(nNeighbors, data, queryPoints, _heap, random) { - for (var i = 0; i < queryPoints.length; i++) { - var indices = utils.rejectionSample(nNeighbors, data.length, random); - for (var j = 0; j < indices.length; j++) { - if (indices[j] < 0) { - continue; - } - var d = distanceFn(data[indices[j]], queryPoints[i]); - heap.heapPush(_heap, i, d, indices[j], 1); - } - } - } - function initFromTree(_tree, data, queryPoints, _heap, random) { - for (var i = 0; i < queryPoints.length; i++) { - var indices = tree.searchFlatTree(queryPoints[i], _tree, random); - for (var j = 0; j < indices.length; j++) { - if (indices[j] < 0) { - return; - } - var d = distanceFn(data[indices[j]], queryPoints[i]); - heap.heapPush(_heap, i, d, indices[j], 1); - } - } - return; - } - return { initFromRandom: initFromRandom, initFromTree: initFromTree }; -} -exports.makeInitializations = makeInitializations; -function makeInitializedNNSearch(distanceFn) { - return function nnSearchFn(data, graph, initialization, queryPoints) { - var e_1, _a; - var _b = matrix.getCSR(graph), indices = _b.indices, indptr = _b.indptr; - for (var i = 0; i < queryPoints.length; i++) { - var tried = new Set(initialization[0][i]); - while (true) { - var vertex = heap.smallestFlagged(initialization, i); - if (vertex === -1) { - break; - } - var candidates = indices.slice(indptr[vertex], indptr[vertex + 1]); - try { - for (var candidates_1 = __values(candidates), candidates_1_1 = candidates_1.next(); !candidates_1_1.done; candidates_1_1 = candidates_1.next()) { - var candidate = candidates_1_1.value; - if (candidate === vertex || - candidate === -1 || - tried.has(candidate)) { - continue; - } - var d = distanceFn(data[candidate], queryPoints[i]); - heap.uncheckedHeapPush(initialization, i, d, candidate, 1); - tried.add(candidate); - } - } - catch (e_1_1) { e_1 = { error: e_1_1 }; } - finally { - try { - if (candidates_1_1 && !candidates_1_1.done && (_a = candidates_1.return)) _a.call(candidates_1); - } - finally { if (e_1) throw e_1.error; } - } - } - } - return initialization; - }; -} -exports.makeInitializedNNSearch = 
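The NN-descent loop above stops once an iteration stops paying off: the count `c` of successful heap pushes must exceed delta · k · n to continue. That stopping rule in isolation:

```ts
// With the defaults delta = 0.001 and k = nNeighbors, an iteration that
// improves fewer than 0.1% of all neighbour slots ends the search.
function nnDescentConverged(updates: number, delta: number, k: number, n: number): boolean {
  return updates <= delta * k * n;
}

console.log(nnDescentConverged(40, 0.001, 15, 10000)); // true: 40 <= 150
```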
makeInitializedNNSearch; -function initializeSearch(forest, data, queryPoints, nNeighbors, initFromRandom, initFromTree, random) { - var e_2, _a; - var results = heap.makeHeap(queryPoints.length, nNeighbors); - initFromRandom(nNeighbors, data, queryPoints, results, random); - if (forest) { - try { - for (var forest_1 = __values(forest), forest_1_1 = forest_1.next(); !forest_1_1.done; forest_1_1 = forest_1.next()) { - var tree_1 = forest_1_1.value; - initFromTree(tree_1, data, queryPoints, results, random); - } - } - catch (e_2_1) { e_2 = { error: e_2_1 }; } - finally { - try { - if (forest_1_1 && !forest_1_1.done && (_a = forest_1.return)) _a.call(forest_1); - } - finally { if (e_2) throw e_2.error; } - } - } - return results; -} -exports.initializeSearch = initializeSearch; - - -/***/ }), -/* 8 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -var mlMatrix = __webpack_require__(9); - -/** - * Calculate current error - * @ignore - * @param {{x:Array, y:Array}} data - Array of points to fit in the format [x1, x2, ... ], [y1, y2, ... ] - * @param {Array} parameters - Array of current parameter values - * @param {function} parameterizedFunction - The parameters and returns a function with the independent variable as a parameter - * @return {number} - */ -function errorCalculation( - data, - parameters, - parameterizedFunction -) { - var error = 0; - const func = parameterizedFunction(parameters); - - for (var i = 0; i < data.x.length; i++) { - error += Math.abs(data.y[i] - func(data.x[i])); - } - - return error; -} - -/** - * Difference of the matrix function over the parameters - * @ignore - * @param {{x:Array, y:Array}} data - Array of points to fit in the format [x1, x2, ... ], [y1, y2, ... ] - * @param {Array} evaluatedData - Array of previous evaluated function values - * @param {Array} params - Array of previous parameter values - * @param {number} gradientDifference - Adjustment for decrease the damping parameter - * @param {function} paramFunction - The parameters and returns a function with the independent variable as a parameter - * @return {Matrix} - */ -function gradientFunction( - data, - evaluatedData, - params, - gradientDifference, - paramFunction -) { - const n = params.length; - const m = data.x.length; - - var ans = new Array(n); - - for (var param = 0; param < n; param++) { - ans[param] = new Array(m); - var auxParams = params.concat(); - auxParams[param] += gradientDifference; - var funcParam = paramFunction(auxParams); - - for (var point = 0; point < m; point++) { - ans[param][point] = evaluatedData[point] - funcParam(data.x[point]); - } - } - return new mlMatrix.Matrix(ans); -} - -/** - * Matrix function over the samples - * @ignore - * @param {{x:Array, y:Array}} data - Array of points to fit in the format [x1, x2, ... ], [y1, y2, ... ] - * @param {Array} evaluatedData - Array of previous evaluated function values - * @return {Matrix} - */ -function matrixFunction(data, evaluatedData) { - const m = data.x.length; - - var ans = new Array(m); - - for (var point = 0; point < m; point++) { - ans[point] = data.y[point] - evaluatedData[point]; - } - - return new mlMatrix.Matrix([ans]); -} - -/** - * Iteration for Levenberg-Marquardt - * @ignore - * @param {{x:Array, y:Array}} data - Array of points to fit in the format [x1, x2, ... ], [y1, y2, ... 
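Module 8's `errorCalculation` scores a parameter vector as the sum of absolute residuals, which the main loop compares against `errorTolerance` after each iteration. The same measure as a standalone sketch:

```ts
// Sum of |y_i - f(x_i)| over the data set.
function sumAbsResiduals(x: number[], y: number[], f: (v: number) => number): number {
  let error = 0;
  for (let i = 0; i < x.length; i++) {
    error += Math.abs(y[i] - f(x[i]));
  }
  return error;
}

console.log(sumAbsResiduals([0, 1], [1, 3], v => 2 * v + 1)); // 0: exact fit
```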
] - * @param {Array} params - Array of previous parameter values - * @param {number} damping - Levenberg-Marquardt parameter - * @param {number} gradientDifference - Adjustment for decrease the damping parameter - * @param {function} parameterizedFunction - The parameters and returns a function with the independent variable as a parameter - * @return {Array} - */ -function step( - data, - params, - damping, - gradientDifference, - parameterizedFunction -) { - var identity = mlMatrix.Matrix.eye(params.length).mul( - damping * gradientDifference * gradientDifference - ); - - var l = data.x.length; - var evaluatedData = new Array(l); - const func = parameterizedFunction(params); - for (var i = 0; i < l; i++) { - evaluatedData[i] = func(data.x[i]); - } - var gradientFunc = gradientFunction( - data, - evaluatedData, - params, - gradientDifference, - parameterizedFunction - ); - var matrixFunc = matrixFunction(data, evaluatedData).transposeView(); - var inverseMatrix = mlMatrix.inverse( - identity.add(gradientFunc.mmul(gradientFunc.transposeView())) - ); - params = new mlMatrix.Matrix([params]); - params = params.sub( - inverseMatrix - .mmul(gradientFunc) - .mmul(matrixFunc) - .mul(gradientDifference) - .transposeView() - ); - - return params.to1DArray(); -} - -/** - * Curve fitting algorithm - * @param {{x:Array, y:Array}} data - Array of points to fit in the format [x1, x2, ... ], [y1, y2, ... ] - * @param {function} parameterizedFunction - The parameters and returns a function with the independent variable as a parameter - * @param {object} [options] - Options object - * @param {number} [options.damping] - Levenberg-Marquardt parameter - * @param {number} [options.gradientDifference = 10e-2] - Adjustment for decrease the damping parameter - * @param {Array} [options.initialValues] - Array of initial parameter values - * @param {number} [options.maxIterations = 100] - Maximum of allowed iterations - * @param {number} [options.errorTolerance = 10e-3] - Minimum uncertainty allowed for each point - * @return {{parameterValues: Array, parameterError: number, iterations: number}} - */ -function levenbergMarquardt( - data, - parameterizedFunction, - options = {} -) { - let { - maxIterations = 100, - gradientDifference = 10e-2, - damping = 0, - errorTolerance = 10e-3, - initialValues - } = options; - - if (damping <= 0) { - throw new Error('The damping option must be a positive number'); - } else if (!data.x || !data.y) { - throw new Error('The data parameter must have x and y elements'); - } else if ( - !Array.isArray(data.x) || - data.x.length < 2 || - !Array.isArray(data.y) || - data.y.length < 2 - ) { - throw new Error( - 'The data parameter elements must be an array with more than 2 points' - ); - } else { - let dataLen = data.x.length; - if (dataLen !== data.y.length) { - throw new Error('The data parameter elements must have the same size'); - } - } - - var parameters = - initialValues || new Array(parameterizedFunction.length).fill(1); - - if (!Array.isArray(parameters)) { - throw new Error('initialValues must be an array'); - } - - var error = errorCalculation(data, parameters, parameterizedFunction); - - var converged = error <= errorTolerance; - - for ( - var iteration = 0; - iteration < maxIterations && !converged; - iteration++ - ) { - parameters = step( - data, - parameters, - damping, - gradientDifference, - parameterizedFunction - ); - error = errorCalculation(data, parameters, parameterizedFunction); - converged = error <= errorTolerance; - } - - return { - parameterValues: 
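Module 8 is the npm package ml-levenberg-marquardt, which `findABParams` above consumes. Calling it directly on synthetic data drawn from the same curve family (convergence to roughly the generating parameters is expected here, not guaranteed):

```ts
import levenbergMarquardt from 'ml-levenberg-marquardt';

// Parameterized family matching findABParams: f_{a,b}(x) = 1 / (1 + a * x^(2b)).
const family = ([a, b]: number[]) => (x: number) =>
  1 / (1 + a * Math.pow(x, 2 * b));

const x = Array.from({ length: 50 }, (_, i) => (i + 1) / 10);
const y = x.map(family([1.5, 0.9])); // synthetic targets with a = 1.5, b = 0.9

const { parameterValues, parameterError } = levenbergMarquardt(
  { x, y },
  family,
  { damping: 1.5, initialValues: [0.5, 0.5], maxIterations: 100, errorTolerance: 10e-3 },
);
console.log(parameterValues, parameterError); // parameters near [1.5, 0.9]
```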
parameters, - parameterError: error, - iterations: iteration - }; -} - -module.exports = levenbergMarquardt; - - -/***/ }), -/* 9 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -__webpack_require__.r(__webpack_exports__); - -// EXTERNAL MODULE: ./node_modules/is-any-array/src/index.js -var src = __webpack_require__(0); -var src_default = /*#__PURE__*/__webpack_require__.n(src); - -// CONCATENATED MODULE: ./node_modules/ml-array-max/lib-es6/index.js - - -/** - * Computes the maximum of the given values - * @param {Array} input - * @return {number} - */ - -function lib_es6_max(input) { - if (!src_default()(input)) { - throw new TypeError('input must be an array'); - } - - if (input.length === 0) { - throw new TypeError('input must not be empty'); - } - - var max = input[0]; - - for (var i = 1; i < input.length; i++) { - if (input[i] > max) max = input[i]; - } - - return max; -} - -/* harmony default export */ var lib_es6 = (lib_es6_max); - -// CONCATENATED MODULE: ./node_modules/ml-array-min/lib-es6/index.js - - -/** - * Computes the minimum of the given values - * @param {Array} input - * @return {number} - */ - -function lib_es6_min(input) { - if (!src_default()(input)) { - throw new TypeError('input must be an array'); - } - - if (input.length === 0) { - throw new TypeError('input must not be empty'); - } - - var min = input[0]; - - for (var i = 1; i < input.length; i++) { - if (input[i] < min) min = input[i]; - } - - return min; -} - -/* harmony default export */ var ml_array_min_lib_es6 = (lib_es6_min); - -// CONCATENATED MODULE: ./node_modules/ml-array-rescale/lib-es6/index.js - - - - -function rescale(input) { - var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; - - if (!src_default()(input)) { - throw new TypeError('input must be an array'); - } else if (input.length === 0) { - throw new TypeError('input must not be empty'); - } - - var output; - - if (options.output !== undefined) { - if (!src_default()(options.output)) { - throw new TypeError('output option must be an array if specified'); - } - - output = options.output; - } else { - output = new Array(input.length); - } - - var currentMin = ml_array_min_lib_es6(input); - var currentMax = lib_es6(input); - - if (currentMin === currentMax) { - throw new RangeError('minimum and maximum input values are equal. Cannot rescale a constant array'); - } - - var _options$min = options.min, - minValue = _options$min === void 0 ? options.autoMinMax ? currentMin : 0 : _options$min, - _options$max = options.max, - maxValue = _options$max === void 0 ? options.autoMinMax ? 
currentMax : 1 : _options$max; - - if (minValue >= maxValue) { - throw new RangeError('min option must be smaller than max option'); - } - - var factor = (maxValue - minValue) / (currentMax - currentMin); - - for (var i = 0; i < input.length; i++) { - output[i] = (input[i] - currentMin) * factor + minValue; - } - - return output; -} - -/* harmony default export */ var ml_array_rescale_lib_es6 = (rescale); - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/dc/lu.js - - -/** - * @class LuDecomposition - * @link https://github.com/lutzroeder/Mapack/blob/master/Source/LuDecomposition.cs - * @param {Matrix} matrix - */ -class lu_LuDecomposition { - constructor(matrix) { - matrix = WrapperMatrix2D_WrapperMatrix2D.checkMatrix(matrix); - - var lu = matrix.clone(); - var rows = lu.rows; - var columns = lu.columns; - var pivotVector = new Array(rows); - var pivotSign = 1; - var i, j, k, p, s, t, v; - var LUcolj, kmax; - - for (i = 0; i < rows; i++) { - pivotVector[i] = i; - } - - LUcolj = new Array(rows); - - for (j = 0; j < columns; j++) { - for (i = 0; i < rows; i++) { - LUcolj[i] = lu.get(i, j); - } - - for (i = 0; i < rows; i++) { - kmax = Math.min(i, j); - s = 0; - for (k = 0; k < kmax; k++) { - s += lu.get(i, k) * LUcolj[k]; - } - LUcolj[i] -= s; - lu.set(i, j, LUcolj[i]); - } - - p = j; - for (i = j + 1; i < rows; i++) { - if (Math.abs(LUcolj[i]) > Math.abs(LUcolj[p])) { - p = i; - } - } - - if (p !== j) { - for (k = 0; k < columns; k++) { - t = lu.get(p, k); - lu.set(p, k, lu.get(j, k)); - lu.set(j, k, t); - } - - v = pivotVector[p]; - pivotVector[p] = pivotVector[j]; - pivotVector[j] = v; - - pivotSign = -pivotSign; - } - - if (j < rows && lu.get(j, j) !== 0) { - for (i = j + 1; i < rows; i++) { - lu.set(i, j, lu.get(i, j) / lu.get(j, j)); - } - } - } - - this.LU = lu; - this.pivotVector = pivotVector; - this.pivotSign = pivotSign; - } - - /** - * - * @return {boolean} - */ - isSingular() { - var data = this.LU; - var col = data.columns; - for (var j = 0; j < col; j++) { - if (data[j][j] === 0) { - return true; - } - } - return false; - } - - /** - * - * @param {Matrix} value - * @return {Matrix} - */ - solve(value) { - value = matrix_Matrix.checkMatrix(value); - - var lu = this.LU; - var rows = lu.rows; - - if (rows !== value.rows) { - throw new Error('Invalid matrix dimensions'); - } - if (this.isSingular()) { - throw new Error('LU matrix is singular'); - } - - var count = value.columns; - var X = value.subMatrixRow(this.pivotVector, 0, count - 1); - var columns = lu.columns; - var i, j, k; - - for (k = 0; k < columns; k++) { - for (i = k + 1; i < columns; i++) { - for (j = 0; j < count; j++) { - X[i][j] -= X[k][j] * lu[i][k]; - } - } - } - for (k = columns - 1; k >= 0; k--) { - for (j = 0; j < count; j++) { - X[k][j] /= lu[k][k]; - } - for (i = 0; i < k; i++) { - for (j = 0; j < count; j++) { - X[i][j] -= X[k][j] * lu[i][k]; - } - } - } - return X; - } - - /** - * - * @return {number} - */ - get determinant() { - var data = this.LU; - if (!data.isSquare()) { - throw new Error('Matrix must be square'); - } - var determinant = this.pivotSign; - var col = data.columns; - for (var j = 0; j < col; j++) { - determinant *= data[j][j]; - } - return determinant; - } - - /** - * - * @return {Matrix} - */ - get lowerTriangularMatrix() { - var data = this.LU; - var rows = data.rows; - var columns = data.columns; - var X = new matrix_Matrix(rows, columns); - for (var i = 0; i < rows; i++) { - for (var j = 0; j < columns; j++) { - if (i > j) { - X[i][j] = data[i][j]; - } else if (i === j) { - 
X[i][j] = 1; - } else { - X[i][j] = 0; - } - } - } - return X; - } - - /** - * - * @return {Matrix} - */ - get upperTriangularMatrix() { - var data = this.LU; - var rows = data.rows; - var columns = data.columns; - var X = new matrix_Matrix(rows, columns); - for (var i = 0; i < rows; i++) { - for (var j = 0; j < columns; j++) { - if (i <= j) { - X[i][j] = data[i][j]; - } else { - X[i][j] = 0; - } - } - } - return X; - } - - /** - * - * @return {Array} - */ - get pivotPermutationVector() { - return this.pivotVector.slice(); - } -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/dc/util.js -function hypotenuse(a, b) { - var r = 0; - if (Math.abs(a) > Math.abs(b)) { - r = b / a; - return Math.abs(a) * Math.sqrt(1 + r * r); - } - if (b !== 0) { - r = a / b; - return Math.abs(b) * Math.sqrt(1 + r * r); - } - return 0; -} - -function getFilled2DArray(rows, columns, value) { - var array = new Array(rows); - for (var i = 0; i < rows; i++) { - array[i] = new Array(columns); - for (var j = 0; j < columns; j++) { - array[i][j] = value; - } - } - return array; -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/dc/svd.js - - - - -/** - * @class SingularValueDecomposition - * @see https://github.com/accord-net/framework/blob/development/Sources/Accord.Math/Decompositions/SingularValueDecomposition.cs - * @param {Matrix} value - * @param {object} [options] - * @param {boolean} [options.computeLeftSingularVectors=true] - * @param {boolean} [options.computeRightSingularVectors=true] - * @param {boolean} [options.autoTranspose=false] - */ -class svd_SingularValueDecomposition { - constructor(value, options = {}) { - value = WrapperMatrix2D_WrapperMatrix2D.checkMatrix(value); - - var m = value.rows; - var n = value.columns; - - const { - computeLeftSingularVectors = true, - computeRightSingularVectors = true, - autoTranspose = false - } = options; - - var wantu = Boolean(computeLeftSingularVectors); - var wantv = Boolean(computeRightSingularVectors); - - var swapped = false; - var a; - if (m < n) { - if (!autoTranspose) { - a = value.clone(); - // eslint-disable-next-line no-console - console.warn( - 'Computing SVD on a matrix with more columns than rows. 
Consider enabling autoTranspose' - ); - } else { - a = value.transpose(); - m = a.rows; - n = a.columns; - swapped = true; - var aux = wantu; - wantu = wantv; - wantv = aux; - } - } else { - a = value.clone(); - } - - var nu = Math.min(m, n); - var ni = Math.min(m + 1, n); - var s = new Array(ni); - var U = getFilled2DArray(m, nu, 0); - var V = getFilled2DArray(n, n, 0); - - var e = new Array(n); - var work = new Array(m); - - var si = new Array(ni); - for (let i = 0; i < ni; i++) si[i] = i; - - var nct = Math.min(m - 1, n); - var nrt = Math.max(0, Math.min(n - 2, m)); - var mrc = Math.max(nct, nrt); - - for (let k = 0; k < mrc; k++) { - if (k < nct) { - s[k] = 0; - for (let i = k; i < m; i++) { - s[k] = hypotenuse(s[k], a[i][k]); - } - if (s[k] !== 0) { - if (a[k][k] < 0) { - s[k] = -s[k]; - } - for (let i = k; i < m; i++) { - a[i][k] /= s[k]; - } - a[k][k] += 1; - } - s[k] = -s[k]; - } - - for (let j = k + 1; j < n; j++) { - if (k < nct && s[k] !== 0) { - let t = 0; - for (let i = k; i < m; i++) { - t += a[i][k] * a[i][j]; - } - t = -t / a[k][k]; - for (let i = k; i < m; i++) { - a[i][j] += t * a[i][k]; - } - } - e[j] = a[k][j]; - } - - if (wantu && k < nct) { - for (let i = k; i < m; i++) { - U[i][k] = a[i][k]; - } - } - - if (k < nrt) { - e[k] = 0; - for (let i = k + 1; i < n; i++) { - e[k] = hypotenuse(e[k], e[i]); - } - if (e[k] !== 0) { - if (e[k + 1] < 0) { - e[k] = 0 - e[k]; - } - for (let i = k + 1; i < n; i++) { - e[i] /= e[k]; - } - e[k + 1] += 1; - } - e[k] = -e[k]; - if (k + 1 < m && e[k] !== 0) { - for (let i = k + 1; i < m; i++) { - work[i] = 0; - } - for (let i = k + 1; i < m; i++) { - for (let j = k + 1; j < n; j++) { - work[i] += e[j] * a[i][j]; - } - } - for (let j = k + 1; j < n; j++) { - let t = -e[j] / e[k + 1]; - for (let i = k + 1; i < m; i++) { - a[i][j] += t * work[i]; - } - } - } - if (wantv) { - for (let i = k + 1; i < n; i++) { - V[i][k] = e[i]; - } - } - } - } - - let p = Math.min(n, m + 1); - if (nct < n) { - s[nct] = a[nct][nct]; - } - if (m < p) { - s[p - 1] = 0; - } - if (nrt + 1 < p) { - e[nrt] = a[nrt][p - 1]; - } - e[p - 1] = 0; - - if (wantu) { - for (let j = nct; j < nu; j++) { - for (let i = 0; i < m; i++) { - U[i][j] = 0; - } - U[j][j] = 1; - } - for (let k = nct - 1; k >= 0; k--) { - if (s[k] !== 0) { - for (let j = k + 1; j < nu; j++) { - let t = 0; - for (let i = k; i < m; i++) { - t += U[i][k] * U[i][j]; - } - t = -t / U[k][k]; - for (let i = k; i < m; i++) { - U[i][j] += t * U[i][k]; - } - } - for (let i = k; i < m; i++) { - U[i][k] = -U[i][k]; - } - U[k][k] = 1 + U[k][k]; - for (let i = 0; i < k - 1; i++) { - U[i][k] = 0; - } - } else { - for (let i = 0; i < m; i++) { - U[i][k] = 0; - } - U[k][k] = 1; - } - } - } - - if (wantv) { - for (let k = n - 1; k >= 0; k--) { - if (k < nrt && e[k] !== 0) { - for (let j = k + 1; j < n; j++) { - let t = 0; - for (let i = k + 1; i < n; i++) { - t += V[i][k] * V[i][j]; - } - t = -t / V[k + 1][k]; - for (let i = k + 1; i < n; i++) { - V[i][j] += t * V[i][k]; - } - } - } - for (let i = 0; i < n; i++) { - V[i][k] = 0; - } - V[k][k] = 1; - } - } - - var pp = p - 1; - var iter = 0; - var eps = Number.EPSILON; - while (p > 0) { - let k, kase; - for (k = p - 2; k >= -1; k--) { - if (k === -1) { - break; - } - const alpha = - Number.MIN_VALUE + eps * Math.abs(s[k] + Math.abs(s[k + 1])); - if (Math.abs(e[k]) <= alpha || Number.isNaN(e[k])) { - e[k] = 0; - break; - } - } - if (k === p - 2) { - kase = 4; - } else { - let ks; - for (ks = p - 1; ks >= k; ks--) { - if (ks === k) { - break; - } - let t = - (ks !== p ? 
Math.abs(e[ks]) : 0) + - (ks !== k + 1 ? Math.abs(e[ks - 1]) : 0); - if (Math.abs(s[ks]) <= eps * t) { - s[ks] = 0; - break; - } - } - if (ks === k) { - kase = 3; - } else if (ks === p - 1) { - kase = 1; - } else { - kase = 2; - k = ks; - } - } - - k++; - - switch (kase) { - case 1: { - let f = e[p - 2]; - e[p - 2] = 0; - for (let j = p - 2; j >= k; j--) { - let t = hypotenuse(s[j], f); - let cs = s[j] / t; - let sn = f / t; - s[j] = t; - if (j !== k) { - f = -sn * e[j - 1]; - e[j - 1] = cs * e[j - 1]; - } - if (wantv) { - for (let i = 0; i < n; i++) { - t = cs * V[i][j] + sn * V[i][p - 1]; - V[i][p - 1] = -sn * V[i][j] + cs * V[i][p - 1]; - V[i][j] = t; - } - } - } - break; - } - case 2: { - let f = e[k - 1]; - e[k - 1] = 0; - for (let j = k; j < p; j++) { - let t = hypotenuse(s[j], f); - let cs = s[j] / t; - let sn = f / t; - s[j] = t; - f = -sn * e[j]; - e[j] = cs * e[j]; - if (wantu) { - for (let i = 0; i < m; i++) { - t = cs * U[i][j] + sn * U[i][k - 1]; - U[i][k - 1] = -sn * U[i][j] + cs * U[i][k - 1]; - U[i][j] = t; - } - } - } - break; - } - case 3: { - const scale = Math.max( - Math.abs(s[p - 1]), - Math.abs(s[p - 2]), - Math.abs(e[p - 2]), - Math.abs(s[k]), - Math.abs(e[k]) - ); - const sp = s[p - 1] / scale; - const spm1 = s[p - 2] / scale; - const epm1 = e[p - 2] / scale; - const sk = s[k] / scale; - const ek = e[k] / scale; - const b = ((spm1 + sp) * (spm1 - sp) + epm1 * epm1) / 2; - const c = sp * epm1 * (sp * epm1); - let shift = 0; - if (b !== 0 || c !== 0) { - if (b < 0) { - shift = 0 - Math.sqrt(b * b + c); - } else { - shift = Math.sqrt(b * b + c); - } - shift = c / (b + shift); - } - let f = (sk + sp) * (sk - sp) + shift; - let g = sk * ek; - for (let j = k; j < p - 1; j++) { - let t = hypotenuse(f, g); - if (t === 0) t = Number.MIN_VALUE; - let cs = f / t; - let sn = g / t; - if (j !== k) { - e[j - 1] = t; - } - f = cs * s[j] + sn * e[j]; - e[j] = cs * e[j] - sn * s[j]; - g = sn * s[j + 1]; - s[j + 1] = cs * s[j + 1]; - if (wantv) { - for (let i = 0; i < n; i++) { - t = cs * V[i][j] + sn * V[i][j + 1]; - V[i][j + 1] = -sn * V[i][j] + cs * V[i][j + 1]; - V[i][j] = t; - } - } - t = hypotenuse(f, g); - if (t === 0) t = Number.MIN_VALUE; - cs = f / t; - sn = g / t; - s[j] = t; - f = cs * e[j] + sn * s[j + 1]; - s[j + 1] = -sn * e[j] + cs * s[j + 1]; - g = sn * e[j + 1]; - e[j + 1] = cs * e[j + 1]; - if (wantu && j < m - 1) { - for (let i = 0; i < m; i++) { - t = cs * U[i][j] + sn * U[i][j + 1]; - U[i][j + 1] = -sn * U[i][j] + cs * U[i][j + 1]; - U[i][j] = t; - } - } - } - e[p - 2] = f; - iter = iter + 1; - break; - } - case 4: { - if (s[k] <= 0) { - s[k] = s[k] < 0 ? -s[k] : 0; - if (wantv) { - for (let i = 0; i <= pp; i++) { - V[i][k] = -V[i][k]; - } - } - } - while (k < pp) { - if (s[k] >= s[k + 1]) { - break; - } - let t = s[k]; - s[k] = s[k + 1]; - s[k + 1] = t; - if (wantv && k < n - 1) { - for (let i = 0; i < n; i++) { - t = V[i][k + 1]; - V[i][k + 1] = V[i][k]; - V[i][k] = t; - } - } - if (wantu && k < m - 1) { - for (let i = 0; i < m; i++) { - t = U[i][k + 1]; - U[i][k + 1] = U[i][k]; - U[i][k] = t; - } - } - k++; - } - iter = 0; - p--; - break; - } - // no default - } - } - - if (swapped) { - var tmp = V; - V = U; - U = tmp; - } - - this.m = m; - this.n = n; - this.s = s; - this.U = U; - this.V = V; - } - - /** - * Solve a problem of least square (Ax=b) by using the SVD. Useful when A is singular. When A is not singular, it would be better to use qr.solve(value). 
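 * In terms of the decomposition, with A = U*S*V^T, the solution computed
 * below is x = V*S^+*U^T*b, where S^+ inverts every singular value larger
 * than `threshold` and zeroes out the rest, i.e. the pseudoinverse of S.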
- * Example : We want to approximate x, with A matrix shape m*n, x vector size n, b vector size m (m > n). We will use : - * var svd = new SingularValueDecomposition(A); - * var x = svd.solve(b); - * @param {Matrix} value - Matrix with a single column which is the vector b (in the equation Ax = b) - * @return {Matrix} - The vector x - */ - solve(value) { - var Y = value; - var e = this.threshold; - var scols = this.s.length; - var Ls = matrix_Matrix.zeros(scols, scols); - - for (let i = 0; i < scols; i++) { - if (Math.abs(this.s[i]) <= e) { - Ls[i][i] = 0; - } else { - Ls[i][i] = 1 / this.s[i]; - } - } - - var U = this.U; - var V = this.rightSingularVectors; - - var VL = V.mmul(Ls); - var vrows = V.rows; - var urows = U.length; - var VLU = matrix_Matrix.zeros(vrows, urows); - - for (let i = 0; i < vrows; i++) { - for (let j = 0; j < urows; j++) { - let sum = 0; - for (let k = 0; k < scols; k++) { - sum += VL[i][k] * U[j][k]; - } - VLU[i][j] = sum; - } - } - - return VLU.mmul(Y); - } - - /** - * - * @param {Array} value - * @return {Matrix} - */ - solveForDiagonal(value) { - return this.solve(matrix_Matrix.diag(value)); - } - - /** - * Get the inverse of the matrix. We compute the inverse of a matrix using SVD when this matrix is singular or ill-conditioned. Example : - * var svd = new SingularValueDecomposition(A); - * var inverseA = svd.inverse(); - * @return {Matrix} - The approximation of the inverse of the matrix - */ - inverse() { - var V = this.V; - var e = this.threshold; - var vrows = V.length; - var vcols = V[0].length; - var X = new matrix_Matrix(vrows, this.s.length); - - for (let i = 0; i < vrows; i++) { - for (let j = 0; j < vcols; j++) { - if (Math.abs(this.s[j]) > e) { - X[i][j] = V[i][j] / this.s[j]; - } else { - X[i][j] = 0; - } - } - } - - var U = this.U; - - var urows = U.length; - var ucols = U[0].length; - var Y = new matrix_Matrix(vrows, urows); - - for (let i = 0; i < vrows; i++) { - for (let j = 0; j < urows; j++) { - let sum = 0; - for (let k = 0; k < ucols; k++) { - sum += X[i][k] * U[j][k]; - } - Y[i][j] = sum; - } - } - - return Y; - } - - /** - * - * @return {number} - */ - get condition() { - return this.s[0] / this.s[Math.min(this.m, this.n) - 1]; - } - - /** - * - * @return {number} - */ - get norm2() { - return this.s[0]; - } - - /** - * - * @return {number} - */ - get rank() { - var tol = Math.max(this.m, this.n) * this.s[0] * Number.EPSILON; - var r = 0; - var s = this.s; - for (var i = 0, ii = s.length; i < ii; i++) { - if (s[i] > tol) { - r++; - } - } - return r; - } - - /** - * - * @return {Array} - */ - get diagonal() { - return this.s; - } - - /** - * - * @return {number} - */ - get threshold() { - return Number.EPSILON / 2 * Math.max(this.m, this.n) * this.s[0]; - } - - /** - * - * @return {Matrix} - */ - get leftSingularVectors() { - if (!matrix_Matrix.isMatrix(this.U)) { - this.U = new matrix_Matrix(this.U); - } - return this.U; - } - - /** - * - * @return {Matrix} - */ - get rightSingularVectors() { - if (!matrix_Matrix.isMatrix(this.V)) { - this.V = new matrix_Matrix(this.V); - } - return this.V; - } - - /** - * - * @return {Matrix} - */ - get diagonalMatrix() { - return matrix_Matrix.diag(this.s); - } -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/util.js - - -/** - * @private - * Check that a row index is not out of bounds - * @param {Matrix} matrix - * @param {number} index - * @param {boolean} [outer] - */ -function checkRowIndex(matrix, index, outer) { - var max = outer ? 
matrix.rows : matrix.rows - 1; - if (index < 0 || index > max) { - throw new RangeError('Row index out of range'); - } -} - -/** - * @private - * Check that a column index is not out of bounds - * @param {Matrix} matrix - * @param {number} index - * @param {boolean} [outer] - */ -function checkColumnIndex(matrix, index, outer) { - var max = outer ? matrix.columns : matrix.columns - 1; - if (index < 0 || index > max) { - throw new RangeError('Column index out of range'); - } -} - -/** - * @private - * Check that the provided vector is an array with the right length - * @param {Matrix} matrix - * @param {Array|Matrix} vector - * @return {Array} - * @throws {RangeError} - */ -function checkRowVector(matrix, vector) { - if (vector.to1DArray) { - vector = vector.to1DArray(); - } - if (vector.length !== matrix.columns) { - throw new RangeError( - 'vector size must be the same as the number of columns' - ); - } - return vector; -} - -/** - * @private - * Check that the provided vector is an array with the right length - * @param {Matrix} matrix - * @param {Array|Matrix} vector - * @return {Array} - * @throws {RangeError} - */ -function checkColumnVector(matrix, vector) { - if (vector.to1DArray) { - vector = vector.to1DArray(); - } - if (vector.length !== matrix.rows) { - throw new RangeError('vector size must be the same as the number of rows'); - } - return vector; -} - -function checkIndices(matrix, rowIndices, columnIndices) { - return { - row: checkRowIndices(matrix, rowIndices), - column: checkColumnIndices(matrix, columnIndices) - }; -} - -function checkRowIndices(matrix, rowIndices) { - if (typeof rowIndices !== 'object') { - throw new TypeError('unexpected type for row indices'); - } - - var rowOut = rowIndices.some((r) => { - return r < 0 || r >= matrix.rows; - }); - - if (rowOut) { - throw new RangeError('row indices are out of range'); - } - - if (!Array.isArray(rowIndices)) rowIndices = Array.from(rowIndices); - - return rowIndices; -} - -function checkColumnIndices(matrix, columnIndices) { - if (typeof columnIndices !== 'object') { - throw new TypeError('unexpected type for column indices'); - } - - var columnOut = columnIndices.some((c) => { - return c < 0 || c >= matrix.columns; - }); - - if (columnOut) { - throw new RangeError('column indices are out of range'); - } - if (!Array.isArray(columnIndices)) columnIndices = Array.from(columnIndices); - - return columnIndices; -} - -function checkRange(matrix, startRow, endRow, startColumn, endColumn) { - if (arguments.length !== 5) { - throw new RangeError('expected 4 arguments'); - } - checkNumber('startRow', startRow); - checkNumber('endRow', endRow); - checkNumber('startColumn', startColumn); - checkNumber('endColumn', endColumn); - if ( - startRow > endRow || - startColumn > endColumn || - startRow < 0 || - startRow >= matrix.rows || - endRow < 0 || - endRow >= matrix.rows || - startColumn < 0 || - startColumn >= matrix.columns || - endColumn < 0 || - endColumn >= matrix.columns - ) { - throw new RangeError('Submatrix indices are out of range'); - } -} - -function getRange(from, to) { - var arr = new Array(to - from + 1); - for (var i = 0; i < arr.length; i++) { - arr[i] = from + i; - } - return arr; -} - -function sumByRow(matrix) { - var sum = matrix_Matrix.zeros(matrix.rows, 1); - for (var i = 0; i < matrix.rows; ++i) { - for (var j = 0; j < matrix.columns; ++j) { - sum.set(i, 0, sum.get(i, 0) + matrix.get(i, j)); - } - } - return sum; -} - -function sumByColumn(matrix) { - var sum = matrix_Matrix.zeros(1, matrix.columns); - for 
(var i = 0; i < matrix.rows; ++i) { - for (var j = 0; j < matrix.columns; ++j) { - sum.set(0, j, sum.get(0, j) + matrix.get(i, j)); - } - } - return sum; -} - -function sumAll(matrix) { - var v = 0; - for (var i = 0; i < matrix.rows; i++) { - for (var j = 0; j < matrix.columns; j++) { - v += matrix.get(i, j); - } - } - return v; -} - -function checkNumber(name, value) { - if (typeof value !== 'number') { - throw new TypeError(`${name} must be a number`); - } -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/views/base.js - - - -class base_BaseView extends AbstractMatrix() { - constructor(matrix, rows, columns) { - super(); - this.matrix = matrix; - this.rows = rows; - this.columns = columns; - } - - static get [Symbol.species]() { - return matrix_Matrix; - } -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/views/transpose.js - - -class transpose_MatrixTransposeView extends base_BaseView { - constructor(matrix) { - super(matrix, matrix.columns, matrix.rows); - } - - set(rowIndex, columnIndex, value) { - this.matrix.set(columnIndex, rowIndex, value); - return this; - } - - get(rowIndex, columnIndex) { - return this.matrix.get(columnIndex, rowIndex); - } -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/views/row.js - - -class row_MatrixRowView extends base_BaseView { - constructor(matrix, row) { - super(matrix, 1, matrix.columns); - this.row = row; - } - - set(rowIndex, columnIndex, value) { - this.matrix.set(this.row, columnIndex, value); - return this; - } - - get(rowIndex, columnIndex) { - return this.matrix.get(this.row, columnIndex); - } -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/views/sub.js - - - - -class sub_MatrixSubView extends base_BaseView { - constructor(matrix, startRow, endRow, startColumn, endColumn) { - checkRange(matrix, startRow, endRow, startColumn, endColumn); - super(matrix, endRow - startRow + 1, endColumn - startColumn + 1); - this.startRow = startRow; - this.startColumn = startColumn; - } - - set(rowIndex, columnIndex, value) { - this.matrix.set( - this.startRow + rowIndex, - this.startColumn + columnIndex, - value - ); - return this; - } - - get(rowIndex, columnIndex) { - return this.matrix.get( - this.startRow + rowIndex, - this.startColumn + columnIndex - ); - } -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/views/selection.js - - - - -class selection_MatrixSelectionView extends base_BaseView { - constructor(matrix, rowIndices, columnIndices) { - var indices = checkIndices(matrix, rowIndices, columnIndices); - super(matrix, indices.row.length, indices.column.length); - this.rowIndices = indices.row; - this.columnIndices = indices.column; - } - - set(rowIndex, columnIndex, value) { - this.matrix.set( - this.rowIndices[rowIndex], - this.columnIndices[columnIndex], - value - ); - return this; - } - - get(rowIndex, columnIndex) { - return this.matrix.get( - this.rowIndices[rowIndex], - this.columnIndices[columnIndex] - ); - } -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/views/rowSelection.js - - - - -class rowSelection_MatrixRowSelectionView extends base_BaseView { - constructor(matrix, rowIndices) { - rowIndices = checkRowIndices(matrix, rowIndices); - super(matrix, rowIndices.length, matrix.columns); - this.rowIndices = rowIndices; - } - - set(rowIndex, columnIndex, value) { - this.matrix.set(this.rowIndices[rowIndex], columnIndex, value); - return this; - } - - get(rowIndex, columnIndex) { - return this.matrix.get(this.rowIndices[rowIndex], columnIndex); - } -} - -// CONCATENATED MODULE: 
./node_modules/ml-matrix/src/views/columnSelection.js - - - - -class columnSelection_MatrixColumnSelectionView extends base_BaseView { - constructor(matrix, columnIndices) { - columnIndices = checkColumnIndices(matrix, columnIndices); - super(matrix, matrix.rows, columnIndices.length); - this.columnIndices = columnIndices; - } - - set(rowIndex, columnIndex, value) { - this.matrix.set(rowIndex, this.columnIndices[columnIndex], value); - return this; - } - - get(rowIndex, columnIndex) { - return this.matrix.get(rowIndex, this.columnIndices[columnIndex]); - } -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/views/column.js - - -class column_MatrixColumnView extends base_BaseView { - constructor(matrix, column) { - super(matrix, matrix.rows, 1); - this.column = column; - } - - set(rowIndex, columnIndex, value) { - this.matrix.set(rowIndex, this.column, value); - return this; - } - - get(rowIndex) { - return this.matrix.get(rowIndex, this.column); - } -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/views/flipRow.js - - -class flipRow_MatrixFlipRowView extends base_BaseView { - constructor(matrix) { - super(matrix, matrix.rows, matrix.columns); - } - - set(rowIndex, columnIndex, value) { - this.matrix.set(this.rows - rowIndex - 1, columnIndex, value); - return this; - } - - get(rowIndex, columnIndex) { - return this.matrix.get(this.rows - rowIndex - 1, columnIndex); - } -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/views/flipColumn.js - - -class flipColumn_MatrixFlipColumnView extends base_BaseView { - constructor(matrix) { - super(matrix, matrix.rows, matrix.columns); - } - - set(rowIndex, columnIndex, value) { - this.matrix.set(rowIndex, this.columns - columnIndex - 1, value); - return this; - } - - get(rowIndex, columnIndex) { - return this.matrix.get(rowIndex, this.columns - columnIndex - 1); - } -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/abstractMatrix.js - - - - - - - - - - - - - - - -function AbstractMatrix(superCtor) { - if (superCtor === undefined) superCtor = Object; - - /** - * Real matrix - * @class Matrix - * @param {number|Array|Matrix} nRows - Number of rows of the new matrix, - * 2D array containing the data or Matrix instance to clone - * @param {number} [nColumns] - Number of columns of the new matrix - */ - class Matrix extends superCtor { - static get [Symbol.species]() { - return this; - } - - /** - * Constructs a Matrix with the chosen dimensions from a 1D array - * @param {number} newRows - Number of rows - * @param {number} newColumns - Number of columns - * @param {Array} newData - A 1D array containing data for the matrix - * @return {Matrix} - The new matrix - */ - static from1DArray(newRows, newColumns, newData) { - var length = newRows * newColumns; - if (length !== newData.length) { - throw new RangeError('Data length does not match given dimensions'); - } - var newMatrix = new this(newRows, newColumns); - for (var row = 0; row < newRows; row++) { - for (var column = 0; column < newColumns; column++) { - newMatrix.set(row, column, newData[row * newColumns + column]); - } - } - return newMatrix; - } - - /** - * Creates a row vector, a matrix with only one row. - * @param {Array} newData - A 1D array containing data for the vector - * @return {Matrix} - The new matrix - */ - static rowVector(newData) { - var vector = new this(1, newData.length); - for (var i = 0; i < newData.length; i++) { - vector.set(0, i, newData[i]); - } - return vector; - } - - /** - * Creates a column vector, a matrix with only one column. 
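 * For example (illustrative): Matrix.columnVector([1, 2, 3]) creates the
 * 3x1 matrix [[1], [2], [3]].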
- * @param {Array} newData - A 1D array containing data for the vector - * @return {Matrix} - The new matrix - */ - static columnVector(newData) { - var vector = new this(newData.length, 1); - for (var i = 0; i < newData.length; i++) { - vector.set(i, 0, newData[i]); - } - return vector; - } - - /** - * Creates an empty matrix with the given dimensions. Values will be undefined. Same as using new Matrix(rows, columns). - * @param {number} rows - Number of rows - * @param {number} columns - Number of columns - * @return {Matrix} - The new matrix - */ - static empty(rows, columns) { - return new this(rows, columns); - } - - /** - * Creates a matrix with the given dimensions. Values will be set to zero. - * @param {number} rows - Number of rows - * @param {number} columns - Number of columns - * @return {Matrix} - The new matrix - */ - static zeros(rows, columns) { - return this.empty(rows, columns).fill(0); - } - - /** - * Creates a matrix with the given dimensions. Values will be set to one. - * @param {number} rows - Number of rows - * @param {number} columns - Number of columns - * @return {Matrix} - The new matrix - */ - static ones(rows, columns) { - return this.empty(rows, columns).fill(1); - } - - /** - * Creates a matrix with the given dimensions. Values will be randomly set. - * @param {number} rows - Number of rows - * @param {number} columns - Number of columns - * @param {function} [rng=Math.random] - Random number generator - * @return {Matrix} The new matrix - */ - static rand(rows, columns, rng) { - if (rng === undefined) rng = Math.random; - var matrix = this.empty(rows, columns); - for (var i = 0; i < rows; i++) { - for (var j = 0; j < columns; j++) { - matrix.set(i, j, rng()); - } - } - return matrix; - } - - /** - * Creates a matrix with the given dimensions. Values will be random integers. - * @param {number} rows - Number of rows - * @param {number} columns - Number of columns - * @param {number} [maxValue=1000] - Maximum value - * @param {function} [rng=Math.random] - Random number generator - * @return {Matrix} The new matrix - */ - static randInt(rows, columns, maxValue, rng) { - if (maxValue === undefined) maxValue = 1000; - if (rng === undefined) rng = Math.random; - var matrix = this.empty(rows, columns); - for (var i = 0; i < rows; i++) { - for (var j = 0; j < columns; j++) { - var value = Math.floor(rng() * maxValue); - matrix.set(i, j, value); - } - } - return matrix; - } - - /** - * Creates an identity matrix with the given dimension. Values of the diagonal will be 1 and others will be 0. - * @param {number} rows - Number of rows - * @param {number} [columns=rows] - Number of columns - * @param {number} [value=1] - Value to fill the diagonal with - * @return {Matrix} - The new identity matrix - */ - static eye(rows, columns, value) { - if (columns === undefined) columns = rows; - if (value === undefined) value = 1; - var min = Math.min(rows, columns); - var matrix = this.zeros(rows, columns); - for (var i = 0; i < min; i++) { - matrix.set(i, i, value); - } - return matrix; - } - - /** - * Creates a diagonal matrix based on the given array. 
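 * For example (illustrative): Matrix.diag([1, 2, 3]) creates a 3x3 matrix
 * with 1, 2, 3 on the diagonal and zeros elsewhere, and Matrix.diag([1, 2], 3, 3)
 * leaves the third diagonal entry at zero.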
- * @param {Array} data - Array containing the data for the diagonal - * @param {number} [rows] - Number of rows (Default: data.length) - * @param {number} [columns] - Number of columns (Default: rows) - * @return {Matrix} - The new diagonal matrix - */ - static diag(data, rows, columns) { - var l = data.length; - if (rows === undefined) rows = l; - if (columns === undefined) columns = rows; - var min = Math.min(l, rows, columns); - var matrix = this.zeros(rows, columns); - for (var i = 0; i < min; i++) { - matrix.set(i, i, data[i]); - } - return matrix; - } - - /** - * Returns a matrix whose elements are the minimum between matrix1 and matrix2 - * @param {Matrix} matrix1 - * @param {Matrix} matrix2 - * @return {Matrix} - */ - static min(matrix1, matrix2) { - matrix1 = this.checkMatrix(matrix1); - matrix2 = this.checkMatrix(matrix2); - var rows = matrix1.rows; - var columns = matrix1.columns; - var result = new this(rows, columns); - for (var i = 0; i < rows; i++) { - for (var j = 0; j < columns; j++) { - result.set(i, j, Math.min(matrix1.get(i, j), matrix2.get(i, j))); - } - } - return result; - } - - /** - * Returns a matrix whose elements are the maximum between matrix1 and matrix2 - * @param {Matrix} matrix1 - * @param {Matrix} matrix2 - * @return {Matrix} - */ - static max(matrix1, matrix2) { - matrix1 = this.checkMatrix(matrix1); - matrix2 = this.checkMatrix(matrix2); - var rows = matrix1.rows; - var columns = matrix1.columns; - var result = new this(rows, columns); - for (var i = 0; i < rows; i++) { - for (var j = 0; j < columns; j++) { - result.set(i, j, Math.max(matrix1.get(i, j), matrix2.get(i, j))); - } - } - return result; - } - - /** - * Check that the provided value is a Matrix and tries to instantiate one if not - * @param {*} value - The value to check - * @return {Matrix} - */ - static checkMatrix(value) { - return Matrix.isMatrix(value) ? value : new this(value); - } - - /** - * Returns true if the argument is a Matrix, false otherwise - * @param {*} value - The value to check - * @return {boolean} - */ - static isMatrix(value) { - return (value != null) && (value.klass === 'Matrix'); - } - - /** - * @prop {number} size - The number of elements in the matrix. - */ - get size() { - return this.rows * this.columns; - } - - /** - * Applies a callback for each element of the matrix. The function is called in the matrix (this) context. 
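 * For example (illustrative), incrementing every element in place:
 * matrix.apply(function (i, j) { this.set(i, j, this.get(i, j) + 1); });
 * An arrow function would not work here, because it would not receive the
 * matrix as `this`.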
- * @param {function} callback - Function that will be called with two parameters : i (row) and j (column) - * @return {Matrix} this - */ - apply(callback) { - if (typeof callback !== 'function') { - throw new TypeError('callback must be a function'); - } - var ii = this.rows; - var jj = this.columns; - for (var i = 0; i < ii; i++) { - for (var j = 0; j < jj; j++) { - callback.call(this, i, j); - } - } - return this; - } - - /** - * Returns a new 1D array filled row by row with the matrix values - * @return {Array} - */ - to1DArray() { - var array = new Array(this.size); - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - array[i * this.columns + j] = this.get(i, j); - } - } - return array; - } - - /** - * Returns a 2D array containing a copy of the data - * @return {Array} - */ - to2DArray() { - var copy = new Array(this.rows); - for (var i = 0; i < this.rows; i++) { - copy[i] = new Array(this.columns); - for (var j = 0; j < this.columns; j++) { - copy[i][j] = this.get(i, j); - } - } - return copy; - } - - /** - * @return {boolean} true if the matrix has one row - */ - isRowVector() { - return this.rows === 1; - } - - /** - * @return {boolean} true if the matrix has one column - */ - isColumnVector() { - return this.columns === 1; - } - - /** - * @return {boolean} true if the matrix has one row or one column - */ - isVector() { - return (this.rows === 1) || (this.columns === 1); - } - - /** - * @return {boolean} true if the matrix has the same number of rows and columns - */ - isSquare() { - return this.rows === this.columns; - } - - /** - * @return {boolean} true if the matrix is square and has the same values on both sides of the diagonal - */ - isSymmetric() { - if (this.isSquare()) { - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j <= i; j++) { - if (this.get(i, j) !== this.get(j, i)) { - return false; - } - } - } - return true; - } - return false; - } - - /** - * Sets a given element of the matrix. mat.set(3,4,1) is equivalent to mat[3][4]=1 - * @abstract - * @param {number} rowIndex - Index of the row - * @param {number} columnIndex - Index of the column - * @param {number} value - The new value for the element - * @return {Matrix} this - */ - set(rowIndex, columnIndex, value) { // eslint-disable-line no-unused-vars - throw new Error('set method is unimplemented'); - } - - /** - * Returns the given element of the matrix. mat.get(3,4) is equivalent to matrix[3][4] - * @abstract - * @param {number} rowIndex - Index of the row - * @param {number} columnIndex - Index of the column - * @return {number} - */ - get(rowIndex, columnIndex) { // eslint-disable-line no-unused-vars - throw new Error('get method is unimplemented'); - } - - /** - * Creates a new matrix that is a repetition of the current matrix. 
New matrix has rowRep times the number of - * rows of the matrix, and colRep times the number of columns of the matrix - * @param {number} rowRep - Number of times the rows should be repeated - * @param {number} colRep - Number of times the columns should be repeated - * @return {Matrix} - * @example - * var matrix = new Matrix([[1,2]]); - * matrix.repeat(2); // [[1,2],[1,2]] - */ - repeat(rowRep, colRep) { - rowRep = rowRep || 1; - colRep = colRep || 1; - var matrix = new this.constructor[Symbol.species](this.rows * rowRep, this.columns * colRep); - for (var i = 0; i < rowRep; i++) { - for (var j = 0; j < colRep; j++) { - matrix.setSubMatrix(this, this.rows * i, this.columns * j); - } - } - return matrix; - } - - /** - * Fills the matrix with a given value. All elements will be set to this value. - * @param {number} value - New value - * @return {Matrix} this - */ - fill(value) { - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - this.set(i, j, value); - } - } - return this; - } - - /** - * Negates the matrix. All elements will be multiplied by (-1) - * @return {Matrix} this - */ - neg() { - return this.mulS(-1); - } - - /** - * Returns a new array from the given row index - * @param {number} index - Row index - * @return {Array} - */ - getRow(index) { - checkRowIndex(this, index); - var row = new Array(this.columns); - for (var i = 0; i < this.columns; i++) { - row[i] = this.get(index, i); - } - return row; - } - - /** - * Returns a new row vector from the given row index - * @param {number} index - Row index - * @return {Matrix} - */ - getRowVector(index) { - return this.constructor.rowVector(this.getRow(index)); - } - - /** - * Sets a row at the given index - * @param {number} index - Row index - * @param {Array|Matrix} array - Array or vector - * @return {Matrix} this - */ - setRow(index, array) { - checkRowIndex(this, index); - array = checkRowVector(this, array); - for (var i = 0; i < this.columns; i++) { - this.set(index, i, array[i]); - } - return this; - } - - /** - * Swaps two rows - * @param {number} row1 - First row index - * @param {number} row2 - Second row index - * @return {Matrix} this - */ - swapRows(row1, row2) { - checkRowIndex(this, row1); - checkRowIndex(this, row2); - for (var i = 0; i < this.columns; i++) { - var temp = this.get(row1, i); - this.set(row1, i, this.get(row2, i)); - this.set(row2, i, temp); - } - return this; - } - - /** - * Returns a new array from the given column index - * @param {number} index - Column index - * @return {Array} - */ - getColumn(index) { - checkColumnIndex(this, index); - var column = new Array(this.rows); - for (var i = 0; i < this.rows; i++) { - column[i] = this.get(i, index); - } - return column; - } - - /** - * Returns a new column vector from the given column index - * @param {number} index - Column index - * @return {Matrix} - */ - getColumnVector(index) { - return this.constructor.columnVector(this.getColumn(index)); - } - - /** - * Sets a column at the given index - * @param {number} index - Column index - * @param {Array|Matrix} array - Array or vector - * @return {Matrix} this - */ - setColumn(index, array) { - checkColumnIndex(this, index); - array = checkColumnVector(this, array); - for (var i = 0; i < this.rows; i++) { - this.set(i, index, array[i]); - } - return this; - } - - /** - * Swaps two columns - * @param {number} column1 - First column index - * @param {number} column2 - Second column index - * @return {Matrix} this - */ - swapColumns(column1, column2) { - checkColumnIndex(this, 
column1); - checkColumnIndex(this, column2); - for (var i = 0; i < this.rows; i++) { - var temp = this.get(i, column1); - this.set(i, column1, this.get(i, column2)); - this.set(i, column2, temp); - } - return this; - } - - /** - * Adds the values of a vector to each row - * @param {Array|Matrix} vector - Array or vector - * @return {Matrix} this - */ - addRowVector(vector) { - vector = checkRowVector(this, vector); - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - this.set(i, j, this.get(i, j) + vector[j]); - } - } - return this; - } - - /** - * Subtracts the values of a vector from each row - * @param {Array|Matrix} vector - Array or vector - * @return {Matrix} this - */ - subRowVector(vector) { - vector = checkRowVector(this, vector); - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - this.set(i, j, this.get(i, j) - vector[j]); - } - } - return this; - } - - /** - * Multiplies the values of a vector with each row - * @param {Array|Matrix} vector - Array or vector - * @return {Matrix} this - */ - mulRowVector(vector) { - vector = checkRowVector(this, vector); - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - this.set(i, j, this.get(i, j) * vector[j]); - } - } - return this; - } - - /** - * Divides the values of each row by those of a vector - * @param {Array|Matrix} vector - Array or vector - * @return {Matrix} this - */ - divRowVector(vector) { - vector = checkRowVector(this, vector); - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - this.set(i, j, this.get(i, j) / vector[j]); - } - } - return this; - } - - /** - * Adds the values of a vector to each column - * @param {Array|Matrix} vector - Array or vector - * @return {Matrix} this - */ - addColumnVector(vector) { - vector = checkColumnVector(this, vector); - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - this.set(i, j, this.get(i, j) + vector[i]); - } - } - return this; - } - - /** - * Subtracts the values of a vector from each column - * @param {Array|Matrix} vector - Array or vector - * @return {Matrix} this - */ - subColumnVector(vector) { - vector = checkColumnVector(this, vector); - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - this.set(i, j, this.get(i, j) - vector[i]); - } - } - return this; - } - - /** - * Multiplies the values of a vector with each column - * @param {Array|Matrix} vector - Array or vector - * @return {Matrix} this - */ - mulColumnVector(vector) { - vector = checkColumnVector(this, vector); - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - this.set(i, j, this.get(i, j) * vector[i]); - } - } - return this; - } - - /** - * Divides the values of each column by those of a vector - * @param {Array|Matrix} vector - Array or vector - * @return {Matrix} this - */ - divColumnVector(vector) { - vector = checkColumnVector(this, vector); - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - this.set(i, j, this.get(i, j) / vector[i]); - } - } - return this; - } - - /** - * Multiplies the values of a row with a scalar - * @param {number} index - Row index - * @param {number} value - * @return {Matrix} this - */ - mulRow(index, value) { - checkRowIndex(this, index); - for (var i = 0; i < this.columns; i++) { - this.set(index, i, this.get(index, i) * value); - } - return this; - } - - /** - * Multiplies the values of a column with a scalar - * @param 
{number} index - Column index - * @param {number} value - * @return {Matrix} this - */ - mulColumn(index, value) { - checkColumnIndex(this, index); - for (var i = 0; i < this.rows; i++) { - this.set(i, index, this.get(i, index) * value); - } - return this; - } - - /** - * Returns the maximum value of the matrix - * @return {number} - */ - max() { - var v = this.get(0, 0); - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - if (this.get(i, j) > v) { - v = this.get(i, j); - } - } - } - return v; - } - - /** - * Returns the index of the maximum value - * @return {Array} - */ - maxIndex() { - var v = this.get(0, 0); - var idx = [0, 0]; - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - if (this.get(i, j) > v) { - v = this.get(i, j); - idx[0] = i; - idx[1] = j; - } - } - } - return idx; - } - - /** - * Returns the minimum value of the matrix - * @return {number} - */ - min() { - var v = this.get(0, 0); - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - if (this.get(i, j) < v) { - v = this.get(i, j); - } - } - } - return v; - } - - /** - * Returns the index of the minimum value - * @return {Array} - */ - minIndex() { - var v = this.get(0, 0); - var idx = [0, 0]; - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - if (this.get(i, j) < v) { - v = this.get(i, j); - idx[0] = i; - idx[1] = j; - } - } - } - return idx; - } - - /** - * Returns the maximum value of one row - * @param {number} row - Row index - * @return {number} - */ - maxRow(row) { - checkRowIndex(this, row); - var v = this.get(row, 0); - for (var i = 1; i < this.columns; i++) { - if (this.get(row, i) > v) { - v = this.get(row, i); - } - } - return v; - } - - /** - * Returns the index of the maximum value of one row - * @param {number} row - Row index - * @return {Array} - */ - maxRowIndex(row) { - checkRowIndex(this, row); - var v = this.get(row, 0); - var idx = [row, 0]; - for (var i = 1; i < this.columns; i++) { - if (this.get(row, i) > v) { - v = this.get(row, i); - idx[1] = i; - } - } - return idx; - } - - /** - * Returns the minimum value of one row - * @param {number} row - Row index - * @return {number} - */ - minRow(row) { - checkRowIndex(this, row); - var v = this.get(row, 0); - for (var i = 1; i < this.columns; i++) { - if (this.get(row, i) < v) { - v = this.get(row, i); - } - } - return v; - } - - /** - * Returns the index of the minimum value of one row - * @param {number} row - Row index - * @return {Array} - */ - minRowIndex(row) { - checkRowIndex(this, row); - var v = this.get(row, 0); - var idx = [row, 0]; - for (var i = 1; i < this.columns; i++) { - if (this.get(row, i) < v) { - v = this.get(row, i); - idx[1] = i; - } - } - return idx; - } - - /** - * Returns the maximum value of one column - * @param {number} column - Column index - * @return {number} - */ - maxColumn(column) { - checkColumnIndex(this, column); - var v = this.get(0, column); - for (var i = 1; i < this.rows; i++) { - if (this.get(i, column) > v) { - v = this.get(i, column); - } - } - return v; - } - - /** - * Returns the index of the maximum value of one column - * @param {number} column - Column index - * @return {Array} - */ - maxColumnIndex(column) { - checkColumnIndex(this, column); - var v = this.get(0, column); - var idx = [0, column]; - for (var i = 1; i < this.rows; i++) { - if (this.get(i, column) > v) { - v = this.get(i, column); - idx[0] = i; - } - } - return idx; - } - - /** - * Returns the minimum value of 
one column - * @param {number} column - Column index - * @return {number} - */ - minColumn(column) { - checkColumnIndex(this, column); - var v = this.get(0, column); - for (var i = 1; i < this.rows; i++) { - if (this.get(i, column) < v) { - v = this.get(i, column); - } - } - return v; - } - - /** - * Returns the index of the minimum value of one column - * @param {number} column - Column index - * @return {Array} - */ - minColumnIndex(column) { - checkColumnIndex(this, column); - var v = this.get(0, column); - var idx = [0, column]; - for (var i = 1; i < this.rows; i++) { - if (this.get(i, column) < v) { - v = this.get(i, column); - idx[0] = i; - } - } - return idx; - } - - /** - * Returns an array containing the diagonal values of the matrix - * @return {Array} - */ - diag() { - var min = Math.min(this.rows, this.columns); - var diag = new Array(min); - for (var i = 0; i < min; i++) { - diag[i] = this.get(i, i); - } - return diag; - } - - /** - * Returns the sum by the argument given, if no argument given, - * it returns the sum of all elements of the matrix. - * @param {string} by - sum by 'row' or 'column'. - * @return {Matrix|number} - */ - sum(by) { - switch (by) { - case 'row': - return sumByRow(this); - case 'column': - return sumByColumn(this); - default: - return sumAll(this); - } - } - - /** - * Returns the mean of all elements of the matrix - * @return {number} - */ - mean() { - return this.sum() / this.size; - } - - /** - * Returns the product of all elements of the matrix - * @return {number} - */ - prod() { - var prod = 1; - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - prod *= this.get(i, j); - } - } - return prod; - } - - /** - * Returns the norm of a matrix. - * @param {string} type - "frobenius" (default) or "max" return resp. the Frobenius norm and the max norm. 
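 * The Frobenius norm is sqrt(sum of a(i,j)^2 over all elements), so, for
 * example (illustrative), new Matrix([[3, 4]]).norm() === 5. Note that the
 * 'max' variant returns the largest (signed) element via max(), not the
 * largest absolute value.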
- * @return {number} - */ - norm(type = 'frobenius') { - var result = 0; - if (type === 'max') { - return this.max(); - } else if (type === 'frobenius') { - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - result = result + this.get(i, j) * this.get(i, j); - } - } - return Math.sqrt(result); - } else { - throw new RangeError(`unknown norm type: ${type}`); - } - } - - /** - * Computes the cumulative sum of the matrix elements (in place, row by row) - * @return {Matrix} this - */ - cumulativeSum() { - var sum = 0; - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - sum += this.get(i, j); - this.set(i, j, sum); - } - } - return this; - } - - /** - * Computes the dot (scalar) product between the matrix and another - * @param {Matrix} vector2 vector - * @return {number} - */ - dot(vector2) { - if (Matrix.isMatrix(vector2)) vector2 = vector2.to1DArray(); - var vector1 = this.to1DArray(); - if (vector1.length !== vector2.length) { - throw new RangeError('vectors do not have the same size'); - } - var dot = 0; - for (var i = 0; i < vector1.length; i++) { - dot += vector1[i] * vector2[i]; - } - return dot; - } - - /** - * Returns the matrix product between this and other - * @param {Matrix} other - * @return {Matrix} - */ - mmul(other) { - other = this.constructor.checkMatrix(other); - if (this.columns !== other.rows) { - // eslint-disable-next-line no-console - console.warn('Number of columns of left matrix are not equal to number of rows of right matrix.'); - } - - var m = this.rows; - var n = this.columns; - var p = other.columns; - - var result = new this.constructor[Symbol.species](m, p); - - var Bcolj = new Array(n); - for (var j = 0; j < p; j++) { - for (var k = 0; k < n; k++) { - Bcolj[k] = other.get(k, j); - } - - for (var i = 0; i < m; i++) { - var s = 0; - for (k = 0; k < n; k++) { - s += this.get(i, k) * Bcolj[k]; - } - - result.set(i, j, s); - } - } - return result; - } - - strassen2x2(other) { - var result = new this.constructor[Symbol.species](2, 2); - const a11 = this.get(0, 0); - const b11 = other.get(0, 0); - const a12 = this.get(0, 1); - const b12 = other.get(0, 1); - const a21 = this.get(1, 0); - const b21 = other.get(1, 0); - const a22 = this.get(1, 1); - const b22 = other.get(1, 1); - - // Compute intermediate values. - const m1 = (a11 + a22) * (b11 + b22); - const m2 = (a21 + a22) * b11; - const m3 = a11 * (b12 - b22); - const m4 = a22 * (b21 - b11); - const m5 = (a11 + a12) * b22; - const m6 = (a21 - a11) * (b11 + b12); - const m7 = (a12 - a22) * (b21 + b22); - - // Combine intermediate values into the output. 
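// The recombination below is Strassen's 2x2 scheme: the seven products
// m1..m7 replace the eight multiplications of the naive algorithm at the
// cost of extra additions, and the output blocks are
// C = [[m1 + m4 - m5 + m7, m3 + m5], [m2 + m4, m1 - m2 + m3 + m6]].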
- const c00 = m1 + m4 - m5 + m7; - const c01 = m3 + m5; - const c10 = m2 + m4; - const c11 = m1 - m2 + m3 + m6; - - result.set(0, 0, c00); - result.set(0, 1, c01); - result.set(1, 0, c10); - result.set(1, 1, c11); - return result; - } - - strassen3x3(other) { - var result = new this.constructor[Symbol.species](3, 3); - - const a00 = this.get(0, 0); - const a01 = this.get(0, 1); - const a02 = this.get(0, 2); - const a10 = this.get(1, 0); - const a11 = this.get(1, 1); - const a12 = this.get(1, 2); - const a20 = this.get(2, 0); - const a21 = this.get(2, 1); - const a22 = this.get(2, 2); - - const b00 = other.get(0, 0); - const b01 = other.get(0, 1); - const b02 = other.get(0, 2); - const b10 = other.get(1, 0); - const b11 = other.get(1, 1); - const b12 = other.get(1, 2); - const b20 = other.get(2, 0); - const b21 = other.get(2, 1); - const b22 = other.get(2, 2); - - const m1 = (a00 + a01 + a02 - a10 - a11 - a21 - a22) * b11; - const m2 = (a00 - a10) * (-b01 + b11); - const m3 = a11 * (-b00 + b01 + b10 - b11 - b12 - b20 + b22); - const m4 = (-a00 + a10 + a11) * (b00 - b01 + b11); - const m5 = (a10 + a11) * (-b00 + b01); - const m6 = a00 * b00; - const m7 = (-a00 + a20 + a21) * (b00 - b02 + b12); - const m8 = (-a00 + a20) * (b02 - b12); - const m9 = (a20 + a21) * (-b00 + b02); - const m10 = (a00 + a01 + a02 - a11 - a12 - a20 - a21) * b12; - const m11 = a21 * (-b00 + b02 + b10 - b11 - b12 - b20 + b21); - const m12 = (-a02 + a21 + a22) * (b11 + b20 - b21); - const m13 = (a02 - a22) * (b11 - b21); - const m14 = a02 * b20; - const m15 = (a21 + a22) * (-b20 + b21); - const m16 = (-a02 + a11 + a12) * (b12 + b20 - b22); - const m17 = (a02 - a12) * (b12 - b22); - const m18 = (a11 + a12) * (-b20 + b22); - const m19 = a01 * b10; - const m20 = a12 * b21; - const m21 = a10 * b02; - const m22 = a20 * b01; - const m23 = a22 * b22; - - const c00 = m6 + m14 + m19; - const c01 = m1 + m4 + m5 + m6 + m12 + m14 + m15; - const c02 = m6 + m7 + m9 + m10 + m14 + m16 + m18; - const c10 = m2 + m3 + m4 + m6 + m14 + m16 + m17; - const c11 = m2 + m4 + m5 + m6 + m20; - const c12 = m14 + m16 + m17 + m18 + m21; - const c20 = m6 + m7 + m8 + m11 + m12 + m13 + m14; - const c21 = m12 + m13 + m14 + m15 + m22; - const c22 = m6 + m7 + m8 + m9 + m23; - - result.set(0, 0, c00); - result.set(0, 1, c01); - result.set(0, 2, c02); - result.set(1, 0, c10); - result.set(1, 1, c11); - result.set(1, 2, c12); - result.set(2, 0, c20); - result.set(2, 1, c21); - result.set(2, 2, c22); - return result; - } - - /** - * Returns the matrix product between x and y. More efficient than mmul(other) only when we multiply squared matrix and when the size of the matrix is > 1000. - * @param {Matrix} y - * @return {Matrix} - */ - mmulStrassen(y) { - var x = this.clone(); - var r1 = x.rows; - var c1 = x.columns; - var r2 = y.rows; - var c2 = y.columns; - if (c1 !== r2) { - // eslint-disable-next-line no-console - console.warn(`Multiplying ${r1} x ${c1} and ${r2} x ${c2} matrix: dimensions do not match.`); - } - - // Put a matrix into the top left of a matrix of zeros. - // `rows` and `cols` are the dimensions of the output matrix. - function embed(mat, rows, cols) { - var r = mat.rows; - var c = mat.columns; - if ((r === rows) && (c === cols)) { - return mat; - } else { - var resultat = Matrix.zeros(rows, cols); - resultat = resultat.setSubMatrix(mat, 0, 0); - return resultat; - } - } - - - // Make sure both matrices are the same size. - // This is exclusively for simplicity: - // this algorithm can be implemented with matrices of different sizes. 
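// Concretely, both operands are zero-padded below to max(r1, r2) x max(c1, c2);
// blockMult() then falls back to the naive mmul() for small blocks, otherwise
// splits each operand into four quadrants, recurses on the seven Strassen
// products, and finally crops the zero padding off the result.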
- - var r = Math.max(r1, r2); - var c = Math.max(c1, c2); - x = embed(x, r, c); - y = embed(y, r, c); - - // Our recursive multiplication function. - function blockMult(a, b, rows, cols) { - // For small matrices, resort to naive multiplication. - if (rows <= 512 || cols <= 512) { - return a.mmul(b); // a is equivalent to this - } - - // Apply dynamic padding. - if ((rows % 2 === 1) && (cols % 2 === 1)) { - a = embed(a, rows + 1, cols + 1); - b = embed(b, rows + 1, cols + 1); - } else if (rows % 2 === 1) { - a = embed(a, rows + 1, cols); - b = embed(b, rows + 1, cols); - } else if (cols % 2 === 1) { - a = embed(a, rows, cols + 1); - b = embed(b, rows, cols + 1); - } - - var halfRows = parseInt(a.rows / 2, 10); - var halfCols = parseInt(a.columns / 2, 10); - // Subdivide input matrices. - var a11 = a.subMatrix(0, halfRows - 1, 0, halfCols - 1); - var b11 = b.subMatrix(0, halfRows - 1, 0, halfCols - 1); - - var a12 = a.subMatrix(0, halfRows - 1, halfCols, a.columns - 1); - var b12 = b.subMatrix(0, halfRows - 1, halfCols, b.columns - 1); - - var a21 = a.subMatrix(halfRows, a.rows - 1, 0, halfCols - 1); - var b21 = b.subMatrix(halfRows, b.rows - 1, 0, halfCols - 1); - - var a22 = a.subMatrix(halfRows, a.rows - 1, halfCols, a.columns - 1); - var b22 = b.subMatrix(halfRows, b.rows - 1, halfCols, b.columns - 1); - - // Compute intermediate values. - var m1 = blockMult(Matrix.add(a11, a22), Matrix.add(b11, b22), halfRows, halfCols); - var m2 = blockMult(Matrix.add(a21, a22), b11, halfRows, halfCols); - var m3 = blockMult(a11, Matrix.sub(b12, b22), halfRows, halfCols); - var m4 = blockMult(a22, Matrix.sub(b21, b11), halfRows, halfCols); - var m5 = blockMult(Matrix.add(a11, a12), b22, halfRows, halfCols); - var m6 = blockMult(Matrix.sub(a21, a11), Matrix.add(b11, b12), halfRows, halfCols); - var m7 = blockMult(Matrix.sub(a12, a22), Matrix.add(b21, b22), halfRows, halfCols); - - // Combine intermediate values into the output. - var c11 = Matrix.add(m1, m4); - c11.sub(m5); - c11.add(m7); - var c12 = Matrix.add(m3, m5); - var c21 = Matrix.add(m2, m4); - var c22 = Matrix.sub(m1, m2); - c22.add(m3); - c22.add(m6); - - // Crop output to the desired size (undo dynamic padding). - var resultat = Matrix.zeros(2 * c11.rows, 2 * c11.columns); - resultat = resultat.setSubMatrix(c11, 0, 0); - resultat = resultat.setSubMatrix(c12, c11.rows, 0); - resultat = resultat.setSubMatrix(c21, 0, c11.columns); - resultat = resultat.setSubMatrix(c22, c11.rows, c11.columns); - return resultat.subMatrix(0, rows - 1, 0, cols - 1); - } - return blockMult(x, y, r, c); - } - - /** - * Returns a row-by-row scaled matrix - * @param {number} [min=0] - Minimum scaled value - * @param {number} [max=1] - Maximum scaled value - * @return {Matrix} - The scaled matrix - */ - scaleRows(min, max) { - min = min === undefined ? 0 : min; - max = max === undefined ? 
1 : max; - if (min >= max) { - throw new RangeError('min should be strictly smaller than max'); - } - var newMatrix = this.constructor.empty(this.rows, this.columns); - for (var i = 0; i < this.rows; i++) { - var scaled = ml_array_rescale_lib_es6(this.getRow(i), { min, max }); - newMatrix.setRow(i, scaled); - } - return newMatrix; - } - - /** - * Returns a new column-by-column scaled matrix - * @param {number} [min=0] - Minimum scaled value - * @param {number} [max=1] - Maximum scaled value - * @return {Matrix} - The new scaled matrix - * @example - * var matrix = new Matrix([[1,2],[-1,0]]); - * var scaledMatrix = matrix.scaleColumns(); // [[1,1],[0,0]] - */ - scaleColumns(min, max) { - min = min === undefined ? 0 : min; - max = max === undefined ? 1 : max; - if (min >= max) { - throw new RangeError('min should be strictly smaller than max'); - } - var newMatrix = this.constructor.empty(this.rows, this.columns); - for (var i = 0; i < this.columns; i++) { - var scaled = ml_array_rescale_lib_es6(this.getColumn(i), { - min: min, - max: max - }); - newMatrix.setColumn(i, scaled); - } - return newMatrix; - } - - - /** - * Returns the Kronecker product (also known as tensor product) between this and other - * See https://en.wikipedia.org/wiki/Kronecker_product - * @param {Matrix} other - * @return {Matrix} - */ - kroneckerProduct(other) { - other = this.constructor.checkMatrix(other); - - var m = this.rows; - var n = this.columns; - var p = other.rows; - var q = other.columns; - - var result = new this.constructor[Symbol.species](m * p, n * q); - for (var i = 0; i < m; i++) { - for (var j = 0; j < n; j++) { - for (var k = 0; k < p; k++) { - for (var l = 0; l < q; l++) { - result[p * i + k][q * j + l] = this.get(i, j) * other.get(k, l); - } - } - } - } - return result; - } - - /** - * Transposes the matrix and returns a new one containing the result - * @return {Matrix} - */ - transpose() { - var result = new this.constructor[Symbol.species](this.columns, this.rows); - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - result.set(j, i, this.get(i, j)); - } - } - return result; - } - - /** - * Sorts the rows (in place) - * @param {function} compareFunction - usual Array.prototype.sort comparison function - * @return {Matrix} this - */ - sortRows(compareFunction) { - if (compareFunction === undefined) compareFunction = compareNumbers; - for (var i = 0; i < this.rows; i++) { - this.setRow(i, this.getRow(i).sort(compareFunction)); - } - return this; - } - - /** - * Sorts the columns (in place) - * @param {function} compareFunction - usual Array.prototype.sort comparison function - * @return {Matrix} this - */ - sortColumns(compareFunction) { - if (compareFunction === undefined) compareFunction = compareNumbers; - for (var i = 0; i < this.columns; i++) { - this.setColumn(i, this.getColumn(i).sort(compareFunction)); - } - return this; - } - - /** - * Returns a subset of the matrix - * @param {number} startRow - First row index - * @param {number} endRow - Last row index - * @param {number} startColumn - First column index - * @param {number} endColumn - Last column index - * @return {Matrix} - */ - subMatrix(startRow, endRow, startColumn, endColumn) { - checkRange(this, startRow, endRow, startColumn, endColumn); - var newMatrix = new this.constructor[Symbol.species](endRow - startRow + 1, endColumn - startColumn + 1); - for (var i = startRow; i <= endRow; i++) { - for (var j = startColumn; j <= endColumn; j++) { - newMatrix[i - startRow][j - startColumn] = this.get(i, 
j);
-      }
-    }
-    return newMatrix;
-  }
-
-  /**
-   * Returns a subset of the matrix based on an array of row indices
-   * @param {Array} indices - Array containing the row indices
-   * @param {number} [startColumn = 0] - First column index
-   * @param {number} [endColumn = this.columns-1] - Last column index
-   * @return {Matrix}
-   */
-  subMatrixRow(indices, startColumn, endColumn) {
-    if (startColumn === undefined) startColumn = 0;
-    if (endColumn === undefined) endColumn = this.columns - 1;
-    if ((startColumn > endColumn) || (startColumn < 0) || (startColumn >= this.columns) || (endColumn < 0) || (endColumn >= this.columns)) {
-      throw new RangeError('Argument out of range');
-    }
-
-    var newMatrix = new this.constructor[Symbol.species](indices.length, endColumn - startColumn + 1);
-    for (var i = 0; i < indices.length; i++) {
-      for (var j = startColumn; j <= endColumn; j++) {
-        if (indices[i] < 0 || indices[i] >= this.rows) {
-          throw new RangeError(`Row index out of range: ${indices[i]}`);
-        }
-        newMatrix.set(i, j - startColumn, this.get(indices[i], j));
-      }
-    }
-    return newMatrix;
-  }
-
-  /**
-   * Returns a subset of the matrix based on an array of column indices
-   * @param {Array} indices - Array containing the column indices
-   * @param {number} [startRow = 0] - First row index
-   * @param {number} [endRow = this.rows-1] - Last row index
-   * @return {Matrix}
-   */
-  subMatrixColumn(indices, startRow, endRow) {
-    if (startRow === undefined) startRow = 0;
-    if (endRow === undefined) endRow = this.rows - 1;
-    if ((startRow > endRow) || (startRow < 0) || (startRow >= this.rows) || (endRow < 0) || (endRow >= this.rows)) {
-      throw new RangeError('Argument out of range');
-    }
-
-    var newMatrix = new this.constructor[Symbol.species](endRow - startRow + 1, indices.length);
-    for (var i = 0; i < indices.length; i++) {
-      for (var j = startRow; j <= endRow; j++) {
-        if (indices[i] < 0 || indices[i] >= this.columns) {
-          throw new RangeError(`Column index out of range: ${indices[i]}`);
-        }
-        newMatrix.set(j - startRow, i, this.get(j, indices[i]));
-      }
-    }
-    return newMatrix;
-  }
-
-  /**
-   * Set a part of the matrix to the given sub-matrix
-   * @param {Matrix|Array<Array>} matrix - The source matrix from which to extract values.
-   * @param {number} startRow - The index of the first row to set
-   * @param {number} startColumn - The index of the first column to set
-   * @return {Matrix}
-   */
-  setSubMatrix(matrix, startRow, startColumn) {
-    matrix = this.constructor.checkMatrix(matrix);
-    var endRow = startRow + matrix.rows - 1;
-    var endColumn = startColumn + matrix.columns - 1;
-    checkRange(this, startRow, endRow, startColumn, endColumn);
-    for (var i = 0; i < matrix.rows; i++) {
-      for (var j = 0; j < matrix.columns; j++) {
-        this[startRow + i][startColumn + j] = matrix.get(i, j);
-      }
-    }
-    return this;
-  }
-
-  /**
-   * Return a new matrix based on a selection of rows and columns
-   * @param {Array} rowIndices - The row indices to select. Order matters and an index can be used more than once.
-   * @param {Array} columnIndices - The column indices to select. Order matters and an index can be used more than once.
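-   * @example
-   * // indices may repeat; values below are worked by hand:
-   * // new Matrix([[1, 2, 3], [4, 5, 6]]).selection([1, 0], [2, 2]) -> [[6, 6], [3, 3]]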
- * @return {Matrix} The new matrix - */ - selection(rowIndices, columnIndices) { - var indices = checkIndices(this, rowIndices, columnIndices); - var newMatrix = new this.constructor[Symbol.species](rowIndices.length, columnIndices.length); - for (var i = 0; i < indices.row.length; i++) { - var rowIndex = indices.row[i]; - for (var j = 0; j < indices.column.length; j++) { - var columnIndex = indices.column[j]; - newMatrix[i][j] = this.get(rowIndex, columnIndex); - } - } - return newMatrix; - } - - /** - * Returns the trace of the matrix (sum of the diagonal elements) - * @return {number} - */ - trace() { - var min = Math.min(this.rows, this.columns); - var trace = 0; - for (var i = 0; i < min; i++) { - trace += this.get(i, i); - } - return trace; - } - - /* - Matrix views - */ - - /** - * Returns a view of the transposition of the matrix - * @return {MatrixTransposeView} - */ - transposeView() { - return new transpose_MatrixTransposeView(this); - } - - /** - * Returns a view of the row vector with the given index - * @param {number} row - row index of the vector - * @return {MatrixRowView} - */ - rowView(row) { - checkRowIndex(this, row); - return new row_MatrixRowView(this, row); - } - - /** - * Returns a view of the column vector with the given index - * @param {number} column - column index of the vector - * @return {MatrixColumnView} - */ - columnView(column) { - checkColumnIndex(this, column); - return new column_MatrixColumnView(this, column); - } - - /** - * Returns a view of the matrix flipped in the row axis - * @return {MatrixFlipRowView} - */ - flipRowView() { - return new flipRow_MatrixFlipRowView(this); - } - - /** - * Returns a view of the matrix flipped in the column axis - * @return {MatrixFlipColumnView} - */ - flipColumnView() { - return new flipColumn_MatrixFlipColumnView(this); - } - - /** - * Returns a view of a submatrix giving the index boundaries - * @param {number} startRow - first row index of the submatrix - * @param {number} endRow - last row index of the submatrix - * @param {number} startColumn - first column index of the submatrix - * @param {number} endColumn - last column index of the submatrix - * @return {MatrixSubView} - */ - subMatrixView(startRow, endRow, startColumn, endColumn) { - return new sub_MatrixSubView(this, startRow, endRow, startColumn, endColumn); - } - - /** - * Returns a view of the cross of the row indices and the column indices - * @example - * // resulting vector is [[2], [2]] - * var matrix = new Matrix([[1,2,3], [4,5,6]]).selectionView([0, 0], [1]) - * @param {Array} rowIndices - * @param {Array} columnIndices - * @return {MatrixSelectionView} - */ - selectionView(rowIndices, columnIndices) { - return new selection_MatrixSelectionView(this, rowIndices, columnIndices); - } - - /** - * Returns a view of the row indices - * @example - * // resulting vector is [[1,2,3], [1,2,3]] - * var matrix = new Matrix([[1,2,3], [4,5,6]]).rowSelectionView([0, 0]) - * @param {Array} rowIndices - * @return {MatrixRowSelectionView} - */ - rowSelectionView(rowIndices) { - return new rowSelection_MatrixRowSelectionView(this, rowIndices); - } - - /** - * Returns a view of the column indices - * @example - * // resulting vector is [[2, 2], [5, 5]] - * var matrix = new Matrix([[1,2,3], [4,5,6]]).columnSelectionView([1, 1]) - * @param {Array} columnIndices - * @return {MatrixColumnSelectionView} - */ - columnSelectionView(columnIndices) { - return new columnSelection_MatrixColumnSelectionView(this, columnIndices); - } - - - /** - * Calculates and returns the 
determinant of a matrix as a Number
-   * @example
-   * new Matrix([[1, 2], [3, 4]]).det() // => -2
-   * @return {number}
-   */
-  det() {
-    if (this.isSquare()) {
-      var a, b, c, d;
-      if (this.columns === 2) {
-        // 2 x 2 matrix
-        a = this.get(0, 0);
-        b = this.get(0, 1);
-        c = this.get(1, 0);
-        d = this.get(1, 1);
-
-        return a * d - (b * c);
-      } else if (this.columns === 3) {
-        // 3 x 3 matrix
-        var subMatrix0, subMatrix1, subMatrix2;
-        subMatrix0 = this.selectionView([1, 2], [1, 2]);
-        subMatrix1 = this.selectionView([1, 2], [0, 2]);
-        subMatrix2 = this.selectionView([1, 2], [0, 1]);
-        a = this.get(0, 0);
-        b = this.get(0, 1);
-        c = this.get(0, 2);
-
-        return a * subMatrix0.det() - b * subMatrix1.det() + c * subMatrix2.det();
-      } else {
-        // general purpose determinant using the LU decomposition
-        return new lu_LuDecomposition(this).determinant;
-      }
-    } else {
-      throw Error('Determinant can only be calculated for a square matrix.');
-    }
-  }
-
-  /**
-   * Returns the inverse of the matrix if it exists, or the Moore-Penrose pseudoinverse otherwise
-   * @param {number} threshold - threshold for taking the inverse of singular values (default = Number.EPSILON)
-   * @return {Matrix} the (pseudo)inverted matrix.
-   */
-  pseudoInverse(threshold) {
-    if (threshold === undefined) threshold = Number.EPSILON;
-    var svdSolution = new svd_SingularValueDecomposition(this, { autoTranspose: true });
-
-    var U = svdSolution.leftSingularVectors;
-    var V = svdSolution.rightSingularVectors;
-    var s = svdSolution.diagonal;
-
-    for (var i = 0; i < s.length; i++) {
-      if (Math.abs(s[i]) > threshold) {
-        s[i] = 1.0 / s[i];
-      } else {
-        s[i] = 0.0;
-      }
-    }
-
-    // convert list to diagonal
-    s = this.constructor[Symbol.species].diag(s);
-    return V.mmul(s.mmul(U.transposeView()));
-  }
-
-  /**
-   * Creates an exact and independent copy of the matrix
-   * @return {Matrix}
-   */
-  clone() {
-    var newMatrix = new this.constructor[Symbol.species](this.rows, this.columns);
-    for (var row = 0; row < this.rows; row++) {
-      for (var column = 0; column < this.columns; column++) {
-        newMatrix.set(row, column, this.get(row, column));
-      }
-    }
-    return newMatrix;
-  }
-}
-
-Matrix.prototype.klass = 'Matrix';
-
-function compareNumbers(a, b) {
-  return a - b;
-}
-
-/*
-Synonyms
-*/
-
-Matrix.random = Matrix.rand;
-Matrix.diagonal = Matrix.diag;
-Matrix.prototype.diagonal = Matrix.prototype.diag;
-Matrix.identity = Matrix.eye;
-Matrix.prototype.negate = Matrix.prototype.neg;
-Matrix.prototype.tensorProduct = Matrix.prototype.kroneckerProduct;
-Matrix.prototype.determinant = Matrix.prototype.det;
-
-/*
-Dynamically add instance and static methods for mathematical operations
-*/
-
-var inplaceOperator = `
-(function %name%(value) {
-  if (typeof value === 'number') return this.%name%S(value);
-  return this.%name%M(value);
-})
-`;
-
-var inplaceOperatorScalar = `
-(function %name%S(value) {
-  for (var i = 0; i < this.rows; i++) {
-    for (var j = 0; j < this.columns; j++) {
-      this.set(i, j, this.get(i, j) %op% value);
-    }
-  }
-  return this;
-})
-`;
-
-var inplaceOperatorMatrix = `
-(function %name%M(matrix) {
-  matrix = this.constructor.checkMatrix(matrix);
-  if (this.rows !== matrix.rows ||
-    this.columns !== matrix.columns) {
-    throw new RangeError('Matrices dimensions must be equal');
-  }
-  for (var i = 0; i < this.rows; i++) {
-    for (var j = 0; j < this.columns; j++) {
-      this.set(i, j, this.get(i, j) %op% matrix.get(i, j));
-    }
-  }
-  return this;
-})
-`;
-
-var staticOperator = `
-(function %name%(matrix, value) {
-  var newMatrix = new this[Symbol.species](matrix);
-  return
newMatrix.%name%(value); -}) -`; - - var inplaceMethod = ` -(function %name%() { - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - this.set(i, j, %method%(this.get(i, j))); - } - } - return this; -}) -`; - - var staticMethod = ` -(function %name%(matrix) { - var newMatrix = new this[Symbol.species](matrix); - return newMatrix.%name%(); -}) -`; - - var inplaceMethodWithArgs = ` -(function %name%(%args%) { - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - this.set(i, j, %method%(this.get(i, j), %args%)); - } - } - return this; -}) -`; - - var staticMethodWithArgs = ` -(function %name%(matrix, %args%) { - var newMatrix = new this[Symbol.species](matrix); - return newMatrix.%name%(%args%); -}) -`; - - - var inplaceMethodWithOneArgScalar = ` -(function %name%S(value) { - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - this.set(i, j, %method%(this.get(i, j), value)); - } - } - return this; -}) -`; - var inplaceMethodWithOneArgMatrix = ` -(function %name%M(matrix) { - matrix = this.constructor.checkMatrix(matrix); - if (this.rows !== matrix.rows || - this.columns !== matrix.columns) { - throw new RangeError('Matrices dimensions must be equal'); - } - for (var i = 0; i < this.rows; i++) { - for (var j = 0; j < this.columns; j++) { - this.set(i, j, %method%(this.get(i, j), matrix.get(i, j))); - } - } - return this; -}) -`; - - var inplaceMethodWithOneArg = ` -(function %name%(value) { - if (typeof value === 'number') return this.%name%S(value); - return this.%name%M(value); -}) -`; - - var staticMethodWithOneArg = staticMethodWithArgs; - - var operators = [ - // Arithmetic operators - ['+', 'add'], - ['-', 'sub', 'subtract'], - ['*', 'mul', 'multiply'], - ['/', 'div', 'divide'], - ['%', 'mod', 'modulus'], - // Bitwise operators - ['&', 'and'], - ['|', 'or'], - ['^', 'xor'], - ['<<', 'leftShift'], - ['>>', 'signPropagatingRightShift'], - ['>>>', 'rightShift', 'zeroFillRightShift'] - ]; - - var i; - var eval2 = eval; // eslint-disable-line no-eval - for (var operator of operators) { - var inplaceOp = eval2(fillTemplateFunction(inplaceOperator, { name: operator[1], op: operator[0] })); - var inplaceOpS = eval2(fillTemplateFunction(inplaceOperatorScalar, { name: `${operator[1]}S`, op: operator[0] })); - var inplaceOpM = eval2(fillTemplateFunction(inplaceOperatorMatrix, { name: `${operator[1]}M`, op: operator[0] })); - var staticOp = eval2(fillTemplateFunction(staticOperator, { name: operator[1] })); - for (i = 1; i < operator.length; i++) { - Matrix.prototype[operator[i]] = inplaceOp; - Matrix.prototype[`${operator[i]}S`] = inplaceOpS; - Matrix.prototype[`${operator[i]}M`] = inplaceOpM; - Matrix[operator[i]] = staticOp; - } - } - - var methods = [['~', 'not']]; - - [ - 'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh', 'cbrt', 'ceil', - 'clz32', 'cos', 'cosh', 'exp', 'expm1', 'floor', 'fround', 'log', 'log1p', - 'log10', 'log2', 'round', 'sign', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc' - ].forEach(function (mathMethod) { - methods.push([`Math.${mathMethod}`, mathMethod]); - }); - - for (var method of methods) { - var inplaceMeth = eval2(fillTemplateFunction(inplaceMethod, { name: method[1], method: method[0] })); - var staticMeth = eval2(fillTemplateFunction(staticMethod, { name: method[1] })); - for (i = 1; i < method.length; i++) { - Matrix.prototype[method[i]] = inplaceMeth; - Matrix[method[i]] = staticMeth; - } - } - - var methodsWithArgs = [['Math.pow', 1, 'pow']]; - - for (var 
methodWithArg of methodsWithArgs) { - var args = 'arg0'; - for (i = 1; i < methodWithArg[1]; i++) { - args += `, arg${i}`; - } - if (methodWithArg[1] !== 1) { - var inplaceMethWithArgs = eval2(fillTemplateFunction(inplaceMethodWithArgs, { - name: methodWithArg[2], - method: methodWithArg[0], - args: args - })); - var staticMethWithArgs = eval2(fillTemplateFunction(staticMethodWithArgs, { name: methodWithArg[2], args: args })); - for (i = 2; i < methodWithArg.length; i++) { - Matrix.prototype[methodWithArg[i]] = inplaceMethWithArgs; - Matrix[methodWithArg[i]] = staticMethWithArgs; - } - } else { - var tmplVar = { - name: methodWithArg[2], - args: args, - method: methodWithArg[0] - }; - var inplaceMethod2 = eval2(fillTemplateFunction(inplaceMethodWithOneArg, tmplVar)); - var inplaceMethodS = eval2(fillTemplateFunction(inplaceMethodWithOneArgScalar, tmplVar)); - var inplaceMethodM = eval2(fillTemplateFunction(inplaceMethodWithOneArgMatrix, tmplVar)); - var staticMethod2 = eval2(fillTemplateFunction(staticMethodWithOneArg, tmplVar)); - for (i = 2; i < methodWithArg.length; i++) { - Matrix.prototype[methodWithArg[i]] = inplaceMethod2; - Matrix.prototype[`${methodWithArg[i]}M`] = inplaceMethodM; - Matrix.prototype[`${methodWithArg[i]}S`] = inplaceMethodS; - Matrix[methodWithArg[i]] = staticMethod2; - } - } - } - - function fillTemplateFunction(template, values) { - for (var value in values) { - template = template.replace(new RegExp(`%${value}%`, 'g'), values[value]); - } - return template; - } - - return Matrix; -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/matrix.js - - - -class matrix_Matrix extends AbstractMatrix(Array) { - constructor(nRows, nColumns) { - var i; - if (arguments.length === 1 && typeof nRows === 'number') { - return new Array(nRows); - } - if (matrix_Matrix.isMatrix(nRows)) { - return nRows.clone(); - } else if (Number.isInteger(nRows) && nRows > 0) { - // Create an empty matrix - super(nRows); - if (Number.isInteger(nColumns) && nColumns > 0) { - for (i = 0; i < nRows; i++) { - this[i] = new Array(nColumns); - } - } else { - throw new TypeError('nColumns must be a positive integer'); - } - } else if (Array.isArray(nRows)) { - // Copy the values from the 2D array - const matrix = nRows; - nRows = matrix.length; - nColumns = matrix[0].length; - if (typeof nColumns !== 'number' || nColumns === 0) { - throw new TypeError( - 'Data must be a 2D array with at least one element' - ); - } - super(nRows); - for (i = 0; i < nRows; i++) { - if (matrix[i].length !== nColumns) { - throw new RangeError('Inconsistent array dimensions'); - } - this[i] = [].concat(matrix[i]); - } - } else { - throw new TypeError( - 'First argument must be a positive number or an array' - ); - } - this.rows = nRows; - this.columns = nColumns; - return this; - } - - set(rowIndex, columnIndex, value) { - this[rowIndex][columnIndex] = value; - return this; - } - - get(rowIndex, columnIndex) { - return this[rowIndex][columnIndex]; - } - - /** - * Removes a row from the given index - * @param {number} index - Row index - * @return {Matrix} this - */ - removeRow(index) { - checkRowIndex(this, index); - if (this.rows === 1) { - throw new RangeError('A matrix cannot have less than one row'); - } - this.splice(index, 1); - this.rows -= 1; - return this; - } - - /** - * Adds a row at the given index - * @param {number} [index = this.rows] - Row index - * @param {Array|Matrix} array - Array or vector - * @return {Matrix} this - */ - addRow(index, array) { - if (array === undefined) { - array = index; - index 
= this.rows;
-    }
-    checkRowIndex(this, index, true);
-    array = checkRowVector(this, array, true);
-    this.splice(index, 0, array);
-    this.rows += 1;
-    return this;
-  }
-
-  /**
-   * Removes a column from the given index
-   * @param {number} index - Column index
-   * @return {Matrix} this
-   */
-  removeColumn(index) {
-    checkColumnIndex(this, index);
-    if (this.columns === 1) {
-      throw new RangeError('A matrix cannot have less than one column');
-    }
-    for (var i = 0; i < this.rows; i++) {
-      this[i].splice(index, 1);
-    }
-    this.columns -= 1;
-    return this;
-  }
-
-  /**
-   * Adds a column at the given index
-   * @param {number} [index = this.columns] - Column index
-   * @param {Array|Matrix} array - Array or vector
-   * @return {Matrix} this
-   */
-  addColumn(index, array) {
-    if (typeof array === 'undefined') {
-      array = index;
-      index = this.columns;
-    }
-    checkColumnIndex(this, index, true);
-    array = checkColumnVector(this, array);
-    for (var i = 0; i < this.rows; i++) {
-      this[i].splice(index, 0, array[i]);
-    }
-    this.columns += 1;
-    return this;
-  }
-}
-
-// CONCATENATED MODULE: ./node_modules/ml-matrix/src/wrap/WrapperMatrix1D.js
-
-
-class WrapperMatrix1D_WrapperMatrix1D extends AbstractMatrix() {
-  /**
-   * @class WrapperMatrix1D
-   * @param {Array} data
-   * @param {object} [options]
-   * @param {number} [options.rows = 1]
-   */
-  constructor(data, options = {}) {
-    const { rows = 1 } = options;
-
-    if (data.length % rows !== 0) {
-      throw new Error('the data length is not divisible by the number of rows');
-    }
-    super();
-    this.rows = rows;
-    this.columns = data.length / rows;
-    this.data = data;
-  }
-
-  set(rowIndex, columnIndex, value) {
-    var index = this._calculateIndex(rowIndex, columnIndex);
-    this.data[index] = value;
-    return this;
-  }
-
-  get(rowIndex, columnIndex) {
-    var index = this._calculateIndex(rowIndex, columnIndex);
-    return this.data[index];
-  }
-
-  _calculateIndex(row, column) {
-    return row * this.columns + column;
-  }
-
-  static get [Symbol.species]() {
-    return matrix_Matrix;
-  }
-}
-
-// CONCATENATED MODULE: ./node_modules/ml-matrix/src/wrap/WrapperMatrix2D.js
-
-
-class WrapperMatrix2D_WrapperMatrix2D extends AbstractMatrix() {
-  /**
-   * @class WrapperMatrix2D
-   * @param {Array<Array>} data
-   */
-  constructor(data) {
-    super();
-    this.data = data;
-    this.rows = data.length;
-    this.columns = data[0].length;
-  }
-
-  set(rowIndex, columnIndex, value) {
-    this.data[rowIndex][columnIndex] = value;
-    return this;
-  }
-
-  get(rowIndex, columnIndex) {
-    return this.data[rowIndex][columnIndex];
-  }
-
-  static get [Symbol.species]() {
-    return matrix_Matrix;
-  }
-}
-
-// CONCATENATED MODULE: ./node_modules/ml-matrix/src/wrap/wrap.js
-
-
-/**
- * @param {Array<Array>|Array} array
- * @param {object} [options]
- * @param {number} [options.rows = 1]
- * @return {WrapperMatrix1D|WrapperMatrix2D}
- */
-function wrap(array, options) {
-  if (Array.isArray(array)) {
-    if (array[0] && Array.isArray(array[0])) {
-      return new WrapperMatrix2D_WrapperMatrix2D(array);
-    } else {
-      return new WrapperMatrix1D_WrapperMatrix1D(array, options);
-    }
-  } else {
-    throw new Error('the argument is not an array');
-  }
-}
-
-// CONCATENATED MODULE: ./node_modules/ml-matrix/src/dc/qr.js
-
-
-/**
- * @class QrDecomposition
- * @link https://github.com/lutzroeder/Mapack/blob/master/Source/QrDecomposition.cs
- * @param {Matrix} value
- */
-class qr_QrDecomposition {
-  constructor(value) {
-    value = WrapperMatrix2D_WrapperMatrix2D.checkMatrix(value);
-
-    var qr = value.clone();
-    var m = value.rows;
-    var n = value.columns;
-    var rdiag = new Array(n);
-    var i, j, k, s;
-
-    for (k = 0; k < n; k++) {
-      var nrm = 0;
-      for (i = k; i < m; i++) {
-        nrm = hypotenuse(nrm, qr.get(i, k));
-      }
-      if (nrm !== 0) {
-        if (qr.get(k, k) < 0) {
-          nrm = -nrm;
-        }
-        for (i = k; i < m; i++) {
-          qr.set(i, k, qr.get(i, k) / nrm);
-        }
-        qr.set(k, k, qr.get(k, k) + 1);
-        for (j = k + 1; j < n; j++) {
-          s = 0;
-          for (i = k; i < m; i++) {
-            s += qr.get(i, k) * qr.get(i, j);
-          }
-          s = -s / qr.get(k, k);
-          for (i = k; i < m; i++) {
-            qr.set(i, j, qr.get(i, j) + s * qr.get(i, k));
-          }
-        }
-      }
-      rdiag[k] = -nrm;
-    }
-
-    this.QR = qr;
-    this.Rdiag = rdiag;
-  }
-
-  /**
-   * Solves a least-squares problem (Ax = b) using the QR decomposition. Useful when A is rectangular, but it fails when A is rank deficient.
-   * Example: to approximate x, with A of shape m x n, x of size n and b of size m (m > n), use:
-   * var qr = new QrDecomposition(A);
-   * var x = qr.solve(b);
-   * @param {Matrix} value - Matrix whose columns are the right-hand sides b (in the equation Ax = b)
-   * @return {Matrix} - The vector x
-   */
-  solve(value) {
-    value = matrix_Matrix.checkMatrix(value);
-
-    var qr = this.QR;
-    var m = qr.rows;
-
-    if (value.rows !== m) {
-      throw new Error('Matrix row dimensions must agree');
-    }
-    if (!this.isFullRank()) {
-      throw new Error('Matrix is rank deficient');
-    }
-
-    var count = value.columns;
-    var X = value.clone();
-    var n = qr.columns;
-    var i, j, k, s;
-
-    for (k = 0; k < n; k++) {
-      for (j = 0; j < count; j++) {
-        s = 0;
-        for (i = k; i < m; i++) {
-          s += qr[i][k] * X[i][j];
-        }
-        s = -s / qr[k][k];
-        for (i = k; i < m; i++) {
-          X[i][j] += s * qr[i][k];
-        }
-      }
-    }
-    for (k = n - 1; k >= 0; k--) {
-      for (j = 0; j < count; j++) {
-        X[k][j] /= this.Rdiag[k];
-      }
-      for (i = 0; i < k; i++) {
-        for (j = 0; j < count; j++) {
-          X[i][j] -= X[k][j] * qr[i][k];
-        }
-      }
-    }
-
-    return X.subMatrix(0, n - 1, 0, count - 1);
-  }
-
-  /**
-   *
-   * @return {boolean}
-   */
-  isFullRank() {
-    var columns = this.QR.columns;
-    for (var i = 0; i < columns; i++) {
-      if (this.Rdiag[i] === 0) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /**
-   *
-   * @return {Matrix}
-   */
-  get upperTriangularMatrix() {
-    var qr = this.QR;
-    var n = qr.columns;
-    var X = new matrix_Matrix(n, n);
-    var i, j;
-    for (i = 0; i < n; i++) {
-      for (j = 0; j < n; j++) {
-        if (i < j) {
-          X[i][j] = qr[i][j];
-        } else if (i === j) {
-          X[i][j] = this.Rdiag[i];
-        } else {
-          X[i][j] = 0;
-        }
-      }
-    }
-    return X;
-  }
-
-  /**
-   *
-   * @return {Matrix}
-   */
-  get orthogonalMatrix() {
-    var qr = this.QR;
-    var rows = qr.rows;
-    var columns = qr.columns;
-    var X = new matrix_Matrix(rows, columns);
-    var i, j, k, s;
-
-    for (k = columns - 1; k >= 0; k--) {
-      for (i = 0; i < rows; i++) {
-        X[i][k] = 0;
-      }
-      X[k][k] = 1;
-      for (j = k; j < columns; j++) {
-        if (qr[k][k] !== 0) {
-          s = 0;
-          for (i = k; i < rows; i++) {
-            s += qr[i][k] * X[i][j];
-          }
-
-          s = -s / qr[k][k];
-
-          for (i = k; i < rows; i++) {
-            X[i][j] += s * qr[i][k];
-          }
-        }
-      }
-    }
-    return X;
-  }
-}
-
-// CONCATENATED MODULE: ./node_modules/ml-matrix/src/decompositions.js
-
-
-/**
- * Computes the inverse of a Matrix
- * @param {Matrix} matrix
- * @param {boolean} [useSVD=false]
- * @return {Matrix}
- */
-function inverse(matrix, useSVD = false) {
-  matrix = WrapperMatrix2D_WrapperMatrix2D.checkMatrix(matrix);
-  if (useSVD) {
-    return new svd_SingularValueDecomposition(matrix).inverse();
-  } else {
-    return solve(matrix, matrix_Matrix.eye(matrix.rows));
-  }
-}
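-// Illustrative usage (matrix_Matrix is the Matrix class defined above; values
-// worked by hand for A = [[4, 7], [2, 6]], whose determinant is 10):
-//   var A = new matrix_Matrix([[4, 7], [2, 6]]);
-//   inverse(A);                     // [[0.6, -0.7], [-0.2, 0.4]]
-//   solve(A, matrix_Matrix.eye(2)); // same result, via the LU path since A is square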
-/**
- *
- * @param {Matrix} leftHandSide
- * @param {Matrix} rightHandSide
- * @param {boolean} [useSVD = false]
- * @return {Matrix}
- */
-function solve(leftHandSide, rightHandSide, useSVD = false) {
-  leftHandSide = WrapperMatrix2D_WrapperMatrix2D.checkMatrix(leftHandSide);
-  rightHandSide = WrapperMatrix2D_WrapperMatrix2D.checkMatrix(rightHandSide);
-  if (useSVD) {
-    return new svd_SingularValueDecomposition(leftHandSide).solve(rightHandSide);
-  } else {
-    return leftHandSide.isSquare()
-      ? new lu_LuDecomposition(leftHandSide).solve(rightHandSide)
-      : new qr_QrDecomposition(leftHandSide).solve(rightHandSide);
-  }
-}
-
-// CONCATENATED MODULE: ./node_modules/ml-matrix/src/linearDependencies.js
-
-
-// helper used by linearDependencies
-function xrange(n, exception) {
-  var range = [];
-  for (var i = 0; i < n; i++) {
-    if (i !== exception) {
-      range.push(i);
-    }
-  }
-  return range;
-}
-
-// helper used by linearDependencies
-function dependenciesOneRow(
-  error,
-  matrix,
-  index,
-  thresholdValue = 10e-10,
-  thresholdError = 10e-10
-) {
-  if (error > thresholdError) {
-    return new Array(matrix.rows + 1).fill(0);
-  } else {
-    var returnArray = matrix.addRow(index, [0]);
-    for (var i = 0; i < returnArray.rows; i++) {
-      if (Math.abs(returnArray.get(i, 0)) < thresholdValue) {
-        returnArray.set(i, 0, 0);
-      }
-    }
-    return returnArray.to1DArray();
-  }
-}
-
-/**
- * Creates a matrix which represents the dependencies between rows.
- * If a row is a linear combination of the other rows, the corresponding row of the result contains the coefficients of that combination.
- * For example: for A = [[2, 0, 0, 1], [0, 1, 6, 0], [0, 3, 0, 1], [0, 0, 1, 0], [0, 1, 2, 0]], the result will be [[0, 0, 0, 0, 0], [0, 0, 0, 4, 1], [0, 0, 0, 0, 0], [0, 0.25, 0, 0, -0.25], [0, 1, 0, -4, 0]]
- * @param {Matrix} matrix
- * @param {Object} [options] - Includes thresholdValue and thresholdError.
- * @param {number} [options.thresholdValue = 10e-10] If an absolute value is smaller than this threshold, it is set to zero.
- * @param {number} [options.thresholdError = 10e-10] If the error is smaller than this threshold, the linear combination found is accepted and the row is considered dependent on the other rows.
- * @return {Matrix} the matrix which represents the dependencies between rows.
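- * @example
- * // illustrative call, with A the 5x4 matrix from the description above:
- * // var deps = linearDependencies(A);
- * // deps.getRow(4) -> [0, 1, 0, -4, 0], i.e. row 4 = row 1 - 4 * row 3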
- */ - -function linearDependencies(matrix, options = {}) { - const { thresholdValue = 10e-10, thresholdError = 10e-10 } = options; - - var n = matrix.rows; - var results = new matrix_Matrix(n, n); - - for (var i = 0; i < n; i++) { - var b = matrix_Matrix.columnVector(matrix.getRow(i)); - var Abis = matrix.subMatrixRow(xrange(n, i)).transposeView(); - var svd = new svd_SingularValueDecomposition(Abis); - var x = svd.solve(b); - var error = lib_es6( - matrix_Matrix.sub(b, Abis.mmul(x)) - .abs() - .to1DArray() - ); - results.setRow( - i, - dependenciesOneRow(error, x, i, thresholdValue, thresholdError) - ); - } - return results; -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/dc/evd.js - - - - -/** - * @class EigenvalueDecomposition - * @link https://github.com/lutzroeder/Mapack/blob/master/Source/EigenvalueDecomposition.cs - * @param {Matrix} matrix - * @param {object} [options] - * @param {boolean} [options.assumeSymmetric=false] - */ -class evd_EigenvalueDecomposition { - constructor(matrix, options = {}) { - const { assumeSymmetric = false } = options; - - matrix = WrapperMatrix2D_WrapperMatrix2D.checkMatrix(matrix); - if (!matrix.isSquare()) { - throw new Error('Matrix is not a square matrix'); - } - - var n = matrix.columns; - var V = getFilled2DArray(n, n, 0); - var d = new Array(n); - var e = new Array(n); - var value = matrix; - var i, j; - - var isSymmetric = false; - if (assumeSymmetric) { - isSymmetric = true; - } else { - isSymmetric = matrix.isSymmetric(); - } - - if (isSymmetric) { - for (i = 0; i < n; i++) { - for (j = 0; j < n; j++) { - V[i][j] = value.get(i, j); - } - } - tred2(n, e, d, V); - tql2(n, e, d, V); - } else { - var H = getFilled2DArray(n, n, 0); - var ort = new Array(n); - for (j = 0; j < n; j++) { - for (i = 0; i < n; i++) { - H[i][j] = value.get(i, j); - } - } - orthes(n, H, ort, V); - hqr2(n, e, d, V, H); - } - - this.n = n; - this.e = e; - this.d = d; - this.V = V; - } - - /** - * - * @return {Array} - */ - get realEigenvalues() { - return this.d; - } - - /** - * - * @return {Array} - */ - get imaginaryEigenvalues() { - return this.e; - } - - /** - * - * @return {Matrix} - */ - get eigenvectorMatrix() { - if (!matrix_Matrix.isMatrix(this.V)) { - this.V = new matrix_Matrix(this.V); - } - return this.V; - } - - /** - * - * @return {Matrix} - */ - get diagonalMatrix() { - var n = this.n; - var e = this.e; - var d = this.d; - var X = new matrix_Matrix(n, n); - var i, j; - for (i = 0; i < n; i++) { - for (j = 0; j < n; j++) { - X[i][j] = 0; - } - X[i][i] = d[i]; - if (e[i] > 0) { - X[i][i + 1] = e[i]; - } else if (e[i] < 0) { - X[i][i - 1] = e[i]; - } - } - return X; - } -} - -function tred2(n, e, d, V) { - var f, g, h, i, j, k, hh, scale; - - for (j = 0; j < n; j++) { - d[j] = V[n - 1][j]; - } - - for (i = n - 1; i > 0; i--) { - scale = 0; - h = 0; - for (k = 0; k < i; k++) { - scale = scale + Math.abs(d[k]); - } - - if (scale === 0) { - e[i] = d[i - 1]; - for (j = 0; j < i; j++) { - d[j] = V[i - 1][j]; - V[i][j] = 0; - V[j][i] = 0; - } - } else { - for (k = 0; k < i; k++) { - d[k] /= scale; - h += d[k] * d[k]; - } - - f = d[i - 1]; - g = Math.sqrt(h); - if (f > 0) { - g = -g; - } - - e[i] = scale * g; - h = h - f * g; - d[i - 1] = f - g; - for (j = 0; j < i; j++) { - e[j] = 0; - } - - for (j = 0; j < i; j++) { - f = d[j]; - V[j][i] = f; - g = e[j] + V[j][j] * f; - for (k = j + 1; k <= i - 1; k++) { - g += V[k][j] * d[k]; - e[k] += V[k][j] * f; - } - e[j] = g; - } - - f = 0; - for (j = 0; j < i; j++) { - e[j] /= h; - f += e[j] * d[j]; - } - - hh = f / 
(h + h); - for (j = 0; j < i; j++) { - e[j] -= hh * d[j]; - } - - for (j = 0; j < i; j++) { - f = d[j]; - g = e[j]; - for (k = j; k <= i - 1; k++) { - V[k][j] -= f * e[k] + g * d[k]; - } - d[j] = V[i - 1][j]; - V[i][j] = 0; - } - } - d[i] = h; - } - - for (i = 0; i < n - 1; i++) { - V[n - 1][i] = V[i][i]; - V[i][i] = 1; - h = d[i + 1]; - if (h !== 0) { - for (k = 0; k <= i; k++) { - d[k] = V[k][i + 1] / h; - } - - for (j = 0; j <= i; j++) { - g = 0; - for (k = 0; k <= i; k++) { - g += V[k][i + 1] * V[k][j]; - } - for (k = 0; k <= i; k++) { - V[k][j] -= g * d[k]; - } - } - } - - for (k = 0; k <= i; k++) { - V[k][i + 1] = 0; - } - } - - for (j = 0; j < n; j++) { - d[j] = V[n - 1][j]; - V[n - 1][j] = 0; - } - - V[n - 1][n - 1] = 1; - e[0] = 0; -} - -function tql2(n, e, d, V) { - var g, h, i, j, k, l, m, p, r, dl1, c, c2, c3, el1, s, s2, iter; - - for (i = 1; i < n; i++) { - e[i - 1] = e[i]; - } - - e[n - 1] = 0; - - var f = 0; - var tst1 = 0; - var eps = Number.EPSILON; - - for (l = 0; l < n; l++) { - tst1 = Math.max(tst1, Math.abs(d[l]) + Math.abs(e[l])); - m = l; - while (m < n) { - if (Math.abs(e[m]) <= eps * tst1) { - break; - } - m++; - } - - if (m > l) { - iter = 0; - do { - iter = iter + 1; - - g = d[l]; - p = (d[l + 1] - g) / (2 * e[l]); - r = hypotenuse(p, 1); - if (p < 0) { - r = -r; - } - - d[l] = e[l] / (p + r); - d[l + 1] = e[l] * (p + r); - dl1 = d[l + 1]; - h = g - d[l]; - for (i = l + 2; i < n; i++) { - d[i] -= h; - } - - f = f + h; - - p = d[m]; - c = 1; - c2 = c; - c3 = c; - el1 = e[l + 1]; - s = 0; - s2 = 0; - for (i = m - 1; i >= l; i--) { - c3 = c2; - c2 = c; - s2 = s; - g = c * e[i]; - h = c * p; - r = hypotenuse(p, e[i]); - e[i + 1] = s * r; - s = e[i] / r; - c = p / r; - p = c * d[i] - s * g; - d[i + 1] = h + s * (c * g + s * d[i]); - - for (k = 0; k < n; k++) { - h = V[k][i + 1]; - V[k][i + 1] = s * V[k][i] + c * h; - V[k][i] = c * V[k][i] - s * h; - } - } - - p = -s * s2 * c3 * el1 * e[l] / dl1; - e[l] = s * p; - d[l] = c * p; - } while (Math.abs(e[l]) > eps * tst1); - } - d[l] = d[l] + f; - e[l] = 0; - } - - for (i = 0; i < n - 1; i++) { - k = i; - p = d[i]; - for (j = i + 1; j < n; j++) { - if (d[j] < p) { - k = j; - p = d[j]; - } - } - - if (k !== i) { - d[k] = d[i]; - d[i] = p; - for (j = 0; j < n; j++) { - p = V[j][i]; - V[j][i] = V[j][k]; - V[j][k] = p; - } - } - } -} - -function orthes(n, H, ort, V) { - var low = 0; - var high = n - 1; - var f, g, h, i, j, m; - var scale; - - for (m = low + 1; m <= high - 1; m++) { - scale = 0; - for (i = m; i <= high; i++) { - scale = scale + Math.abs(H[i][m - 1]); - } - - if (scale !== 0) { - h = 0; - for (i = high; i >= m; i--) { - ort[i] = H[i][m - 1] / scale; - h += ort[i] * ort[i]; - } - - g = Math.sqrt(h); - if (ort[m] > 0) { - g = -g; - } - - h = h - ort[m] * g; - ort[m] = ort[m] - g; - - for (j = m; j < n; j++) { - f = 0; - for (i = high; i >= m; i--) { - f += ort[i] * H[i][j]; - } - - f = f / h; - for (i = m; i <= high; i++) { - H[i][j] -= f * ort[i]; - } - } - - for (i = 0; i <= high; i++) { - f = 0; - for (j = high; j >= m; j--) { - f += ort[j] * H[i][j]; - } - - f = f / h; - for (j = m; j <= high; j++) { - H[i][j] -= f * ort[j]; - } - } - - ort[m] = scale * ort[m]; - H[m][m - 1] = scale * g; - } - } - - for (i = 0; i < n; i++) { - for (j = 0; j < n; j++) { - V[i][j] = i === j ? 
1 : 0; - } - } - - for (m = high - 1; m >= low + 1; m--) { - if (H[m][m - 1] !== 0) { - for (i = m + 1; i <= high; i++) { - ort[i] = H[i][m - 1]; - } - - for (j = m; j <= high; j++) { - g = 0; - for (i = m; i <= high; i++) { - g += ort[i] * V[i][j]; - } - - g = g / ort[m] / H[m][m - 1]; - for (i = m; i <= high; i++) { - V[i][j] += g * ort[i]; - } - } - } - } -} - -function hqr2(nn, e, d, V, H) { - var n = nn - 1; - var low = 0; - var high = nn - 1; - var eps = Number.EPSILON; - var exshift = 0; - var norm = 0; - var p = 0; - var q = 0; - var r = 0; - var s = 0; - var z = 0; - var iter = 0; - var i, j, k, l, m, t, w, x, y; - var ra, sa, vr, vi; - var notlast, cdivres; - - for (i = 0; i < nn; i++) { - if (i < low || i > high) { - d[i] = H[i][i]; - e[i] = 0; - } - - for (j = Math.max(i - 1, 0); j < nn; j++) { - norm = norm + Math.abs(H[i][j]); - } - } - - while (n >= low) { - l = n; - while (l > low) { - s = Math.abs(H[l - 1][l - 1]) + Math.abs(H[l][l]); - if (s === 0) { - s = norm; - } - if (Math.abs(H[l][l - 1]) < eps * s) { - break; - } - l--; - } - - if (l === n) { - H[n][n] = H[n][n] + exshift; - d[n] = H[n][n]; - e[n] = 0; - n--; - iter = 0; - } else if (l === n - 1) { - w = H[n][n - 1] * H[n - 1][n]; - p = (H[n - 1][n - 1] - H[n][n]) / 2; - q = p * p + w; - z = Math.sqrt(Math.abs(q)); - H[n][n] = H[n][n] + exshift; - H[n - 1][n - 1] = H[n - 1][n - 1] + exshift; - x = H[n][n]; - - if (q >= 0) { - z = p >= 0 ? p + z : p - z; - d[n - 1] = x + z; - d[n] = d[n - 1]; - if (z !== 0) { - d[n] = x - w / z; - } - e[n - 1] = 0; - e[n] = 0; - x = H[n][n - 1]; - s = Math.abs(x) + Math.abs(z); - p = x / s; - q = z / s; - r = Math.sqrt(p * p + q * q); - p = p / r; - q = q / r; - - for (j = n - 1; j < nn; j++) { - z = H[n - 1][j]; - H[n - 1][j] = q * z + p * H[n][j]; - H[n][j] = q * H[n][j] - p * z; - } - - for (i = 0; i <= n; i++) { - z = H[i][n - 1]; - H[i][n - 1] = q * z + p * H[i][n]; - H[i][n] = q * H[i][n] - p * z; - } - - for (i = low; i <= high; i++) { - z = V[i][n - 1]; - V[i][n - 1] = q * z + p * V[i][n]; - V[i][n] = q * V[i][n] - p * z; - } - } else { - d[n - 1] = x + p; - d[n] = x + p; - e[n - 1] = z; - e[n] = -z; - } - - n = n - 2; - iter = 0; - } else { - x = H[n][n]; - y = 0; - w = 0; - if (l < n) { - y = H[n - 1][n - 1]; - w = H[n][n - 1] * H[n - 1][n]; - } - - if (iter === 10) { - exshift += x; - for (i = low; i <= n; i++) { - H[i][i] -= x; - } - s = Math.abs(H[n][n - 1]) + Math.abs(H[n - 1][n - 2]); - x = y = 0.75 * s; - w = -0.4375 * s * s; - } - - if (iter === 30) { - s = (y - x) / 2; - s = s * s + w; - if (s > 0) { - s = Math.sqrt(s); - if (y < x) { - s = -s; - } - s = x - w / ((y - x) / 2 + s); - for (i = low; i <= n; i++) { - H[i][i] -= s; - } - exshift += s; - x = y = w = 0.964; - } - } - - iter = iter + 1; - - m = n - 2; - while (m >= l) { - z = H[m][m]; - r = x - z; - s = y - z; - p = (r * s - w) / H[m + 1][m] + H[m][m + 1]; - q = H[m + 1][m + 1] - z - r - s; - r = H[m + 2][m + 1]; - s = Math.abs(p) + Math.abs(q) + Math.abs(r); - p = p / s; - q = q / s; - r = r / s; - if (m === l) { - break; - } - if ( - Math.abs(H[m][m - 1]) * (Math.abs(q) + Math.abs(r)) < - eps * - (Math.abs(p) * - (Math.abs(H[m - 1][m - 1]) + - Math.abs(z) + - Math.abs(H[m + 1][m + 1]))) - ) { - break; - } - m--; - } - - for (i = m + 2; i <= n; i++) { - H[i][i - 2] = 0; - if (i > m + 2) { - H[i][i - 3] = 0; - } - } - - for (k = m; k <= n - 1; k++) { - notlast = k !== n - 1; - if (k !== m) { - p = H[k][k - 1]; - q = H[k + 1][k - 1]; - r = notlast ? 
H[k + 2][k - 1] : 0; - x = Math.abs(p) + Math.abs(q) + Math.abs(r); - if (x !== 0) { - p = p / x; - q = q / x; - r = r / x; - } - } - - if (x === 0) { - break; - } - - s = Math.sqrt(p * p + q * q + r * r); - if (p < 0) { - s = -s; - } - - if (s !== 0) { - if (k !== m) { - H[k][k - 1] = -s * x; - } else if (l !== m) { - H[k][k - 1] = -H[k][k - 1]; - } - - p = p + s; - x = p / s; - y = q / s; - z = r / s; - q = q / p; - r = r / p; - - for (j = k; j < nn; j++) { - p = H[k][j] + q * H[k + 1][j]; - if (notlast) { - p = p + r * H[k + 2][j]; - H[k + 2][j] = H[k + 2][j] - p * z; - } - - H[k][j] = H[k][j] - p * x; - H[k + 1][j] = H[k + 1][j] - p * y; - } - - for (i = 0; i <= Math.min(n, k + 3); i++) { - p = x * H[i][k] + y * H[i][k + 1]; - if (notlast) { - p = p + z * H[i][k + 2]; - H[i][k + 2] = H[i][k + 2] - p * r; - } - - H[i][k] = H[i][k] - p; - H[i][k + 1] = H[i][k + 1] - p * q; - } - - for (i = low; i <= high; i++) { - p = x * V[i][k] + y * V[i][k + 1]; - if (notlast) { - p = p + z * V[i][k + 2]; - V[i][k + 2] = V[i][k + 2] - p * r; - } - - V[i][k] = V[i][k] - p; - V[i][k + 1] = V[i][k + 1] - p * q; - } - } - } - } - } - - if (norm === 0) { - return; - } - - for (n = nn - 1; n >= 0; n--) { - p = d[n]; - q = e[n]; - - if (q === 0) { - l = n; - H[n][n] = 1; - for (i = n - 1; i >= 0; i--) { - w = H[i][i] - p; - r = 0; - for (j = l; j <= n; j++) { - r = r + H[i][j] * H[j][n]; - } - - if (e[i] < 0) { - z = w; - s = r; - } else { - l = i; - if (e[i] === 0) { - H[i][n] = w !== 0 ? -r / w : -r / (eps * norm); - } else { - x = H[i][i + 1]; - y = H[i + 1][i]; - q = (d[i] - p) * (d[i] - p) + e[i] * e[i]; - t = (x * s - z * r) / q; - H[i][n] = t; - H[i + 1][n] = - Math.abs(x) > Math.abs(z) ? (-r - w * t) / x : (-s - y * t) / z; - } - - t = Math.abs(H[i][n]); - if (eps * t * t > 1) { - for (j = i; j <= n; j++) { - H[j][n] = H[j][n] / t; - } - } - } - } - } else if (q < 0) { - l = n - 1; - - if (Math.abs(H[n][n - 1]) > Math.abs(H[n - 1][n])) { - H[n - 1][n - 1] = q / H[n][n - 1]; - H[n - 1][n] = -(H[n][n] - p) / H[n][n - 1]; - } else { - cdivres = cdiv(0, -H[n - 1][n], H[n - 1][n - 1] - p, q); - H[n - 1][n - 1] = cdivres[0]; - H[n - 1][n] = cdivres[1]; - } - - H[n][n - 1] = 0; - H[n][n] = 1; - for (i = n - 2; i >= 0; i--) { - ra = 0; - sa = 0; - for (j = l; j <= n; j++) { - ra = ra + H[i][j] * H[j][n - 1]; - sa = sa + H[i][j] * H[j][n]; - } - - w = H[i][i] - p; - - if (e[i] < 0) { - z = w; - r = ra; - s = sa; - } else { - l = i; - if (e[i] === 0) { - cdivres = cdiv(-ra, -sa, w, q); - H[i][n - 1] = cdivres[0]; - H[i][n] = cdivres[1]; - } else { - x = H[i][i + 1]; - y = H[i + 1][i]; - vr = (d[i] - p) * (d[i] - p) + e[i] * e[i] - q * q; - vi = (d[i] - p) * 2 * q; - if (vr === 0 && vi === 0) { - vr = - eps * - norm * - (Math.abs(w) + - Math.abs(q) + - Math.abs(x) + - Math.abs(y) + - Math.abs(z)); - } - cdivres = cdiv( - x * r - z * ra + q * sa, - x * s - z * sa - q * ra, - vr, - vi - ); - H[i][n - 1] = cdivres[0]; - H[i][n] = cdivres[1]; - if (Math.abs(x) > Math.abs(z) + Math.abs(q)) { - H[i + 1][n - 1] = (-ra - w * H[i][n - 1] + q * H[i][n]) / x; - H[i + 1][n] = (-sa - w * H[i][n] - q * H[i][n - 1]) / x; - } else { - cdivres = cdiv(-r - y * H[i][n - 1], -s - y * H[i][n], z, q); - H[i + 1][n - 1] = cdivres[0]; - H[i + 1][n] = cdivres[1]; - } - } - - t = Math.max(Math.abs(H[i][n - 1]), Math.abs(H[i][n])); - if (eps * t * t > 1) { - for (j = i; j <= n; j++) { - H[j][n - 1] = H[j][n - 1] / t; - H[j][n] = H[j][n] / t; - } - } - } - } - } - } - - for (i = 0; i < nn; i++) { - if (i < low || i > high) { - for (j = i; 
j < nn; j++) { - V[i][j] = H[i][j]; - } - } - } - - for (j = nn - 1; j >= low; j--) { - for (i = low; i <= high; i++) { - z = 0; - for (k = low; k <= Math.min(j, high); k++) { - z = z + V[i][k] * H[k][j]; - } - V[i][j] = z; - } - } -} - -function cdiv(xr, xi, yr, yi) { - var r, d; - if (Math.abs(yr) > Math.abs(yi)) { - r = yi / yr; - d = yr + r * yi; - return [(xr + r * xi) / d, (xi - r * xr) / d]; - } else { - r = yr / yi; - d = yi + r * yr; - return [(r * xr + xi) / d, (r * xi - xr) / d]; - } -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/dc/cholesky.js - - -/** - * @class CholeskyDecomposition - * @link https://github.com/lutzroeder/Mapack/blob/master/Source/CholeskyDecomposition.cs - * @param {Matrix} value - */ -class cholesky_CholeskyDecomposition { - constructor(value) { - value = WrapperMatrix2D_WrapperMatrix2D.checkMatrix(value); - if (!value.isSymmetric()) { - throw new Error('Matrix is not symmetric'); - } - - var a = value; - var dimension = a.rows; - var l = new matrix_Matrix(dimension, dimension); - var positiveDefinite = true; - var i, j, k; - - for (j = 0; j < dimension; j++) { - var Lrowj = l[j]; - var d = 0; - for (k = 0; k < j; k++) { - var Lrowk = l[k]; - var s = 0; - for (i = 0; i < k; i++) { - s += Lrowk[i] * Lrowj[i]; - } - Lrowj[k] = s = (a.get(j, k) - s) / l[k][k]; - d = d + s * s; - } - - d = a.get(j, j) - d; - - positiveDefinite &= d > 0; - l[j][j] = Math.sqrt(Math.max(d, 0)); - for (k = j + 1; k < dimension; k++) { - l[j][k] = 0; - } - } - - if (!positiveDefinite) { - throw new Error('Matrix is not positive definite'); - } - - this.L = l; - } - - /** - * - * @param {Matrix} value - * @return {Matrix} - */ - solve(value) { - value = WrapperMatrix2D_WrapperMatrix2D.checkMatrix(value); - - var l = this.L; - var dimension = l.rows; - - if (value.rows !== dimension) { - throw new Error('Matrix dimensions do not match'); - } - - var count = value.columns; - var B = value.clone(); - var i, j, k; - - for (k = 0; k < dimension; k++) { - for (j = 0; j < count; j++) { - for (i = 0; i < k; i++) { - B[k][j] -= B[i][j] * l[k][i]; - } - B[k][j] /= l[k][k]; - } - } - - for (k = dimension - 1; k >= 0; k--) { - for (j = 0; j < count; j++) { - for (i = k + 1; i < dimension; i++) { - B[k][j] -= B[i][j] * l[i][k]; - } - B[k][j] /= l[k][k]; - } - } - - return B; - } - - /** - * - * @return {Matrix} - */ - get lowerTriangularMatrix() { - return this.L; - } -} - -// CONCATENATED MODULE: ./node_modules/ml-matrix/src/index.js -/* concated harmony reexport default */__webpack_require__.d(__webpack_exports__, "default", function() { return matrix_Matrix; }); -/* concated harmony reexport Matrix */__webpack_require__.d(__webpack_exports__, "Matrix", function() { return matrix_Matrix; }); -/* concated harmony reexport abstractMatrix */__webpack_require__.d(__webpack_exports__, "abstractMatrix", function() { return AbstractMatrix; }); -/* concated harmony reexport wrap */__webpack_require__.d(__webpack_exports__, "wrap", function() { return wrap; }); -/* concated harmony reexport WrapperMatrix2D */__webpack_require__.d(__webpack_exports__, "WrapperMatrix2D", function() { return WrapperMatrix2D_WrapperMatrix2D; }); -/* concated harmony reexport WrapperMatrix1D */__webpack_require__.d(__webpack_exports__, "WrapperMatrix1D", function() { return WrapperMatrix1D_WrapperMatrix1D; }); -/* concated harmony reexport solve */__webpack_require__.d(__webpack_exports__, "solve", function() { return solve; }); -/* concated harmony reexport inverse */__webpack_require__.d(__webpack_exports__, 
"inverse", function() { return inverse; }); -/* concated harmony reexport linearDependencies */__webpack_require__.d(__webpack_exports__, "linearDependencies", function() { return linearDependencies; }); -/* concated harmony reexport SingularValueDecomposition */__webpack_require__.d(__webpack_exports__, "SingularValueDecomposition", function() { return svd_SingularValueDecomposition; }); -/* concated harmony reexport SVD */__webpack_require__.d(__webpack_exports__, "SVD", function() { return svd_SingularValueDecomposition; }); -/* concated harmony reexport EigenvalueDecomposition */__webpack_require__.d(__webpack_exports__, "EigenvalueDecomposition", function() { return evd_EigenvalueDecomposition; }); -/* concated harmony reexport EVD */__webpack_require__.d(__webpack_exports__, "EVD", function() { return evd_EigenvalueDecomposition; }); -/* concated harmony reexport CholeskyDecomposition */__webpack_require__.d(__webpack_exports__, "CholeskyDecomposition", function() { return cholesky_CholeskyDecomposition; }); -/* concated harmony reexport CHO */__webpack_require__.d(__webpack_exports__, "CHO", function() { return cholesky_CholeskyDecomposition; }); -/* concated harmony reexport LuDecomposition */__webpack_require__.d(__webpack_exports__, "LuDecomposition", function() { return lu_LuDecomposition; }); -/* concated harmony reexport LU */__webpack_require__.d(__webpack_exports__, "LU", function() { return lu_LuDecomposition; }); -/* concated harmony reexport QrDecomposition */__webpack_require__.d(__webpack_exports__, "QrDecomposition", function() { return qr_QrDecomposition; }); -/* concated harmony reexport QR */__webpack_require__.d(__webpack_exports__, "QR", function() { return qr_QrDecomposition; }); - - - - - - - - - - - - - - - - -/***/ }) -/******/ ]); -}); \ No newline at end of file diff --git a/spaces/merve/uncertainty-calibration/source/anonymization/make-axii.js b/spaces/merve/uncertainty-calibration/source/anonymization/make-axii.js deleted file mode 100644 index c69b5eba387ec07f01ce2849726fda5461002aef..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/source/anonymization/make-axii.js +++ /dev/null @@ -1,86 +0,0 @@ -window.makeAxii = function(){ - - var stateScale = d3.scaleBand().domain(states).range(c.x.range()) - var stateAxis = c.svg.append('g.axis.state.init-hidden') - - var bw = stateScale.bandwidth()/2 - - stateAxis.appendMany('text', states) - .translate(d => [stateScale(d) + bw, c.height + 22]) - .text(d => d) - .at({ - textAnchor: 'middle', - }) - .st({fill: '#444'}) - - stateAxis.appendMany('path', d3.range(ages.length + 1)) - .at({ - d: d => ['M', d*c.width/(ages.length), '0 V', c.height].join(' '), - stroke: '#aaa', - }) - - stateAxis.append('text.bold').text('Home State') - .translate([c.width/2, c.height + 45]) - .at({textAnchor: 'middle'}) - - var ageScale = d3.scaleBand().domain(ages.slice().reverse()).range(c.x.range()) - var ageAxis = c.svg.append('g.axis.age.init-hidden') - - ageAxis.appendMany('text', ages) - .translate(d => [-30, ageScale(d) + bw]) - .text(d => d) - .at({dy: '.33em'}) - .st({fill: '#444'}) - - ageAxis.appendMany('path', d3.range(ages.length + 1)) - .at({ - d: d => ['M 0', d*c.width/(ages.length), 'H', c.width].join(' '), - stroke: '#aaa', - }) - - if (scale == 1){ - ageAxis - .append('g').translate([-43, c.height/2]) - .append('text.bold').text('Age') - .at({textAnchor: 'middle', transform: 'rotate(-90)'}) - } else { - ageAxis - .append('g').translate([-22, 14]) - .append('text.bold').text('Age') - 
.at({textAnchor: 'middle'}) - } - - var seasonAxis = c.svg.append('g.axis.state.init-hidden').lower() - seasonAxis.appendMany('g', ages) - .translate(d => ageScale(d), 1) - .appendMany('path', d3.range(1, 4)) - .at({ - d: d => ['M 0', d*bw/4*2, 'H', c.width].join(' '), - stroke: '#ddd', - }) - - var headAxis = c.svg.append('g.axis.state.init-hidden') - headAxis.appendMany('text.bold', ['Heads', 'Tails']) - .text(d => d) - .translate((d, i) => [i ? c.width/4*3 + 20 : c.width/4 - 20, 88]) - .at({textAnchor: 'middle'}) - - - var headCaptionAxis = c.svg.append('g.axis.state.init-hidden') - headCaptionAxis.appendMany('text', ['reports plagiarism', 'reports truth']) - .text(d => d) - .translate((d, i) => [i ? c.width/4*3 + 20 : c.width/4 - 20, 88 + 15]) - .at({textAnchor: 'middle'}) - .st({fill: '#444'}) - - - return {stateScale, stateAxis, headAxis, headCaptionAxis, ageScale, ageAxis, bw, seasonAxis} -} - - - - - - - -if (window.init) window.init() \ No newline at end of file diff --git a/spaces/mikeee/llama-2-70b-guanaco-qlora-ggml/README.md b/spaces/mikeee/llama-2-70b-guanaco-qlora-ggml/README.md deleted file mode 100644 index 664195922d8bd5ce537cdc684b8ccf800781ec63..0000000000000000000000000000000000000000 --- a/spaces/mikeee/llama-2-70b-guanaco-qlora-ggml/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: llama-2-7b-or-13b-ggml -emoji: 🚀 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: true -duplicated_from: mikeee/llama2-7b-chat-ggml ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mikeee/ultimatumbee/ubee/uclas.py b/spaces/mikeee/ultimatumbee/ubee/uclas.py deleted file mode 100644 index 097a42050cfd73f71ad8b800a944e69ec249c714..0000000000000000000000000000000000000000 --- a/spaces/mikeee/ultimatumbee/ubee/uclas.py +++ /dev/null @@ -1,86 +0,0 @@ -"""Define uclas.""" -# pylint: disable=invalid-name - -from typing import List, Tuple, Union - -import logzero -import numpy as np -from joblib import Memory -from logzero import logger -# set PYTHONPATH=..\align-model-pool # in win10 -from model_pool.fetch_check_aux import fetch_check_aux -from model_pool.load_model import load_model -from model_pool.model_s import load_model_s -from sklearn.metrics.pairwise import cosine_similarity - -# logzero.loglevel(20) - -# fetch_check_aux("/home/user") -try: - fetch_check_aux() -except Exception as _: - logger.error(_) - -model_s = load_model_s() -clas = load_model("clas-l-user") - -location = "./cachedir" -memory = Memory(location, verbose=0) - - -@memory.cache -def cached_clas(*args, **kw): - """Cache clas-l-user.""" - return clas(*args, **kw) - - -# cached_clas = memory.cache(cached_clas) - - -@memory.cache -def encode(*args, **kw): - """Cache model_s.encode.""" - return model_s.encode(*args, **kw) - - -def uclas( - seq: str, - labels: Union[List[str], np.ndarray, Tuple[str, ...]], - thresh: float = 0.5, - multi_label: bool = False, -) -> Tuple[str, Union[float, str]]: - """Classify seq with a filter. 
- - if clas > thresh, return - if clas * csim > thresh return - if csim > thresh return - return "" - """ - # _ = clas(seq, labels, multi_label=multi_label) - _ = cached_clas(seq, labels, multi_label=multi_label) - - logger.debug("1 %s, %s", _.get("labels")[0], round(_.get("scores")[0], 2)) - - if _.get("scores")[0] > thresh: - return _.get("labels")[0], round(_.get("scores")[0], 2) - - _ = dict(zip(_.get("labels"), _.get("scores"))) - - corr = np.array([_.get(elm) for elm in labels]) - - csim = cosine_similarity(encode([seq]), encode(labels)) - - corr = corr * csim - - logger.debug("2 %s, %s", corr.argmax(), round(corr.max(), 2)) - - if corr.max() > thresh: - return labels[corr.argmax()], round(corr.max(), 2) - - logger.debug("3 %s, %s, %s", csim.argmax(), round(csim.max(), 2), thresh / 2) - - logger.debug("T or F: %s", csim.max() > (thresh / 2)) - if csim.max() > (thresh / 2): - return labels[csim.argmax()], round(csim.max(), 2) - - return "", "" diff --git a/spaces/mindtube/protogen-models/README.md b/spaces/mindtube/protogen-models/README.md deleted file mode 100644 index 4d3162ecdffa6cd793a680669815b134343d0652..0000000000000000000000000000000000000000 --- a/spaces/mindtube/protogen-models/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Maximum Multiplier -emoji: 🛕🛕 -colorFrom: green -colorTo: blue -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: true -duplicated_from: mindtube/maximum_multiplier_places ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/benchmark/dummy_model.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/benchmark/dummy_model.py deleted file mode 100644 index ff26e4fe655d8e8d7f9942c4bd3df7cd267405fb..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/benchmark/dummy_model.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import torch.nn as nn -import torch.nn.functional as F -from fairseq.data import Dictionary -from fairseq.models import ( - FairseqDecoder, - FairseqLanguageModel, - register_model, - register_model_architecture, -) - - -@register_model("dummy_model") -class DummyModel(FairseqLanguageModel): - def __init__(self, args, encoder): - super().__init__(encoder) - self.args = args - - @staticmethod - def add_args(parser): - parser.add_argument("--num-layers", type=int, default=24) - parser.add_argument("--embed-dim", type=int, default=1024) - - @classmethod - def build_model(cls, args, task): - encoder = DummyEncoder( - num_embed=len(task.target_dictionary), - embed_dim=args.embed_dim, - num_layers=args.num_layers, - ) - return cls(args, encoder) - - def forward(self, src_tokens, masked_tokens=None, **kwargs): - return self.decoder(src_tokens, masked_tokens=masked_tokens) - - -class DummyEncoder(FairseqDecoder): - def __init__(self, num_embed=50000, embed_dim=1024, num_layers=24): - super().__init__(Dictionary()) - self.embed = nn.Embedding( - num_embeddings=num_embed, embedding_dim=embed_dim, padding_idx=0 - ) - self.layers_a = nn.ModuleList( - [ - nn.Sequential( - nn.LayerNorm(embed_dim), - nn.Linear(embed_dim, 3 * embed_dim), # q, k, v input projection - nn.Linear(3 * embed_dim, embed_dim), # skip self-attention - nn.Linear(embed_dim, embed_dim), # output projection - nn.Dropout(), - ) - for i in range(num_layers) - ] - ) - self.layers_b = nn.ModuleList( - [ - nn.Sequential( - nn.LayerNorm(embed_dim), - nn.Linear(embed_dim, 4 * embed_dim), # FFN - nn.ReLU(), - nn.Linear(4 * embed_dim, embed_dim), # FFN - nn.Dropout(0.1), - ) - for i in range(num_layers) - ] - ) - self.out_proj = nn.Linear(embed_dim, num_embed) - - def forward(self, tokens, masked_tokens=None): - x = self.embed(tokens) - for layer_a, layer_b in zip(self.layers_a, self.layers_b): - x = x + layer_a(x) - x = x + layer_b(x) - x = self.out_proj(x) - if masked_tokens is not None: - x = x[masked_tokens] - return (x,) - - def max_positions(self): - return 1024 - - def get_normalized_probs(self, net_output, log_probs, sample=None): - logits = net_output[0].float() - if log_probs: - return F.log_softmax(logits, dim=-1) - else: - return F.softmax(logits, dim=-1) - - -@register_model_architecture("dummy_model", "dummy_model") -def base_architecture(args): - pass diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/distributed/fully_sharded_data_parallel.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/distributed/fully_sharded_data_parallel.py deleted file mode 100644 index 8a96bfc76516682ac8e2b7e2c3bc2e6aa3d8ef0c..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/distributed/fully_sharded_data_parallel.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import contextlib -from typing import Optional - -import torch -from fairseq.dataclass.configs import DistributedTrainingConfig -from fairseq.distributed import utils as dist_utils - - -try: - from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP - - has_FSDP = True -except ImportError: - FSDP = torch.nn.Module - has_FSDP = False - - -class FullyShardedDataParallel(FSDP): - """ - A small wrapper around fairscale's FullyShardedDataParallel (FSDP) with some - fairseq-specific checkpoint saving/loading logic. 
- - Args: - use_sharded_state (bool): if True, then ``state_dict`` will return - ``FSDP.local_state_dict`` and ``load_state_dict`` will call - ``FSDP.load_local_state_dict``. Otherwise, ``state_dict`` will - return the full model weights on data parallel rank 0 (empty on - other ranks) and ``load_state_dict`` will broadcast model weights - from rank 0 to other ranks. - """ - - def __init__(self, *args, use_sharded_state: bool = False, **kwargs): - if not has_FSDP: - raise ImportError( - "Cannot find FullyShardedDataParallel. " - "Please install fairscale with: pip install fairscale" - ) - super().__init__(*args, **kwargs) - self.use_sharded_state = use_sharded_state - - @property - def unwrapped_module(self) -> torch.nn.Module: - if self.flatten_parameters: - return self.module.module - else: - return self.module - - def state_dict(self, destination=None, prefix="", keep_vars=False): - if self.use_sharded_state: - return super().local_state_dict( - destination=destination, prefix=prefix, keep_vars=keep_vars - ) - else: - if self.rank == 0: - return super().state_dict( - destination=destination, prefix=prefix, keep_vars=keep_vars - ) - else: - # We must call state_dict() due to use of communication - # primitives. But we don't use the result. - super().state_dict() - return destination or {} - - def load_state_dict(self, state_dict, strict=True, model_cfg=None): - if self.use_sharded_state: - return super().load_local_state_dict(state_dict, strict=strict) - else: - state_dict = dist_utils.broadcast_object( - state_dict, src_rank=0, group=self.process_group - ) - return super().load_state_dict(state_dict, strict=strict) - - -@contextlib.contextmanager -def fsdp_enable_wrap(cfg: DistributedTrainingConfig): - try: - from fairscale.nn import enable_wrap - except ImportError: - raise ImportError( - "Cannot find FullyShardedDataParallel. " - "Please install fairscale with: pip install fairscale" - ) - if cfg.memory_efficient_fp16: - assert cfg.fp16 # memory_efficient_fp16 should imply fp16 - group = dist_utils.get_data_parallel_group() - if group is None and cfg.distributed_world_size == 1: - from fairscale.utils.testing import DummyProcessGroup - - group = DummyProcessGroup(rank=0, size=1) - fsdp_config = { - "process_group": group, - "reshard_after_forward": not cfg.no_reshard_after_forward, - "mixed_precision": cfg.fp16 and not cfg.memory_efficient_fp16, - "fp32_reduce_scatter": cfg.fp32_reduce_scatter, - "flatten_parameters": True, - "cpu_offload": cfg.cpu_offload, - "compute_dtype": torch.float16 if cfg.fp16 else torch.float32, - "bucket_cap_mb": cfg.bucket_cap_mb, - "state_dict_device": torch.device("cpu"), # reduce GPU mem usage - } - with enable_wrap( - wrapper_cls=FullyShardedDataParallel, - use_sharded_state=cfg.use_sharded_state, - **fsdp_config, - ): - yield - - -def fsdp_wrap(module, min_num_params: Optional[int] = None, **kwargs): - """ - Helper to wrap layers/modules in FSDP. This falls back to a no-op if - fairscale is not available. 
- - Args: - module (nn.Module): module to (maybe) wrap - min_num_params (int, Optional): minimum number of layer params to wrap - """ - try: - from fairscale.nn import wrap - - if min_num_params is not None: - num_params = sum(p.numel() for p in module.parameters()) - if num_params >= min_num_params: - return wrap(module, **kwargs) - else: - return module - else: - return wrap(module, **kwargs) - except ImportError: - return module diff --git a/spaces/muhtasham/TajBERTo/app.py b/spaces/muhtasham/TajBERTo/app.py deleted file mode 100644 index 30eb9a72a22b8216e773f88793535438e3a8da3f..0000000000000000000000000000000000000000 --- a/spaces/muhtasham/TajBERTo/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import gradio as gr -from transformers import pipeline -import numpy as np -import torch - -BASE_MODEL = "muhtasham/TajBERTo" -mask_filler = pipeline("fill-mask", model=BASE_MODEL) - -def mask_fill(text): - k = [] - preds = mask_filler(text) - for pred in preds: - k.append(pred["sequence"]) - final_string = '\n'.join(k) - return final_string - -gradio_ui = gr.Interface( - fn=mask_fill, - title="Predicting masked words in Tajik Language 🇹🇯", - description="Enter a a sentence to predict the masked word", - inputs=[ - gr.inputs.Textbox(lines=3), - ], - outputs=[ - gr.outputs.Textbox(label="Answer"), - ], - examples=[ - ["Пойтахти Душанбе"], - ], - enable_queue=True, - allow_screenshot=False, - allow_flagging=False, -) -gradio_ui.launch(debug=True) \ No newline at end of file diff --git a/spaces/nakamura196/yolov5-kunshujo/ultralytics/yolov5/models/__init__.py b/spaces/nakamura196/yolov5-kunshujo/ultralytics/yolov5/models/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/nakas/MusicGenDemucs/audiocraft/quantization/vq.py b/spaces/nakas/MusicGenDemucs/audiocraft/quantization/vq.py deleted file mode 100644 index f67c3a0cd30d4b8993a36c587f00dc8a451d926f..0000000000000000000000000000000000000000 --- a/spaces/nakas/MusicGenDemucs/audiocraft/quantization/vq.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import math -import typing as tp - -import torch - -from .base import BaseQuantizer, QuantizedResult -from .core_vq import ResidualVectorQuantization - - -class ResidualVectorQuantizer(BaseQuantizer): - """Residual Vector Quantizer. - - Args: - dimension (int): Dimension of the codebooks. - n_q (int): Number of residual vector quantizers used. - q_dropout (bool): Random quantizer drop out at train time. - bins (int): Codebook size. - decay (float): Decay for exponential moving average over the codebooks. - kmeans_init (bool): Whether to use kmeans to initialize the codebooks. - kmeans_iters (int): Number of iterations used for kmeans initialization. - threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes - that have an exponential moving average cluster size less than the specified threshold with - randomly selected vector from the current batch. - orthogonal_reg_weight (float): Orthogonal regularization weights. - orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes. - orthogonal_reg_max_codes (optional int): Maximum number of codes to consider. - for orthogonal regulariation. 
- """ - def __init__( - self, - dimension: int = 256, - n_q: int = 8, - q_dropout: bool = False, - bins: int = 1024, - decay: float = 0.99, - kmeans_init: bool = True, - kmeans_iters: int = 10, - threshold_ema_dead_code: int = 2, - orthogonal_reg_weight: float = 0.0, - orthogonal_reg_active_codes_only: bool = False, - orthogonal_reg_max_codes: tp.Optional[int] = None, - ): - super().__init__() - self.max_n_q = n_q - self.n_q = n_q - self.q_dropout = q_dropout - self.dimension = dimension - self.bins = bins - self.decay = decay - self.kmeans_init = kmeans_init - self.kmeans_iters = kmeans_iters - self.threshold_ema_dead_code = threshold_ema_dead_code - self.orthogonal_reg_weight = orthogonal_reg_weight - self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only - self.orthogonal_reg_max_codes = orthogonal_reg_max_codes - self.vq = ResidualVectorQuantization( - dim=self.dimension, - codebook_size=self.bins, - num_quantizers=self.n_q, - decay=self.decay, - kmeans_init=self.kmeans_init, - kmeans_iters=self.kmeans_iters, - threshold_ema_dead_code=self.threshold_ema_dead_code, - orthogonal_reg_weight=self.orthogonal_reg_weight, - orthogonal_reg_active_codes_only=self.orthogonal_reg_active_codes_only, - orthogonal_reg_max_codes=self.orthogonal_reg_max_codes, - channels_last=False - ) - - def forward(self, x: torch.Tensor, frame_rate: int): - n_q = self.n_q - if self.training and self.q_dropout: - n_q = int(torch.randint(1, self.n_q + 1, (1,)).item()) - bw_per_q = math.log2(self.bins) * frame_rate / 1000 - quantized, codes, commit_loss = self.vq(x, n_q=n_q) - codes = codes.transpose(0, 1) - # codes is [B, K, T], with T frames, K nb of codebooks. - bw = torch.tensor(n_q * bw_per_q).to(x) - return QuantizedResult(quantized, codes, bw, penalty=torch.mean(commit_loss)) - - def encode(self, x: torch.Tensor) -> torch.Tensor: - """Encode a given input tensor with the specified frame rate at the given bandwidth. - The RVQ encode method sets the appropriate number of quantizer to use - and returns indices for each quantizer. - """ - n_q = self.n_q - codes = self.vq.encode(x, n_q=n_q) - codes = codes.transpose(0, 1) - # codes is [B, K, T], with T frames, K nb of codebooks. - return codes - - def decode(self, codes: torch.Tensor) -> torch.Tensor: - """Decode the given codes to the quantized representation. - """ - # codes is [B, K, T], with T frames, K nb of codebooks, vq.decode expects [K, B, T]. 
- codes = codes.transpose(0, 1) - quantized = self.vq.decode(codes) - return quantized - - @property - def total_codebooks(self): - return self.max_n_q - - @property - def num_codebooks(self): - return self.n_q - - def set_num_codebooks(self, n: int): - assert n > 0 and n <= self.max_n_q - self.n_q = n diff --git a/spaces/nazneen/interactive-model-cards/interactive_model_cards/utils/misc.py b/spaces/nazneen/interactive-model-cards/interactive_model_cards/utils/misc.py deleted file mode 100644 index 50181cbffab82f80498941379193aabb49d85366..0000000000000000000000000000000000000000 --- a/spaces/nazneen/interactive-model-cards/interactive_model_cards/utils/misc.py +++ /dev/null @@ -1,138 +0,0 @@ -import pandas as pd -from numpy import floor - - -#--- gensim --- -from nltk.tokenize import word_tokenize -from gensim.models.doc2vec import Doc2Vec, TaggedDocument - - -def conf_level(val): - """ Translates probability value into - a plain english statement """ - # https://www.dni.gov/files/documents/ICD/ICD%20203%20Analytic%20Standards.pdf - conf = "undefined" - - if val < 0.05: - conf = "Extremely Low Probability" - elif val >= 0.05 and val < 0.20: - conf = "Very Low Probability" - elif val >= 0.20 and val < 0.45: - conf = "Low Probability" - elif val >= 0.45 and val < 0.55: - conf = "Middling Probability" - elif val >= 0.55 and val < 0.80: - conf = "High Probability" - elif val >= 0.80 and val < 0.95: - conf = "Very High Probability" - elif val >= 0.95: - conf = "Extremely High Probability" - - return conf - - -def subsample_df(df=None, size=10, sample_type="Random Sample"): - """ Subsample the dataframe """ - size = int(size) - if sample_type == "Random Sample": - return df.sample(size) - elif sample_type == "Highest Probabilities": - df.sort_values(by="probability", ascending=False, inplace=True) - return df.head(size) - elif sample_type == "Lowest Probabilities": - df.sort_values(by="probability", ascending=True, inplace=True) - return df.head(size) - else: - # sample probabilities in the middle - tmp = df[(df["probability"] > 0.45) & (df["probability"] < 0.55)] - samp = min([size, int(tmp.shape[0])]) - return tmp.sample(samp) - - -def down_samp(embedding): - """Down sample a data frame for altiar visualization """ - #total number of positive and negative sentiments in the class - total_size = embedding.groupby(['name', 'sentiment'],as_index=False).count() - - user_data = 0 - if 'Your Sentences' in str(total_size['name']): - tmp = embedding.groupby(['name'],as_index=False).count() - val = int(tmp[tmp['name'] == "Your Sentences"]['source']) - user_data=val - - max_sample = total_size.groupby('name').max()['source'] - - #down sample to meeting altair's max values - #but keep the proportional representation of groups - down_samp = 1/(sum(max_sample)/(5000-user_data)) - - max_samp = max_sample.apply(lambda x: floor(x*down_samp)).astype(int).to_dict() - max_samp['Your Sentences'] = user_data - - #sample down for each group in the data frame - embedding= embedding.groupby('name').apply(lambda x: x.sample(n=max_samp.get(x.name))).reset_index(drop = True) - - #order the embedding - return(embedding.sort_values(['sort_order'],ascending=True)) - - - -def prep_embed_data(data,model): - ''' Basic data tagging''' - tagged_data = [TaggedDocument(words=word_tokenize(_d.lower()), tags=[str(i)]) for i, _d in enumerate(data)] - embedding = [model.infer_vector(tagged_data[i].words) for i in range(len(tagged_data))] - return embedding - -def prep_sentence_embedding(name,source, sentence, sentiment, 
sort_order,embed_model,idx,type="single"): - """ Prepare a custom sentence to add to the embedding""" - - - if type == "single": - #get vector embedding - tagged_data = TaggedDocument(words=word_tokenize(sentence.lower()), tags=['source']) - vector = embed_model.infer_vector(tagged_data.words) - - tmp = { - 'source': source, - 'name': name, - 'sort_order': sort_order, - 'sentence': sentence, - 'sentiment': sentiment, - 'x': vector[0], - 'y':vector[1] - } - - return(pd.DataFrame(tmp,index=[idx])) - else: - #go through each group and add - df = {"source":[], - "name":[], - "sentence":[], - "sentiment":[], - "x":[], - "y":[], - "sort_order":[] - } - - - slice_short = sentence - slice_sentiment = sentiment - vec_embedding = prep_embed_data(sentence,embed_model) - - df['source'] = df['source'] + [source]*len(slice_short) - df['name'] = df['name'] + [name]*len(slice_short) - - #the sort order effects how its drawn by altair - df['sort_order'] = df['sort_order'] + [sort_order]*len(slice_short) - - #add individual elements - for i in range(len(slice_short)): - df['sentence'].append(slice_short[i]) - df['sentiment'].append(slice_sentiment[i]) - df['x'].append(vec_embedding[i][0]) - df['y'].append(vec_embedding[i][1]) - - df = pd.DataFrame(df) - return(df) - - diff --git a/spaces/ncoop57/clifs/README.md b/spaces/ncoop57/clifs/README.md deleted file mode 100644 index 245f4c0f2ae7af59ed1afad615978f9ade21c4e6..0000000000000000000000000000000000000000 --- a/spaces/ncoop57/clifs/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: YT CLIFS -emoji: 🎥 -colorFrom: pink -colorTo: indigo -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Haraamkhor 2 Movie In Hindi Download Mp4.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Haraamkhor 2 Movie In Hindi Download Mp4.md deleted file mode 100644 index 93793033836bf7f9d56d58726f15cfc50dd5fe8b..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Haraamkhor 2 Movie In Hindi Download Mp4.md +++ /dev/null @@ -1,51 +0,0 @@ - -Title: Haraamkhor 2 Movie In Hindi Download Mp4: A Review and Guide - -Article: - -```markdown -

    Haraamkhor 2 is a sequel to the 2017 Indian drama film Haraamkhor, directed by Shlok Sharma and starring Nawazuddin Siddiqui and Shweta Tripathi. The film follows the lives of a teacher and his student who are involved in a forbidden relationship.

    - -

    The film was released on Netflix on April 15, 2023, and has received mixed reviews from critics and audiences. Some praised the performances of the lead actors and the realistic portrayal of rural India, while others criticized the film for being too dark, depressing and disturbing.

    -

    Haraamkhor 2 Movie In Hindi Download Mp4


    Download: https://urlcod.com/2uIbbl



    - -

    If you are interested in watching Haraamkhor 2, you might be wondering how to download it in Hindi and in MP4 format. In this article, we will provide you with a review and a guide on how to do that.

    - -

    Review of Haraamkhor 2

    - -

    Haraamkhor 2 picks up where the first film left off, with Shyam (Siddiqui), a married school teacher, and Sandhya (Tripathi), his teenage student, fleeing from their village after their affair is exposed. They end up in a nearby town, where they try to start a new life together.

    - -

    However, their past soon catches up with them, as Sandhya's father (Trimala Adhikari), who is a police officer, tracks them down and tries to bring them back. Meanwhile, Shyam's wife (Shreya Shah) also finds out about his infidelity and decides to take revenge.

    - -

    The film explores the consequences of their illicit relationship, as well as the themes of love, lust, betrayal, violence and morality. The film does not shy away from showing the dark and ugly side of human nature, and does not offer any easy answers or resolutions.

    - -

    The film is not for the faint-hearted, as it contains scenes of graphic violence, sexual abuse, rape and murder. The film also does not have any songs or comic relief, making it a bleak and depressing watch.

    - -

    However, the film also has some positive aspects, such as the brilliant performances of Siddiqui and Tripathi, who bring depth and nuance to their complex characters. The film also has a realistic and authentic feel, as it uses natural lighting, handheld camera work and local dialects.

    - -

    The film also raises some important questions about the nature of love, consent, power and justice. The film does not judge or glorify its characters, but rather shows them as flawed and human beings who make mistakes and suffer the consequences.

    - -

    Guide on How to Download Haraamkhor 2 in Hindi and MP4 Format

    - -

    If you want to download Haraamkhor 2 in Hindi and MP4 format, you will need to use a third-party website or app that offers this service. However, you should be careful when doing so, as some of these websites or apps might contain malware or viruses that can harm your device or steal your personal information.

    -

    - -

    Therefore, we recommend that you use a trusted and reliable website or app that has good reviews and ratings from other users. One such website that we suggest is Moviesflix, which is one of the most popular platforms for downloading Bollywood movies in various languages and formats.

    - -

    To download Haraamkhor 2 from Moviesflix, you will need to follow these steps:

    - -
      -
    1. Go to the Moviesflix website in your browser.
    2. -
    3. Search for Haraamkhor 2 in the search bar or browse through the categories.
    4. -
    5. Select the movie from the results and click on it.
    6. -
    7. Choose the language option as Hindi and the quality option as MP4.
    8. -
    9. Click on the download button and wait for the movie to be downloaded on your device.
    10. -
    - -

    Note: You might need to create an account or verify your identity before downloading the movie. You might also need to disable your ad blocker or allow pop-ups on the website.

    - -

    Conclusion

    - -

    Haraamkhor 2 is a sequel that delivers a dark, well-acted and unsettling drama, and if that sounds like your kind of film, the guide above shows how to download it in Hindi and MP4 format.

    81aa517590
    -
    -
    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/N1996 Driver Free Download REPACK.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/N1996 Driver Free Download REPACK.md deleted file mode 100644 index 2ef759e2a283b7c7c58f5058b8df3368a16abbd2..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/N1996 Driver Free Download REPACK.md +++ /dev/null @@ -1,42 +0,0 @@ - -

    How to Download and Install MSI N1996 Driver for Windows

    -

    If you have an MSI N1996 motherboard or graphics card, you may need to update its driver to ensure optimal performance and compatibility with your Windows operating system. The MSI N1996 is a legacy device whose driver officially supports Windows XP and Vista, but it can also work on Windows 10, 8, and 7 with some tweaks. In this article, we will show you how to download and install the MSI N1996 driver for Windows using several different methods.

    -

    N1996 Driver Free Download


    Download File: https://urlcod.com/2uIbJJ



    -

    Method 1: Use DriverGuide

    -

    DriverGuide is a website that offers a large collection of drivers for various devices and operating systems. You can use DriverGuide to find and download the latest version of the MSI N1996 driver for your Windows system. Here are the steps to follow:

    -
      -
    1. Go to https://www.driverguide.com/driver/download/MSI-N1996 and scroll down to find the list of MSI N1996 drivers.
    2. -
    3. Select the driver that matches your hardware and operating system. You can use the custom driver search engine to narrow down your options.
    4. -
    5. Click on the "Download" button next to the driver you want to download. You may need to create a free account or log in with your existing account to access the download link.
    6. -
    7. Save the driver file to your computer and unzip it if necessary.
    8. -
    9. Follow the instructions in the readme file or the installation wizard to install the driver on your system.
    10. -
    11. Restart your computer if prompted.
    12. -
    -

    Method 2: Use Device Manager

    -

    Device Manager is a built-in utility in Windows that allows you to manage and update your hardware devices and drivers. You can use Device Manager to install the MSI N1996 driver manually if you have downloaded it from another source or if you have a CD or DVD that contains the driver. Here are the steps to follow (a command-line alternative is shown after the list):

    -
      -
    1. Press Windows + R keys on your keyboard to open the Run dialog box.
    2. -
    3. Type devmgmt.msc and click OK to open Device Manager.
    4. -
    5. Expand the category that corresponds to your MSI N1996 device, such as Display adapters or Sound, video and game controllers.
    6. -
    7. Right-click on your MSI N1996 device and select Update driver.
    8. -
    9. Select Browse my computer for driver software.
    10. -
    11. Click on Browse and navigate to the folder where you saved or extracted the MSI N1996 driver file.
    12. -
    13. Click on Next and follow the instructions on the screen to install the driver.
    14. -
    15. Restart your computer if prompted.
    16. -
    -
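<br>
    Tip: if you are comfortable with the command line, Windows 10 and 11 also ship with the built-in pnputil tool, which can stage and install a downloaded driver package directly. From an elevated Command Prompt, run pnputil /add-driver "C:\path\to\n1996\*.inf" /subdirs /install (the folder path here is only an example; point it at wherever you extracted the MSI N1996 driver). This is an optional alternative to the Device Manager wizard above.
<br>
    -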

    Method 3: Use MSI Live Update

    -

    MSI Live Update is a software tool that allows you to update your MSI motherboard or graphics card drivers automatically. You can use MSI Live Update to scan your system and download the latest version of the MSI N1996 driver for your Windows system. Here are the steps to follow:

    -
      -
    1. Go to https://www.msi.com/page/live-update-5-manual and download MSI Live Update 5.
    2. -
    3. Install MSI Live Update 5 on your computer and launch it.
    4. -
    5. Select Scan from the main menu and wait for MSI Live Update 5 to detect your hardware devices and drivers.
    6. -
    7. Select Update from the main menu and check the box next to MSI N1996 driver if it is available.
    8. -
    9. Click on Start Download and wait for MSI Live Update 5 to download and install the driver on your system.
    10. -
    11. Restart your computer if prompted.
    12. -
    - -

    Conclusion

    - -

    The MSI N1996 is a legacy device whose driver officially supports Windows XP and Vista, but it can also work on Windows 10, 8, and 7 with some tweaks. You can download and install the MSI N1996 driver for Windows using several methods, such as DriverGuide, Device Manager, or MSI Live Update. We hope this article has helped you find and update your MSI N1996 driver.

    cec2833e83
    -
    -
    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/RimWorld 0.18.1722 LINK Free Download Mac.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/RimWorld 0.18.1722 LINK Free Download Mac.md deleted file mode 100644 index 13d4bdf000e6cf481eae37ccf20db81687e3b752..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/RimWorld 0.18.1722 LINK Free Download Mac.md +++ /dev/null @@ -1,18 +0,0 @@ - -

    How to Download RimWorld 0.18.1722 for Mac for Free

    -

    RimWorld is a popular sci-fi colony simulation game that lets you create and manage your own settlement on a distant planet. You can customize your colonists, choose from different scenarios and storytellers, and face various challenges and events. RimWorld is available on Steam for $34.99, but you can also download it for free on Mac using the following steps (a Terminal alternative is shown after the list):

    -
      -
    1. Go to https://www.macupdate.com/app/mac/63351/rimworld and click on the green "Download" button.
    2. -
    3. Wait for the download to finish and then open the .dmg file.
    4. -
    5. Drag and drop the RimWorld icon into your Applications folder.
    6. -
    7. Launch RimWorld from your Applications folder and enjoy!
    8. -
    -
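<br>
    If you prefer Terminal, the same install can be done with macOS's built-in tools: run hdiutil attach RimWorld.dmg to mount the image, cp -R "/Volumes/RimWorld/RimWorld.app" /Applications to copy the app, and hdiutil detach "/Volumes/RimWorld" to unmount. Note that the volume and app names here are assumptions; check what the mounted image is actually called before copying.
<br>
    -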

    Note that this is an unofficial download and may not be updated to the latest version of RimWorld. If you want to support the developers and get access to future updates and mods, you should buy the game on Steam.

    -

    RimWorld 0.18.1722 Free Download Mac


    DOWNLOAD ››››› https://urlcod.com/2uIc8k



    RimWorld is a game that offers endless replayability and variety. You can choose from different biomes, factions, animals, plants, and events that will shape your colony's story. You can also customize your colonists' skills, traits, passions, and relationships. You can even modify the game's difficulty and AI storyteller, which will determine how often and how harshly the game will throw challenges at you.

    -

    RimWorld is also a game that supports modding and has a vibrant community of modders and players. You can find thousands of mods on Steam Workshop or other websites that add new content, features, graphics, and gameplay to RimWorld. You can also create your own mods using the game's built-in modding tools and share them with others.

    -

    RimWorld is a game that will keep you hooked for hours and hours as you explore, build, survive, and thrive on a rimworld. Whether you want to create a peaceful utopia, a ruthless warzone, or anything in between, RimWorld will let you do it. Download RimWorld 0.18.1722 for Mac for free today and start your own adventure!

    If you are looking for a game that combines sci-fi, simulation, and strategy, RimWorld is the game for you. RimWorld is a game that simulates the lives of colonists on a distant world. You can control their actions, but not their thoughts and feelings. You have to deal with their needs, desires, moods, and relationships. You also have to manage their resources, defenses, research, and production.

    -

    RimWorld is a game that is inspired by classic sci-fi stories and movies. You can encounter pirates, raiders, traders, aliens, ancient ruins, and more. You can also experience different events and scenarios that will test your colony's survival skills. You can face solar flares, blizzards, toxic fallout, volcanic winter, and more. You can also trigger quests and missions that will reward you with loot, allies, or enemies.

    -

    RimWorld is a game that has a lot of depth and complexity. You can customize every aspect of your colony and colonists. You can design your base layout, furniture, power system, security system, and more. You can also research new technologies, craft new items, grow new crops, and breed new animals. You can also perform surgeries, install bionic parts, or use drugs and implants to enhance your colonists' abilities.

    -

    7b8c122e87
    -
    -
    \ No newline at end of file diff --git a/spaces/nickloughren/Robot-or-Not/app.py b/spaces/nickloughren/Robot-or-Not/app.py deleted file mode 100644 index 8a2d4cf1a90ad381a0518047e78687c265e59d43..0000000000000000000000000000000000000000 --- a/spaces/nickloughren/Robot-or-Not/app.py +++ /dev/null @@ -1,29 +0,0 @@ -# import gradio as gr - -# def greet(name): - # return "holllllllla, mi amigx " + name + " !! welcome !!" - -# iface = gr.Interface(fn=greet, inputs="text", outputs="text") -# iface.launch() - -__all__ = ['is_robot','learn','classify_images','categories','image','label','examples','intf'] - -from fastai.vision.all import * -import gradio as gr - -def is_robot(x): return x[0].isupper() - -learn = load_learner('model.pkl') - -categories = ('Robot','Not Robot') - -def classify_image(img): - pred,idx,probs = learn.predict(img) - return dict(zip(categories, map(float,probs))) - -image = gr.inputs.Image(shape=(192,192)) -label = gr.outputs.Label() -examples = ['robot.jpg','not_robot.jpg','dunno.jpg'] - -intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples) -intf.launch(inline=False) \ No newline at end of file diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/tools/deploy/torchscript_mask_rcnn.cpp b/spaces/nikitaPDL2023/assignment4/detectron2/tools/deploy/torchscript_mask_rcnn.cpp deleted file mode 100644 index fd6e1e9f82652a1d4d221447cd140ab675f312b2..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/tools/deploy/torchscript_mask_rcnn.cpp +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -// @lint-ignore-every CLANGTIDY -// This is an example code that demonstrates how to run inference -// with a torchscript format Mask R-CNN model exported by ./export_model.py -// using export method=tracing, caffe2_tracing & scripting. - -#include -#include -#include - -#include -#include -#include -#include - -// only needed for export_method=tracing -#include // @oss-only -// @fb-only: #include - -using namespace std; - -c10::IValue get_caffe2_tracing_inputs(cv::Mat& img, c10::Device device) { - const int height = img.rows; - const int width = img.cols; - // FPN models require divisibility of 32. - // Tracing mode does padding inside the graph, but caffe2_tracing does not. 
- assert(height % 32 == 0 && width % 32 == 0); - const int channels = 3; - - auto input = - torch::from_blob(img.data, {1, height, width, channels}, torch::kUInt8); - // NHWC to NCHW - input = input.to(device, torch::kFloat).permute({0, 3, 1, 2}).contiguous(); - - std::array im_info_data{height * 1.0f, width * 1.0f, 1.0f}; - auto im_info = - torch::from_blob(im_info_data.data(), {1, 3}).clone().to(device); - return std::make_tuple(input, im_info); -} - -c10::IValue get_tracing_inputs(cv::Mat& img, c10::Device device) { - const int height = img.rows; - const int width = img.cols; - const int channels = 3; - - auto input = - torch::from_blob(img.data, {height, width, channels}, torch::kUInt8); - // HWC to CHW - input = input.to(device, torch::kFloat).permute({2, 0, 1}).contiguous(); - return input; -} - -// create a Tuple[Dict[str, Tensor]] which is the input type of scripted model -c10::IValue get_scripting_inputs(cv::Mat& img, c10::Device device) { - const int height = img.rows; - const int width = img.cols; - const int channels = 3; - - auto img_tensor = - torch::from_blob(img.data, {height, width, channels}, torch::kUInt8); - // HWC to CHW - img_tensor = - img_tensor.to(device, torch::kFloat).permute({2, 0, 1}).contiguous(); - auto dic = c10::Dict(); - dic.insert("image", img_tensor); - return std::make_tuple(dic); -} - -c10::IValue -get_inputs(std::string export_method, cv::Mat& img, c10::Device device) { - // Given an image, create inputs in the format required by the model. - if (export_method == "tracing") - return get_tracing_inputs(img, device); - if (export_method == "caffe2_tracing") - return get_caffe2_tracing_inputs(img, device); - if (export_method == "scripting") - return get_scripting_inputs(img, device); - abort(); -} - -struct MaskRCNNOutputs { - at::Tensor pred_boxes, pred_classes, pred_masks, scores; - int num_instances() const { - return pred_boxes.sizes()[0]; - } -}; - -MaskRCNNOutputs get_outputs(std::string export_method, c10::IValue outputs) { - // Given outputs of the model, extract tensors from it to turn into a - // common MaskRCNNOutputs format. - if (export_method == "tracing") { - auto out_tuple = outputs.toTuple()->elements(); - // They are ordered alphabetically by their field name in Instances - return MaskRCNNOutputs{ - out_tuple[0].toTensor(), - out_tuple[1].toTensor(), - out_tuple[2].toTensor(), - out_tuple[3].toTensor()}; - } - if (export_method == "caffe2_tracing") { - auto out_tuple = outputs.toTuple()->elements(); - // A legacy order used by caffe2 models - return MaskRCNNOutputs{ - out_tuple[0].toTensor(), - out_tuple[2].toTensor(), - out_tuple[3].toTensor(), - out_tuple[1].toTensor()}; - } - if (export_method == "scripting") { - // With the ScriptableAdapter defined in export_model.py, the output is - // List[Dict[str, Any]]. - auto out_dict = outputs.toList().get(0).toGenericDict(); - return MaskRCNNOutputs{ - out_dict.at("pred_boxes").toTensor(), - out_dict.at("pred_classes").toTensor(), - out_dict.at("pred_masks").toTensor(), - out_dict.at("scores").toTensor()}; - } - abort(); -} - -int main(int argc, const char* argv[]) { - if (argc != 4) { - cerr << R"xx( -Usage: - ./torchscript_mask_rcnn model.ts input.jpg EXPORT_METHOD - - EXPORT_METHOD can be "tracing", "caffe2_tracing" or "scripting". 
-)xx"; - return 1; - } - std::string image_file = argv[2]; - std::string export_method = argv[3]; - assert( - export_method == "caffe2_tracing" || export_method == "tracing" || - export_method == "scripting"); - - torch::jit::FusionStrategy strat = {{torch::jit::FusionBehavior::DYNAMIC, 1}}; - torch::jit::setFusionStrategy(strat); - torch::autograd::AutoGradMode guard(false); - auto module = torch::jit::load(argv[1]); - - assert(module.buffers().size() > 0); - // Assume that the entire model is on the same device. - // We just put input to this device. - auto device = (*begin(module.buffers())).device(); - - cv::Mat input_img = cv::imread(image_file, cv::IMREAD_COLOR); - auto inputs = get_inputs(export_method, input_img, device); - - // Run the network - auto output = module.forward({inputs}); - if (device.is_cuda()) - c10::cuda::getCurrentCUDAStream().synchronize(); - - // run 3 more times to benchmark - int N_benchmark = 3, N_warmup = 1; - auto start_time = chrono::high_resolution_clock::now(); - for (int i = 0; i < N_benchmark + N_warmup; ++i) { - if (i == N_warmup) - start_time = chrono::high_resolution_clock::now(); - output = module.forward({inputs}); - if (device.is_cuda()) - c10::cuda::getCurrentCUDAStream().synchronize(); - } - auto end_time = chrono::high_resolution_clock::now(); - auto ms = chrono::duration_cast(end_time - start_time) - .count(); - cout << "Latency (should vary with different inputs): " - << ms * 1.0 / 1e6 / N_benchmark << " seconds" << endl; - - // Parse Mask R-CNN outputs - auto rcnn_outputs = get_outputs(export_method, output); - cout << "Number of detected objects: " << rcnn_outputs.num_instances() - << endl; - - cout << "pred_boxes: " << rcnn_outputs.pred_boxes.toString() << " " - << rcnn_outputs.pred_boxes.sizes() << endl; - cout << "scores: " << rcnn_outputs.scores.toString() << " " - << rcnn_outputs.scores.sizes() << endl; - cout << "pred_classes: " << rcnn_outputs.pred_classes.toString() << " " - << rcnn_outputs.pred_classes.sizes() << endl; - cout << "pred_masks: " << rcnn_outputs.pred_masks.toString() << " " - << rcnn_outputs.pred_masks.sizes() << endl; - - cout << rcnn_outputs.pred_boxes << endl; - return 0; -} diff --git a/spaces/noelshin/selfmask/networks/resnet_models.py b/spaces/noelshin/selfmask/networks/resnet_models.py deleted file mode 100644 index e5fe1285a0eb657cdc6f865edcf95023db946dcb..0000000000000000000000000000000000000000 --- a/spaces/noelshin/selfmask/networks/resnet_models.py +++ /dev/null @@ -1,273 +0,0 @@ -#!/usr/bin/env python -# -*- coding:utf-8 -*- -# Author: Donny You(youansheng@gmail.com) -import math -import torch.nn as nn -from collections import OrderedDict -from .module_helper import ModuleHelper - - -model_urls = { - 'resnet18': 'https://download.pytorch.org/backbones/resnet18-5c106cde.pth', - 'resnet34': 'https://download.pytorch.org/backbones/resnet34-333f7ec4.pth', - 'resnet50': 'https://download.pytorch.org/backbones/resnet50-19c8e357.pth', - 'resnet101': 'https://download.pytorch.org/backbones/resnet101-5d3b4d8f.pth', - 'resnet152': 'https://download.pytorch.org/backbones/resnet152-b121ed2d.pth' -} - - -def conv3x3(in_planes, out_planes, stride=1): - "3x3 convolution with padding" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=1, bias=False) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None, norm_type=None): - super(BasicBlock, self).__init__() - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = 
ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None, norm_type=None): - super(Bottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) - self.bn1 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, - padding=1, bias=False) - self.bn2 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes) - self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) - self.bn3 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes * 4) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class ResNet(nn.Module): - def __init__(self, block, layers, width_multiplier=1.0, num_classes=1000, deep_base=False, norm_type=None): - super(ResNet, self).__init__() - self.inplanes = 128 if deep_base else int(64 * width_multiplier) - self.width_multiplier = width_multiplier - if deep_base: - self.prefix = nn.Sequential(OrderedDict([ - ('conv1', nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)), - ('bn1', ModuleHelper.BatchNorm2d(norm_type=norm_type)(64)), - ('relu1', nn.ReLU(inplace=False)), - ('conv2', nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)), - ('bn2', ModuleHelper.BatchNorm2d(norm_type=norm_type)(64)), - ('relu2', nn.ReLU(inplace=False)), - ('conv3', nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False)), - ('bn3', ModuleHelper.BatchNorm2d(norm_type=norm_type)(self.inplanes)), - ('relu3', nn.ReLU(inplace=False))] - )) - else: - self.prefix = nn.Sequential(OrderedDict([ - ('conv1', nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)), - ('bn1', ModuleHelper.BatchNorm2d(norm_type=norm_type)(self.inplanes)), - ('relu', nn.ReLU(inplace=False))] - )) - - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=False) # change. 
- - self.layer1 = self._make_layer(block, int(64 * width_multiplier), layers[0], norm_type=norm_type) - self.layer2 = self._make_layer(block, int(128 * width_multiplier), layers[1], stride=2, norm_type=norm_type) - self.layer3 = self._make_layer(block, int(256 * width_multiplier), layers[2], stride=2, norm_type=norm_type) - self.layer4 = self._make_layer(block, int(512 * width_multiplier), layers[3], stride=2, norm_type=norm_type) - self.avgpool = nn.AvgPool2d(7, stride=1) - self.fc = nn.Linear(int(512 * block.expansion * width_multiplier), num_classes) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2. / n)) - elif isinstance(m, ModuleHelper.BatchNorm2d(norm_type=norm_type, ret_cls=True)): - m.weight.data.fill_(1) - m.bias.data.zero_() - - def _make_layer(self, block, planes, blocks, stride=1, norm_type=None): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d(self.inplanes, planes * block.expansion, - kernel_size=1, stride=stride, bias=False), - ModuleHelper.BatchNorm2d(norm_type=norm_type)(int(planes * block.expansion * self.width_multiplier)), - ) - - layers = [] - layers.append(block(self.inplanes, planes, - stride, downsample, norm_type=norm_type)) - - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(self.inplanes, planes, norm_type=norm_type)) - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.prefix(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - - x = self.avgpool(x) - x = x.view(x.size(0), -1) - x = self.fc(x) - - return x - - -def resnet18(num_classes=1000, pretrained=None, norm_type='batchnorm', **kwargs): - """Constructs a ResNet-18 model. - Args: - pretrained (bool): If True, returns a model pre-trained on Places - norm_type (str): choose norm type - """ - model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes, deep_base=False, norm_type=norm_type) - model = ModuleHelper.load_model(model, pretrained=pretrained) - return model - - -def deepbase_resnet18(num_classes=1000, pretrained=None, norm_type='batchnorm', **kwargs): - """Constructs a ResNet-18 model. - Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes, deep_base=True, norm_type=norm_type) - model = ModuleHelper.load_model(model, pretrained=pretrained) - return model - - -def resnet34(num_classes=1000, pretrained=None, norm_type='batchnorm', **kwargs): - """Constructs a ResNet-34 model. - Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, deep_base=False, norm_type=norm_type) - model = ModuleHelper.load_model(model, pretrained=pretrained) - return model - - -def deepbase_resnet34(num_classes=1000, pretrained=None, norm_type='batchnorm', **kwargs): - """Constructs a ResNet-34 model. - Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, deep_base=True, norm_type=norm_type) - model = ModuleHelper.load_model(model, pretrained=pretrained) - return model - - -def resnet50(num_classes=1000, pretrained=None, norm_type='batchnorm', **kwargs): - """Constructs a ResNet-50 model. 
- Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, deep_base=False, norm_type=norm_type, - width_multiplier=kwargs["width_multiplier"]) - model = ModuleHelper.load_model(model, pretrained=pretrained) - return model - - -def deepbase_resnet50(num_classes=1000, pretrained=None, norm_type='batchnorm', **kwargs): - """Constructs a ResNet-50 model. - Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, deep_base=True, norm_type=norm_type) - model = ModuleHelper.load_model(model, pretrained=pretrained) - return model - - -def resnet101(num_classes=1000, pretrained=None, norm_type='batchnorm', **kwargs): - """Constructs a ResNet-101 model. - Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, deep_base=False, norm_type=norm_type) - model = ModuleHelper.load_model(model, pretrained=pretrained) - return model - - -def deepbase_resnet101(num_classes=1000, pretrained=None, norm_type='batchnorm', **kwargs): - """Constructs a ResNet-101 model. - Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, deep_base=True, norm_type=norm_type) - model = ModuleHelper.load_model(model, pretrained=pretrained) - return model - - -def resnet152(num_classes=1000, pretrained=None, norm_type='batchnorm', **kwargs): - """Constructs a ResNet-152 model. - - Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes, deep_base=False, norm_type=norm_type) - model = ModuleHelper.load_model(model, pretrained=pretrained) - return model - - -def deepbase_resnet152(num_classes=1000, pretrained=None, norm_type='batchnorm', **kwargs): - """Constructs a ResNet-152 model. 
- - Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes, deep_base=True, norm_type=norm_type) - model = ModuleHelper.load_model(model, pretrained=pretrained) - return model diff --git a/spaces/nomic-ai/amazon_reviews_multi/style.css b/spaces/nomic-ai/amazon_reviews_multi/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/amazon_reviews_multi/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/nontGcob/T2E_Vocabulary_Exam_Generator/modules/inference.py b/spaces/nontGcob/T2E_Vocabulary_Exam_Generator/modules/inference.py deleted file mode 100644 index fbf5cce09c4dd0844bb300e7afb161a15f7b0149..0000000000000000000000000000000000000000 --- a/spaces/nontGcob/T2E_Vocabulary_Exam_Generator/modules/inference.py +++ /dev/null @@ -1,11 +0,0 @@ -from transformers import T5Tokenizer, T5ForConditionalGeneration - -tokenizer = T5Tokenizer.from_pretrained("t5-small") -model = T5ForConditionalGeneration.from_pretrained("t5-small") - - -def infer_t5(input): - input_ids = tokenizer(input, return_tensors="pt").input_ids - outputs = model.generate(input_ids) - - return tokenizer.decode(outputs[0], skip_special_tokens=True) diff --git a/spaces/ntt123/vietnam-male-voice-wavegru-tts/sparse_matmul/numerics/type_utils.h b/spaces/ntt123/vietnam-male-voice-wavegru-tts/sparse_matmul/numerics/type_utils.h deleted file mode 100644 index 51291abeefa1637a246f2095a4e4a3470e6ef853..0000000000000000000000000000000000000000 --- a/spaces/ntt123/vietnam-male-voice-wavegru-tts/sparse_matmul/numerics/type_utils.h +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LYRA_CODEC_SPARSE_MATMUL_NUMERICS_TYPE_UTILS_H_ -#define LYRA_CODEC_SPARSE_MATMUL_NUMERICS_TYPE_UTILS_H_ - -// A collection of useful utilities for determining types based on other types. - -#include - -#include "sparse_matmul/numerics/fixed_types.h" -#include "sparse_matmul/numerics/float16_types.h" - -namespace csrblocksparse { - -// Basic idea is that any two float types yield a float, fixed16 types -// yield a fixed32 with the exponent bits summed. Other options are not -// allowed. 
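-// Illustration of the rule above, in mantissa bits: the product of two
-// fixed16 inputs with M1 and M2 mantissa bits is typed fixed32<31 - M1 - M2>,
-// which (assuming fixed32<E> carries 31 - E mantissa bits, per fixed_types.h)
-// gives the result a mantissa of M1 + M2 bits. Any float operand makes the
-// product type plain float.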
-template -struct TypeOfProduct {}; - -template -struct TypeOfProduct< - LhsType, RhsType, - typename std::enable_if::value && - IsAnyFloatType::value>::type> { - using type = float; -}; - -template -struct TypeOfProduct< - LhsType, RhsType, - typename std::enable_if::value && - IsFixed16Type::value>::type> { - static_assert(LhsType::kMantissaBits + RhsType::kMantissaBits < 31, - "Sum of mantissa bits must not exceed 31."); - using type = fixed32<31 - LhsType::kMantissaBits - RhsType::kMantissaBits>; -}; - -// Given a weight type T, determine what the RhsType should be for that type. -// bfloat16 / fp16 -> float; fixed16 = fixed16 -template -struct RhsTypeIs { - using type = float; -}; - -template -struct RhsTypeIs::value>::type> { - using type = T; -}; - -template -struct MantissaBitsOf { - // Although int types have zero mantissa bits, use 1 to avoid division by 0. - static constexpr int value = 1; -}; - -template -struct MantissaBitsOf< - T, typename std::enable_if::value || - IsCustomFloatType::value>::type> { - public: - static constexpr int value = T::kMantissaBits; -}; - -template -struct MantissaBitsOf< - T, typename std::enable_if::value>::type> { - public: - // Ignoring the fact that doubles have more mantissa bits. - static constexpr int value = 24; -}; - -} // namespace csrblocksparse - -#endif // LYRA_CODEC_SPARSE_MATMUL_NUMERICS_TYPE_UTILS_H_ diff --git a/spaces/ochyai/ochyai_test/template.md b/spaces/ochyai/ochyai_test/template.md deleted file mode 100644 index 1bf2fa69afa0b79e225446a04355dcae6badd194..0000000000000000000000000000000000000000 --- a/spaces/ochyai/ochyai_test/template.md +++ /dev/null @@ -1,11 +0,0 @@ -### Your Reply - -Please write your reply and brainstorm every point of your reply step-by-step to fill the details. - -### Your Explanation of Reply Here - -Please write your explanation of your reply and brainstorm every point of your reply step-by-step to fill the details. - -### Your Findings Here - -Please write your findings Here and brainstorm every point of your reply step-by-step to fill the details. 
\ No newline at end of file diff --git a/spaces/odettecantswim/rvc-mlbb/infer_pack/modules.py b/spaces/odettecantswim/rvc-mlbb/infer_pack/modules.py deleted file mode 100644 index 960481cedad9a6106f2bf0b9e86e82b120f7b33f..0000000000000000000000000000000000000000 --- a/spaces/odettecantswim/rvc-mlbb/infer_pack/modules.py +++ /dev/null @@ -1,522 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from infer_pack.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) 
- y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - 
padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * 
x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/odhier/MGX-Midjourney-v4/app.py b/spaces/odhier/MGX-Midjourney-v4/app.py deleted file mode 100644 index bea4accb45793c8e748731c184dee0ffaf509dd5..0000000000000000000000000000000000000000 --- a/spaces/odhier/MGX-Midjourney-v4/app.py +++ /dev/null @@ -1,8 +0,0 @@ -import gradio as gr - -description = """
    - -
    - """ - -gr.Interface.load("models/prompthero/openjourney", description=description).launch() \ No newline at end of file diff --git a/spaces/oguzakif/video-object-remover/FGT_codes/FGT/data/util/MaskModel.py b/spaces/oguzakif/video-object-remover/FGT_codes/FGT/data/util/MaskModel.py deleted file mode 100644 index 9cca4f962889e9b3fd30d0f92f19c8b3104bfd3a..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/FGT_codes/FGT/data/util/MaskModel.py +++ /dev/null @@ -1,123 +0,0 @@ -import random -import numpy as np - -class RandomMask(): - def __init__(self, videoLength, dataInfo): - self.videoLength = videoLength - self.imageHeight, self.imageWidth = dataInfo['image']['image_height'], \ - dataInfo['image']['image_width'] - self.maskHeight, self.maskWidth = dataInfo['mask']['mask_height'], \ - dataInfo['mask']['mask_width'] - try: - self.maxDeltaHeight, self.maxDeltaWidth = dataInfo['mask']['max_delta_height'], \ - dataInfo['mask']['max_delta_width'] - except KeyError: - self.maxDeltaHeight, self.maxDeltaWidth = 0, 0 - - try: - self.verticalMargin, self.horizontalMargin = dataInfo['mask']['vertical_margin'], \ - dataInfo['mask']['horizontal_margin'] - except KeyError: - self.verticalMargin, self.horizontalMargin = 0, 0 - - def __call__(self): - from .utils import random_bbox - from .utils import bbox2mask - masks = [] - bbox = random_bbox(self.imageHeight, self.imageWidth, self.verticalMargin, self.horizontalMargin, - self.maskHeight, self.maskWidth) - if random.uniform(0, 1) > 0.5: - mask = bbox2mask(self.imageHeight, self.imageWidth, 0, 0, bbox) - for frame in range(self.videoLength): - masks.append(mask) - else: - for frame in range(self.videoLength): - delta_h, delta_w = random.randint(-3, 3), random.randint(-3, 3) # 每次向四个方向移动三个像素以内 - bbox = list(bbox) - bbox[0] = min(max(self.verticalMargin, bbox[0] + delta_h), self.imageHeight - self.verticalMargin - bbox[2]) - bbox[1] = min(max(self.horizontalMargin, bbox[1] + delta_w), self.imageWidth - self.horizontalMargin - bbox[3]) - mask = bbox2mask(self.imageHeight, self.imageWidth, 0, 0, bbox) - masks.append(mask) - masks = np.stack(masks, axis=0) - if len(masks.shape) == 3: - masks = masks[:, :, :, np.newaxis] - assert len(masks.shape) == 4, 'Wrong mask dimension {}'.format(len(masks.shape)) - return masks - - -class MidRandomMask(): - ### This mask is considered without random motion - def __init__(self, videoLength, dataInfo): - self.videoLength = videoLength - self.imageHeight, self.imageWidth = dataInfo['image']['image_height'], \ - dataInfo['image']['image_width'] - self.maskHeight, self.maskWidth = dataInfo['mask']['mask_height'], \ - dataInfo['mask']['mask_width'] - - def __call__(self): - from .utils import mid_bbox_mask - mask = mid_bbox_mask(self.imageHeight, self.imageWidth, self.maskHeight, self.maskWidth) - masks = [] - for _ in range(self.videoLength): - masks.append(mask) - return mask - - -class MatrixMask(): - ### This mask is considered without random motion - def __init__(self, videoLength, dataInfo): - self.videoLength = videoLength - self.imageHeight, self.imageWidth = dataInfo['image']['image_height'], \ - dataInfo['image']['image_width'] - self.maskHeight, self.maskWidth = dataInfo['mask']['mask_height'], \ - dataInfo['mask']['mask_width'] - try: - self.row, self.column = dataInfo['mask']['row'], \ - dataInfo['mask']['column'] - except KeyError: - self.row, self.column = 5, 4 - - def __call__(self): - from .utils import matrix2bbox - mask = matrix2bbox(self.imageHeight, self.imageWidth, 
-class MatrixMask(): - ### This mask is generated without random motion - def __init__(self, videoLength, dataInfo): - self.videoLength = videoLength - self.imageHeight, self.imageWidth = dataInfo['image']['image_height'], \ - dataInfo['image']['image_width'] - self.maskHeight, self.maskWidth = dataInfo['mask']['mask_height'], \ - dataInfo['mask']['mask_width'] - try: - self.row, self.column = dataInfo['mask']['row'], \ - dataInfo['mask']['column'] - except KeyError: - self.row, self.column = 5, 4 - - def __call__(self): - from .utils import matrix2bbox - mask = matrix2bbox(self.imageHeight, self.imageWidth, self.maskHeight, - self.maskWidth, self.row, self.column) - masks = [] - for video in range(self.videoLength): - masks.append(mask) - return masks - - -class FreeFormMask(): - def __init__(self, videoLength, dataInfo): - self.videoLength = videoLength - self.imageHeight, self.imageWidth = dataInfo['image']['image_height'], \ - dataInfo['image']['image_width'] - self.maxVertex = dataInfo['mask']['max_vertex'] - self.maxLength = dataInfo['mask']['max_length'] - self.maxBrushWidth = dataInfo['mask']['max_brush_width'] - self.maxAngle = dataInfo['mask']['max_angle'] - - def __call__(self): - from .utils import freeFormMask - mask = freeFormMask(self.imageHeight, self.imageWidth, - self.maxVertex, self.maxLength, - self.maxBrushWidth, self.maxAngle) - return mask - - -class StationaryMask(): - def __init__(self, videoLength, dataInfo): - self.videoLength = videoLength - self.imageHeight, self.imageWidth = dataInfo['image']['image_height'], \ - dataInfo['image']['image_width'] - # self.maxPointNum = dataInfo['mask']['max_point_num'] - # self.maxLength = dataInfo['mask']['max_length'] - - def __call__(self): - from .STTN_mask import create_random_shape_with_random_motion - masks = create_random_shape_with_random_motion(self.videoLength, 0.9, 1.1, 1, 10, self.imageHeight, self.imageWidth) - masks = np.stack(masks, axis=0) - if len(masks.shape) == 3: - masks = masks[:, :, :, np.newaxis] - assert len(masks.shape) == 4, 'Masks have a wrong shape {}'.format(len(masks.shape)) - return masks \ No newline at end of file diff --git a/spaces/omsree/myGenAIapp-1/app.py b/spaces/omsree/myGenAIapp-1/app.py deleted file mode 100644 index b81bf231e223eb1c9eb3da9d54ed240adfac4297..0000000000000000000000000000000000000000 --- a/spaces/omsree/myGenAIapp-1/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') - -template = """You are an enthusiastic high school student passionate about science and exploration. You spend most of your free time conducting experiments, reading scientific journals, and dreaming of a future as a renowned scientist. Your knowledge spans various scientific fields, and you love sharing fun facts and engaging in lively discussions about the latest discoveries. -{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -def get_text_response(user_message,history): - response = llm_chain.predict(user_message = user_message) - return response - -demo = gr.ChatInterface(get_text_response) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
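To make the template/memory wiring above concrete, here is a sketch of what the chain's prompt looks like once ConversationBufferMemory has injected one prior exchange into {chat_history}; the dialogue is hypothetical:

    filled = template.format(
        chat_history="User: What is a quasar?\nChatbot: A quasar is an extremely bright galactic core.",
        user_message="How far away are they?",
    )
    print(filled)  # persona paragraph, then the history, then the new user line, ending with "Chatbot:"
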
diff --git a/spaces/perilli/tortoise-tts-v2/tortoise/utils/samples_generator.py b/spaces/perilli/tortoise-tts-v2/tortoise/utils/samples_generator.py deleted file mode 100644 index 61d30141e1fe652fe1abcad61e5d21db11f88298..0000000000000000000000000000000000000000 --- a/spaces/perilli/tortoise-tts-v2/tortoise/utils/samples_generator.py +++ /dev/null @@ -1,51 +0,0 @@
-import os
-
-# This script builds the sample webpage.
-
-if __name__ == '__main__':
-    result = "<html><head><title>These words were never spoken.</title></head><body><h1>Handpicked results</h1>"
-    for fv in os.listdir('../../results/favorites'):
-        url = f'https://github.com/neonbjb/tortoise-tts/raw/main/results/favorites/{fv}'
-        result = result + f'<audio controls="" style="width: 600px;"><source src="{url}" type="audio/mp3"></audio><br>\n'
-
-    result = result + "<h1>Handpicked longform result:</h1>"
-    url = f'https://github.com/neonbjb/tortoise-tts/raw/main/results/favorite_riding_hood.mp3'
-    result = result + f'<audio controls="" style="width: 600px;"><source src="{url}" type="audio/mp3"></audio><br>\n'
-
-    result = result + "<h1>Compared to Tacotron2 (with the LJSpeech voice):</h1><table><th>Tacotron2+Waveglow</th><th>TorToiSe</th>"
-    for k in range(2,5,1):
-        url1 = f'https://github.com/neonbjb/tortoise-tts/raw/main/results/tacotron_comparison/{k}-tacotron2.mp3'
-        url2 = f'https://github.com/neonbjb/tortoise-tts/raw/main/results/tacotron_comparison/{k}-tortoise.mp3'
-        result = result + f'<tr><td><audio controls="" style="width: 300px;"><source src="{url1}" type="audio/mp3"></audio><br>\n</td>' \
-                          f'<td><audio controls="" style="width: 300px;"><source src="{url2}" type="audio/mp3"></audio><br>\n</td></tr>'
-    result = result + "</table>"
-
-    result = result + "<h1>Various spoken texts for all voices:</h1>"
-    voices = ['angie', 'daniel', 'deniro', 'emma', 'freeman', 'geralt', 'halle', 'jlaw', 'lj', 'myself',
-              'pat', 'snakes', 'tom', 'train_atkins', 'train_dotrice', 'train_kennard', 'weaver', 'william']
-    lines = ['<table><th>text</th>' + ''.join([f'<th>{v}</th>' for v in voices])]
-    line = f'<tr><td>reference clip</td>'
-    for v in voices:
-        url = f'https://github.com/neonbjb/tortoise-tts/raw/main/voices/{v}/1.wav'
-        line = line + f'<td><audio controls="" style="width: 150px;"><source src="{url}" type="audio/wav"></audio></td>'
-    line = line + "</tr>"
-    lines.append(line)
-    for txt in os.listdir('../../results/various/'):
-        if 'desktop' in txt:
-            continue
-        line = f'<tr><td>{txt}</td>'
-        for v in voices:
-            url = f'https://github.com/neonbjb/tortoise-tts/raw/main/results/various/{txt}/{v}.mp3'
-            line = line + f'<td><audio controls="" style="width: 150px;"><source src="{url}" type="audio/mp3"></audio></td>'
-        line = line + "</tr>"
-        lines.append(line)
-    result = result + '\n'.join(lines) + "</table>"
-
-    result = result + "<h1>Longform result for all voices:</h1>"
-    for lf in os.listdir('../../results/riding_hood'):
-        url = f'https://github.com/neonbjb/tortoise-tts/raw/main/results/riding_hood/{lf}'
-        result = result + f'<audio controls="" style="width: 600px;"><source src="{url}" type="audio/mp3"></audio><br>\n'
-
-    result = result + "</body></html>"
-    with open('result.html', 'w', encoding='utf-8') as f:
-        f.write(result)
    \n' - - result = result + "" - with open('result.html', 'w', encoding='utf-8') as f: - f.write(result) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/platformdirs/__main__.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/platformdirs/__main__.py deleted file mode 100644 index 6a0d6dd12e36092c1497f5390470f85b1afbbb17..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/platformdirs/__main__.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Main entry point.""" -from __future__ import annotations - -from pip._vendor.platformdirs import PlatformDirs, __version__ - -PROPS = ( - "user_data_dir", - "user_config_dir", - "user_cache_dir", - "user_state_dir", - "user_log_dir", - "user_documents_dir", - "user_downloads_dir", - "user_pictures_dir", - "user_videos_dir", - "user_music_dir", - "user_runtime_dir", - "site_data_dir", - "site_config_dir", - "site_cache_dir", -) - - -def main() -> None: - """Run main entry point.""" - app_name = "MyApp" - app_author = "MyCompany" - - print(f"-- platformdirs {__version__} --") # noqa: T201 - - print("-- app dirs (with optional 'version')") # noqa: T201 - dirs = PlatformDirs(app_name, app_author, version="1.0") - for prop in PROPS: - print(f"{prop}: {getattr(dirs, prop)}") # noqa: T201 - - print("\n-- app dirs (without optional 'version')") # noqa: T201 - dirs = PlatformDirs(app_name, app_author) - for prop in PROPS: - print(f"{prop}: {getattr(dirs, prop)}") # noqa: T201 - - print("\n-- app dirs (without optional 'appauthor')") # noqa: T201 - dirs = PlatformDirs(app_name) - for prop in PROPS: - print(f"{prop}: {getattr(dirs, prop)}") # noqa: T201 - - print("\n-- app dirs (with disabled 'appauthor')") # noqa: T201 - dirs = PlatformDirs(app_name, appauthor=False) - for prop in PROPS: - print(f"{prop}: {getattr(dirs, prop)}") # noqa: T201 - - -if __name__ == "__main__": - main() diff --git a/spaces/platzi/platzi-curso-streamlit-segmentacion-imagenes/README.md b/spaces/platzi/platzi-curso-streamlit-segmentacion-imagenes/README.md deleted file mode 100644 index 91efd0b52f5ee9e521c7c59c0412dfdb97eb7497..0000000000000000000000000000000000000000 --- a/spaces/platzi/platzi-curso-streamlit-segmentacion-imagenes/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Curso de Streamlit - Segmentacion Imagenes -emoji: 💻 -colorFrom: pink -colorTo: indigo -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/preechanon/Cutto/app.py b/spaces/preechanon/Cutto/app.py deleted file mode 100644 index 342e361b076d53c21a452e16f0e4e640e2ae99d7..0000000000000000000000000000000000000000 --- a/spaces/preechanon/Cutto/app.py +++ /dev/null @@ -1,127 +0,0 @@ -import streamlit as st -from keras.layers import LSTM, Dropout, Bidirectional, Dense,Embedding,Flatten,Maximum,Activation,Conv2D,LayerNormalization,add\ -, BatchNormalization, SpatialDropout1D ,Input,Layer,Multiply,Reshape ,Add, GRU,Concatenate,Conv1D,TimeDistributed,ZeroPadding1D,concatenate,MaxPool1D,GlobalMaxPooling1D -import keras.backend as K -from keras import initializers, regularizers, constraints, activations -from keras.initializers import Constant -from keras import Model -import sys -import json -import pandas as pd -import numpy as np - -with open('CHAR_TYPES_MAP.json') as json_file: - CHAR_TYPES_MAP = json.load(json_file) -with 
open('CHARS_MAP.json') as json_file: - CHARS_MAP = json.load(json_file) -with open('CHAR_TYPE_FLATTEN.json') as json_file: - CHAR_TYPE_FLATTEN = json.load(json_file) - - -class TimestepDropout(Dropout): - - def __init__(self, rate, **kwargs): - super(TimestepDropout, self).__init__(rate, **kwargs) - - def _get_noise_shape(self, inputs): - input_shape = K.shape(inputs) - noise_shape = (input_shape[0], input_shape[1], 1) - return noise_shape - - -def model_(n_gram = 21): - - input1 = Input(shape=(21,),dtype='float32',name = 'char_input') - input2 = Input(shape=(21,),dtype='float32',name = 'type_input') - - a = Embedding(178, 32,input_length=21)(input1) - a = SpatialDropout1D(0.15)(a) - #a = TimestepDropout(0.05)(a) - char_input = BatchNormalization()(a) - - a_concat = [] - filters = [[1,200],[2,200],[3,200],[4,200],[5,200],[6,200],[8,200],[11,150],[12,100]] - #filters = [[1,200],[2,200],[3,200],[4,200],[5,200],[6,200],[7,200],[8,200],[9,150],[10,150],[11,150],[12,100]] - - for (window_size, filters_size) in filters: - convs = Conv1D(filters=filters_size, kernel_size=window_size, strides=1)(char_input) - convs = Activation('elu')(convs) - convs = TimeDistributed(Dense(5, input_shape=(21, filters_size)))(convs) - convs = ZeroPadding1D(padding=(0, window_size-1))(convs) - a_concat.append(convs) - token_max = Maximum()(a_concat) - lstm_char = Bidirectional(LSTM(128 ,return_sequences=True,kernel_regularizer=regularizers.L2(0.0000001),bias_regularizer=regularizers.L2(0.0000001)))(char_input) - lstm_char = Dense(64, activation='elu')(lstm_char) - #lstm_char = Bidirectional(LSTM(64 ,return_sequences=True))(lstm_char) - #lstm_char = Attention(return_sequences=True)(lstm_char) - - b = Embedding(12, 12, input_length=21)(input2) - type_inputs = SpatialDropout1D(0.15)(b) - #type_inputs = TimestepDropout(0.05)(b) - - x = Concatenate()([type_inputs, char_input, lstm_char, token_max]) - x = BatchNormalization()(x) - - x = Flatten()(x) - x = Dense(100, activation='elu')(x) - x = Dropout(0.2)(x) - out = Dense(1, activation='sigmoid',dtype = 'float32',kernel_regularizer=regularizers.L2(0.01),bias_regularizer=regularizers.L2(0.01))(x) - - - model = Model(inputs=[input1, input2], outputs=out) - - return model - - -def create_feature_array(text, n_pad=21): - - n = len(text) - n_pad_2 = int((n_pad - 1)/2) - text_pad = [' '] * n_pad_2 + [t for t in text] + [' '] * n_pad_2 - x_char, x_type = [], [] - for i in range(n_pad_2, n_pad_2 + n): - char_list = text_pad[i + 1: i + n_pad_2 + 1] + \ - list(reversed(text_pad[i - n_pad_2: i])) + \ - [text_pad[i]] - char_map = [CHARS_MAP.get(c, 179) for c in char_list] - char_type = [CHAR_TYPES_MAP.get(CHAR_TYPE_FLATTEN.get(c, 'o'), 4) - for c in char_list] - x_char.append(char_map) - x_type.append(char_type) - x_char = np.array(x_char).astype(float) - x_type = np.array(x_type).astype(float) - return x_char, x_type - -def tokenize(text): - n_pad = 21 - - if not text: - return [''] - - if isinstance(text, str) and sys.version_info.major == 2: - text = text.decode('utf-8') - - x_char, x_type = create_feature_array(text, n_pad=n_pad) - word_end = [] - - y_predict = model.predict([x_char, x_type], batch_size = 512) - y_predict = (y_predict.ravel() > 0.46542968749999997).astype(int) - word_end = y_predict[1:].tolist() + [1] - - tokens = [] - word = '' - for char, w_e in zip(text, word_end): - word += char - if w_e: - tokens.append(word) - word = '' - return tokens - - -model = model_() -model.load_weights("cutto_tf2.h5") - -text = st.text_area("Enter original text!") -words = 
tokenize(text) - -st.write('|'.join(words)) \ No newline at end of file diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/chatbot/shared/autorender.d.ts b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/chatbot/shared/autorender.d.ts deleted file mode 100644 index 946e678e94e682f47568ac2863fabe280b7788c4..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/chatbot/shared/autorender.d.ts +++ /dev/null @@ -1 +0,0 @@ -declare module "katex/dist/contrib/auto-render.js"; diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/data_with_comments.f b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/data_with_comments.f deleted file mode 100644 index 4128f004e840087ab8e08a06c76995b249a561b0..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/data_with_comments.f +++ /dev/null @@ -1,8 +0,0 @@ - BLOCK DATA PARAM_INI - COMMON /MYCOM/ MYTAB - INTEGER MYTAB(3) - DATA MYTAB/ - * 0, ! 1 and more commenty stuff - * 4, ! 2 - * 0 / - END diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/groupby/test_index_as_string.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/groupby/test_index_as_string.py deleted file mode 100644 index 4aaf3de9a23b2416603947db312bb49eea343ba8..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/groupby/test_index_as_string.py +++ /dev/null @@ -1,85 +0,0 @@ -import numpy as np -import pytest - -import pandas as pd -import pandas._testing as tm - - -@pytest.fixture(params=[["inner"], ["inner", "outer"]]) -def frame(request): - levels = request.param - df = pd.DataFrame( - { - "outer": ["a", "a", "a", "b", "b", "b"], - "inner": [1, 2, 3, 1, 2, 3], - "A": np.arange(6), - "B": ["one", "one", "two", "two", "one", "one"], - } - ) - if levels: - df = df.set_index(levels) - - return df - - -@pytest.fixture() -def series(): - df = pd.DataFrame( - { - "outer": ["a", "a", "a", "b", "b", "b"], - "inner": [1, 2, 3, 1, 2, 3], - "A": np.arange(6), - "B": ["one", "one", "two", "two", "one", "one"], - } - ) - s = df.set_index(["outer", "inner", "B"])["A"] - - return s - - -@pytest.mark.parametrize( - "key_strs,groupers", - [ - ("inner", pd.Grouper(level="inner")), # Index name - (["inner"], [pd.Grouper(level="inner")]), # List of index name - (["B", "inner"], ["B", pd.Grouper(level="inner")]), # Column and index - (["inner", "B"], [pd.Grouper(level="inner"), "B"]), # Index and column - ], -) -def test_grouper_index_level_as_string(frame, key_strs, groupers): - if "B" not in key_strs or "outer" in frame.columns: - result = frame.groupby(key_strs).mean(numeric_only=True) - expected = frame.groupby(groupers).mean(numeric_only=True) - else: - result = frame.groupby(key_strs).mean() - expected = frame.groupby(groupers).mean() - tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize( - "levels", - [ - "inner", - "outer", - "B", - ["inner"], - ["outer"], - ["B"], - ["inner", "outer"], - ["outer", "inner"], - ["inner", "outer", "B"], - ["B", "outer", "inner"], - ], -) -def test_grouper_index_level_as_string_series(series, levels): - # Compute expected result - if isinstance(levels, list): - groupers = 
[pd.Grouper(level=lv) for lv in levels] - else: - groupers = pd.Grouper(level=levels) - - expected = series.groupby(groupers).mean() - - # Compute and check result - result = series.groupby(levels).mean() - tm.assert_series_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_fillna.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_fillna.py deleted file mode 100644 index 40aa95d0a46058d2dc3fc5208ca39328d96b23fb..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_fillna.py +++ /dev/null @@ -1,22 +0,0 @@ -from pandas import ( - Index, - NaT, - Timedelta, - TimedeltaIndex, -) -import pandas._testing as tm - - -class TestFillNA: - def test_fillna_timedelta(self): - # GH#11343 - idx = TimedeltaIndex(["1 day", NaT, "3 day"]) - - exp = TimedeltaIndex(["1 day", "2 day", "3 day"]) - tm.assert_index_equal(idx.fillna(Timedelta("2 day")), exp) - - exp = TimedeltaIndex(["1 day", "3 hour", "3 day"]) - tm.assert_index_equal(idx.fillna(Timedelta("3 hour")), exp) - - exp = Index([Timedelta("1 day"), "x", Timedelta("3 day")], dtype=object) - tm.assert_index_equal(idx.fillna("x"), exp) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/cachecontrol/compat.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/cachecontrol/compat.py deleted file mode 100644 index ccec9379dba2b03015ce123dd04a042f32431235..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/cachecontrol/compat.py +++ /dev/null @@ -1,32 +0,0 @@ -# SPDX-FileCopyrightText: 2015 Eric Larson -# -# SPDX-License-Identifier: Apache-2.0 - -try: - from urllib.parse import urljoin -except ImportError: - from urlparse import urljoin - - -try: - import cPickle as pickle -except ImportError: - import pickle - -# Handle the case where the requests module has been patched to not have -# urllib3 bundled as part of its source. 
-try: - from pip._vendor.requests.packages.urllib3.response import HTTPResponse -except ImportError: - from pip._vendor.urllib3.response import HTTPResponse - -try: - from pip._vendor.requests.packages.urllib3.util import is_fp_closed -except ImportError: - from pip._vendor.urllib3.util import is_fp_closed - -# Replicate some six behaviour -try: - text_type = unicode -except NameError: - text_type = str diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/pyparsing/diagram/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/pyparsing/diagram/__init__.py deleted file mode 100644 index 895b97b80407cd6a371d8376ad3f5d38f7e5b694..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/pyparsing/diagram/__init__.py +++ /dev/null @@ -1,593 +0,0 @@ -import railroad -from pip._vendor import pyparsing -from pip._vendor.pkg_resources import resource_filename -from typing import ( - List, - Optional, - NamedTuple, - Generic, - TypeVar, - Dict, - Callable, - Set, - Iterable, -) -from jinja2 import Template -from io import StringIO -import inspect - -with open(resource_filename(__name__, "template.jinja2"), encoding="utf-8") as fp: - template = Template(fp.read()) - -# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet -NamedDiagram = NamedTuple( - "NamedDiagram", - [("name", str), ("diagram", Optional[railroad.DiagramItem]), ("index", int)], -) -""" -A simple structure for associating a name with a railroad diagram -""" - -T = TypeVar("T") - - -class EachItem(railroad.Group): - """ - Custom railroad item to compose a: - - Group containing a - - OneOrMore containing a - - Choice of the elements in the Each - with the group label indicating that all must be matched - """ - - all_label = "[ALL]" - - def __init__(self, *items): - choice_item = railroad.Choice(len(items) - 1, *items) - one_or_more_item = railroad.OneOrMore(item=choice_item) - super().__init__(one_or_more_item, label=self.all_label) - - -class AnnotatedItem(railroad.Group): - """ - Simple subclass of Group that creates an annotation label - """ - - def __init__(self, label: str, item): - super().__init__(item=item, label="[{}]".format(label)) - - -class EditablePartial(Generic[T]): - """ - Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been - constructed. - """ - - # We need this here because the railroad constructors actually transform the data, so can't be called until the - # entire tree is assembled - - def __init__(self, func: Callable[..., T], args: list, kwargs: dict): - self.func = func - self.args = args - self.kwargs = kwargs - - @classmethod - def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]": - """ - If you call this function in the same way that you would call the constructor, it will store the arguments - as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3) - """ - return EditablePartial(func=func, args=list(args), kwargs=kwargs) - - @property - def name(self): - return self.kwargs["name"] - - def __call__(self) -> T: - """ - Evaluate the partial and return the result - """ - args = self.args.copy() - kwargs = self.kwargs.copy() - - # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g. 
- # args=['list', 'of', 'things']) - arg_spec = inspect.getfullargspec(self.func) - if arg_spec.varargs in self.kwargs: - args += kwargs.pop(arg_spec.varargs) - - return self.func(*args, **kwargs) - - -def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str: - """ - Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams - :params kwargs: kwargs to be passed in to the template - """ - data = [] - for diagram in diagrams: - io = StringIO() - diagram.diagram.writeSvg(io.write) - title = diagram.name - if diagram.index == 0: - title += " (root)" - data.append({"title": title, "text": "", "svg": io.getvalue()}) - - return template.render(diagrams=data, **kwargs) - - -def resolve_partial(partial: "EditablePartial[T]") -> T: - """ - Recursively resolves a collection of Partials into whatever type they are - """ - if isinstance(partial, EditablePartial): - partial.args = resolve_partial(partial.args) - partial.kwargs = resolve_partial(partial.kwargs) - return partial() - elif isinstance(partial, list): - return [resolve_partial(x) for x in partial] - elif isinstance(partial, dict): - return {key: resolve_partial(x) for key, x in partial.items()} - else: - return partial - - -def to_railroad( - element: pyparsing.ParserElement, - diagram_kwargs: Optional[dict] = None, - vertical: int = 3, - show_results_names: bool = False, -) -> List[NamedDiagram]: - """ - Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram - creation if you want to access the Railroad tree before it is converted to HTML - :param element: base element of the parser being diagrammed - :param diagram_kwargs: kwargs to pass to the Diagram() constructor - :param vertical: (optional) - int - limit at which number of alternatives should be - shown vertically instead of horizontally - :param show_results_names - bool to indicate whether results name annotations should be - included in the diagram - """ - # Convert the whole tree underneath the root - lookup = ConverterState(diagram_kwargs=diagram_kwargs or {}) - _to_diagram_element( - element, - lookup=lookup, - parent=None, - vertical=vertical, - show_results_names=show_results_names, - ) - - root_id = id(element) - # Convert the root if it hasn't been already - if root_id in lookup: - if not element.customName: - lookup[root_id].name = "" - lookup[root_id].mark_for_extraction(root_id, lookup, force=True) - - # Now that we're finished, we can convert from intermediate structures into Railroad elements - diags = list(lookup.diagrams.values()) - if len(diags) > 1: - # collapse out duplicate diags with the same name - seen = set() - deduped_diags = [] - for d in diags: - # don't extract SkipTo elements, they are uninformative as subdiagrams - if d.name == "...": - continue - if d.name is not None and d.name not in seen: - seen.add(d.name) - deduped_diags.append(d) - resolved = [resolve_partial(partial) for partial in deduped_diags] - else: - # special case - if just one diagram, always display it, even if - # it has no name - resolved = [resolve_partial(partial) for partial in diags] - return sorted(resolved, key=lambda diag: diag.index) - - -def _should_vertical( - specification: int, exprs: Iterable[pyparsing.ParserElement] -) -> bool: - """ - Returns true if we should return a vertical list of elements - """ - if specification is None: - return False - else: - return len(_visible_exprs(exprs)) >= specification - - -class ElementState: - """ - State recorded for an individual pyparsing 
Element - """ - - # Note: this should be a dataclass, but we have to support Python 3.5 - def __init__( - self, - element: pyparsing.ParserElement, - converted: EditablePartial, - parent: EditablePartial, - number: int, - name: str = None, - parent_index: Optional[int] = None, - ): - #: The pyparsing element that this represents - self.element: pyparsing.ParserElement = element - #: The name of the element - self.name: str = name - #: The output Railroad element in an unconverted state - self.converted: EditablePartial = converted - #: The parent Railroad element, which we store so that we can extract this if it's duplicated - self.parent: EditablePartial = parent - #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram - self.number: int = number - #: The index of this inside its parent - self.parent_index: Optional[int] = parent_index - #: If true, we should extract this out into a subdiagram - self.extract: bool = False - #: If true, all of this element's children have been filled out - self.complete: bool = False - - def mark_for_extraction( - self, el_id: int, state: "ConverterState", name: str = None, force: bool = False - ): - """ - Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram - :param el_id: id of the element - :param state: element/diagram state tracker - :param name: name to use for this element's text - :param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the - root element when we know we're finished - """ - self.extract = True - - # Set the name - if not self.name: - if name: - # Allow forcing a custom name - self.name = name - elif self.element.customName: - self.name = self.element.customName - else: - self.name = "" - - # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children - # to be added - # Also, if this is just a string literal etc, don't bother extracting it - if force or (self.complete and _worth_extracting(self.element)): - state.extract_into_diagram(el_id) - - -class ConverterState: - """ - Stores some state that persists between recursions into the element tree - """ - - def __init__(self, diagram_kwargs: Optional[dict] = None): - #: A dictionary mapping ParserElements to state relating to them - self._element_diagram_states: Dict[int, ElementState] = {} - #: A dictionary mapping ParserElement IDs to subdiagrams generated from them - self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {} - #: The index of the next unnamed element - self.unnamed_index: int = 1 - #: The index of the next element. 
This is used for sorting - self.index: int = 0 - #: Shared kwargs that are used to customize the construction of diagrams - self.diagram_kwargs: dict = diagram_kwargs or {} - self.extracted_diagram_names: Set[str] = set() - - def __setitem__(self, key: int, value: ElementState): - self._element_diagram_states[key] = value - - def __getitem__(self, key: int) -> ElementState: - return self._element_diagram_states[key] - - def __delitem__(self, key: int): - del self._element_diagram_states[key] - - def __contains__(self, key: int): - return key in self._element_diagram_states - - def generate_unnamed(self) -> int: - """ - Generate a number used in the name of an otherwise unnamed diagram - """ - self.unnamed_index += 1 - return self.unnamed_index - - def generate_index(self) -> int: - """ - Generate a number used to index a diagram - """ - self.index += 1 - return self.index - - def extract_into_diagram(self, el_id: int): - """ - Used when we encounter the same token twice in the same tree. When this - happens, we replace all instances of that token with a terminal, and - create a new subdiagram for the token - """ - position = self[el_id] - - # Replace the original definition of this element with a regular block - if position.parent: - ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name) - if "item" in position.parent.kwargs: - position.parent.kwargs["item"] = ret - elif "items" in position.parent.kwargs: - position.parent.kwargs["items"][position.parent_index] = ret - - # If the element we're extracting is a group, skip to its content but keep the title - if position.converted.func == railroad.Group: - content = position.converted.kwargs["item"] - else: - content = position.converted - - self.diagrams[el_id] = EditablePartial.from_call( - NamedDiagram, - name=position.name, - diagram=EditablePartial.from_call( - railroad.Diagram, content, **self.diagram_kwargs - ), - index=position.number, - ) - - del self[el_id] - - -def _worth_extracting(element: pyparsing.ParserElement) -> bool: - """ - Returns true if this element is worth having its own sub-diagram. 
Simply, if any of its children - themselves have children, then it's complex enough to extract - """ - children = element.recurse() - return any(child.recurse() for child in children) - - -def _apply_diagram_item_enhancements(fn): - """ - decorator to ensure enhancements to a diagram item (such as results name annotations) - get applied on return from _to_diagram_element (we do this since there are several - returns in _to_diagram_element) - """ - - def _inner( - element: pyparsing.ParserElement, - parent: Optional[EditablePartial], - lookup: ConverterState = None, - vertical: int = None, - index: int = 0, - name_hint: str = None, - show_results_names: bool = False, - ) -> Optional[EditablePartial]: - - ret = fn( - element, - parent, - lookup, - vertical, - index, - name_hint, - show_results_names, - ) - - # apply annotation for results name, if present - if show_results_names and ret is not None: - element_results_name = element.resultsName - if element_results_name: - # add "*" to indicate if this is a "list all results" name - element_results_name += "" if element.modalResults else "*" - ret = EditablePartial.from_call( - railroad.Group, item=ret, label=element_results_name - ) - - return ret - - return _inner - - -def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]): - non_diagramming_exprs = ( - pyparsing.ParseElementEnhance, - pyparsing.PositionToken, - pyparsing.And._ErrorStop, - ) - return [ - e - for e in exprs - if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs)) - ] - - -@_apply_diagram_item_enhancements -def _to_diagram_element( - element: pyparsing.ParserElement, - parent: Optional[EditablePartial], - lookup: ConverterState = None, - vertical: int = None, - index: int = 0, - name_hint: str = None, - show_results_names: bool = False, -) -> Optional[EditablePartial]: - """ - Recursively converts a PyParsing Element to a railroad Element - :param lookup: The shared converter state that keeps track of useful things - :param index: The index of this element within the parent - :param parent: The parent of this element in the output tree - :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default), - it sets the threshold of the number of items before we go vertical. 
If True, always go vertical, if False, never - do so - :param name_hint: If provided, this will override the generated name - :param show_results_names: bool flag indicating whether to add annotations for results names - :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed - """ - exprs = element.recurse() - name = name_hint or element.customName or element.__class__.__name__ - - # Python's id() is used to provide a unique identifier for elements - el_id = id(element) - - element_results_name = element.resultsName - - # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram - if not element.customName: - if isinstance( - element, - ( - pyparsing.TokenConverter, - # pyparsing.Forward, - pyparsing.Located, - ), - ): - # However, if this element has a useful custom name, and its child does not, we can pass it on to the child - if exprs: - if not exprs[0].customName: - propagated_name = name - else: - propagated_name = None - - return _to_diagram_element( - element.expr, - parent=parent, - lookup=lookup, - vertical=vertical, - index=index, - name_hint=propagated_name, - show_results_names=show_results_names, - ) - - # If the element isn't worth extracting, we always treat it as the first time we say it - if _worth_extracting(element): - if el_id in lookup: - # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate, - # so we have to extract it into a new diagram. - looked_up = lookup[el_id] - looked_up.mark_for_extraction(el_id, lookup, name=name_hint) - ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name) - return ret - - elif el_id in lookup.diagrams: - # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we - # just put in a marker element that refers to the sub-diagram - ret = EditablePartial.from_call( - railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] - ) - return ret - - # Recursively convert child elements - # Here we find the most relevant Railroad element for matching pyparsing Element - # We use ``items=[]`` here to hold the place for where the child elements will go once created - if isinstance(element, pyparsing.And): - # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat - # (all will have the same name, and resultsName) - if not exprs: - return None - if len(set((e.name, e.resultsName) for e in exprs)) == 1: - ret = EditablePartial.from_call( - railroad.OneOrMore, item="", repeat=str(len(exprs)) - ) - elif _should_vertical(vertical, exprs): - ret = EditablePartial.from_call(railroad.Stack, items=[]) - else: - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)): - if not exprs: - return None - if _should_vertical(vertical, exprs): - ret = EditablePartial.from_call(railroad.Choice, 0, items=[]) - else: - ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[]) - elif isinstance(element, pyparsing.Each): - if not exprs: - return None - ret = EditablePartial.from_call(EachItem, items=[]) - elif isinstance(element, pyparsing.NotAny): - ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="") - elif isinstance(element, pyparsing.FollowedBy): - ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="") - elif isinstance(element, pyparsing.PrecededBy): - ret = EditablePartial.from_call(AnnotatedItem, 
label="LOOKBEHIND", item="") - elif isinstance(element, pyparsing.Opt): - ret = EditablePartial.from_call(railroad.Optional, item="") - elif isinstance(element, pyparsing.OneOrMore): - ret = EditablePartial.from_call(railroad.OneOrMore, item="") - elif isinstance(element, pyparsing.ZeroOrMore): - ret = EditablePartial.from_call(railroad.ZeroOrMore, item="") - elif isinstance(element, pyparsing.Group): - ret = EditablePartial.from_call( - railroad.Group, item=None, label=element_results_name - ) - elif isinstance(element, pyparsing.Empty) and not element.customName: - # Skip unnamed "Empty" elements - ret = None - elif len(exprs) > 1: - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - elif len(exprs) > 0 and not element_results_name: - ret = EditablePartial.from_call(railroad.Group, item="", label=name) - else: - terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName) - ret = terminal - - if ret is None: - return - - # Indicate this element's position in the tree so we can extract it if necessary - lookup[el_id] = ElementState( - element=element, - converted=ret, - parent=parent, - parent_index=index, - number=lookup.generate_index(), - ) - if element.customName: - lookup[el_id].mark_for_extraction(el_id, lookup, element.customName) - - i = 0 - for expr in exprs: - # Add a placeholder index in case we have to extract the child before we even add it to the parent - if "items" in ret.kwargs: - ret.kwargs["items"].insert(i, None) - - item = _to_diagram_element( - expr, - parent=ret, - lookup=lookup, - vertical=vertical, - index=i, - show_results_names=show_results_names, - ) - - # Some elements don't need to be shown in the diagram - if item is not None: - if "item" in ret.kwargs: - ret.kwargs["item"] = item - elif "items" in ret.kwargs: - # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal - ret.kwargs["items"][i] = item - i += 1 - elif "items" in ret.kwargs: - # If we're supposed to skip this element, remove it from the parent - del ret.kwargs["items"][i] - - # If all this items children are none, skip this item - if ret and ( - ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0) - or ("item" in ret.kwargs and ret.kwargs["item"] is None) - ): - ret = EditablePartial.from_call(railroad.Terminal, name) - - # Mark this element as "complete", ie it has all of its children - if el_id in lookup: - lookup[el_id].complete = True - - if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete: - lookup.extract_into_diagram(el_id) - if ret is not None: - ret = EditablePartial.from_call( - railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] - ) - - return ret diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/tenacity/wait.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/tenacity/wait.py deleted file mode 100644 index 6ed97a7bcdc0d0d0e13f5e9a5a38996a24a3b642..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/tenacity/wait.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright 2016–2021 Julien Danjou -# Copyright 2016 Joshua Harlow -# Copyright 2013-2014 Ray Holder -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc -import random -import typing - -from pip._vendor.tenacity import _utils - -if typing.TYPE_CHECKING: - from pip._vendor.tenacity import RetryCallState - - -class wait_base(abc.ABC): - """Abstract base class for wait strategies.""" - - @abc.abstractmethod - def __call__(self, retry_state: "RetryCallState") -> float: - pass - - def __add__(self, other: "wait_base") -> "wait_combine": - return wait_combine(self, other) - - def __radd__(self, other: "wait_base") -> typing.Union["wait_combine", "wait_base"]: - # make it possible to use multiple waits with the built-in sum function - if other == 0: - return self - return self.__add__(other) - - -class wait_fixed(wait_base): - """Wait strategy that waits a fixed amount of time between each retry.""" - - def __init__(self, wait: float) -> None: - self.wait_fixed = wait - - def __call__(self, retry_state: "RetryCallState") -> float: - return self.wait_fixed - - -class wait_none(wait_fixed): - """Wait strategy that doesn't wait at all before retrying.""" - - def __init__(self) -> None: - super().__init__(0) - - -class wait_random(wait_base): - """Wait strategy that waits a random amount of time between min/max.""" - - def __init__(self, min: typing.Union[int, float] = 0, max: typing.Union[int, float] = 1) -> None: # noqa - self.wait_random_min = min - self.wait_random_max = max - - def __call__(self, retry_state: "RetryCallState") -> float: - return self.wait_random_min + (random.random() * (self.wait_random_max - self.wait_random_min)) - - -class wait_combine(wait_base): - """Combine several waiting strategies.""" - - def __init__(self, *strategies: wait_base) -> None: - self.wait_funcs = strategies - - def __call__(self, retry_state: "RetryCallState") -> float: - return sum(x(retry_state=retry_state) for x in self.wait_funcs) - - -class wait_chain(wait_base): - """Chain two or more waiting strategies. - - If all strategies are exhausted, the very last strategy is used - thereafter. - - For example:: - - @retry(wait=wait_chain(*[wait_fixed(1) for i in range(3)] + - [wait_fixed(2) for j in range(5)] + - [wait_fixed(5) for k in range(4))) - def wait_chained(): - print("Wait 1s for 3 attempts, 2s for 5 attempts and 5s - thereafter.") - """ - - def __init__(self, *strategies: wait_base) -> None: - self.strategies = strategies - - def __call__(self, retry_state: "RetryCallState") -> float: - wait_func_no = min(max(retry_state.attempt_number, 1), len(self.strategies)) - wait_func = self.strategies[wait_func_no - 1] - return wait_func(retry_state=retry_state) - - -class wait_incrementing(wait_base): - """Wait an incremental amount of time after each attempt. - - Starting at a starting value and incrementing by a value for each attempt - (and restricting the upper limit to some maximum value). 
- """ - - def __init__( - self, - start: typing.Union[int, float] = 0, - increment: typing.Union[int, float] = 100, - max: typing.Union[int, float] = _utils.MAX_WAIT, # noqa - ) -> None: - self.start = start - self.increment = increment - self.max = max - - def __call__(self, retry_state: "RetryCallState") -> float: - result = self.start + (self.increment * (retry_state.attempt_number - 1)) - return max(0, min(result, self.max)) - - -class wait_exponential(wait_base): - """Wait strategy that applies exponential backoff. - - It allows for a customized multiplier and an ability to restrict the - upper and lower limits to some maximum and minimum value. - - The intervals are fixed (i.e. there is no jitter), so this strategy is - suitable for balancing retries against latency when a required resource is - unavailable for an unknown duration, but *not* suitable for resolving - contention between multiple processes for a shared resource. Use - wait_random_exponential for the latter case. - """ - - def __init__( - self, - multiplier: typing.Union[int, float] = 1, - max: typing.Union[int, float] = _utils.MAX_WAIT, # noqa - exp_base: typing.Union[int, float] = 2, - min: typing.Union[int, float] = 0, # noqa - ) -> None: - self.multiplier = multiplier - self.min = min - self.max = max - self.exp_base = exp_base - - def __call__(self, retry_state: "RetryCallState") -> float: - try: - exp = self.exp_base ** (retry_state.attempt_number - 1) - result = self.multiplier * exp - except OverflowError: - return self.max - return max(max(0, self.min), min(result, self.max)) - - -class wait_random_exponential(wait_exponential): - """Random wait with exponentially widening window. - - An exponential backoff strategy used to mediate contention between multiple - uncoordinated processes for a shared resource in distributed systems. This - is the sense in which "exponential backoff" is meant in e.g. Ethernet - networking, and corresponds to the "Full Jitter" algorithm described in - this blog post: - - https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ - - Each retry occurs at a random time in a geometrically expanding interval. - It allows for a custom multiplier and an ability to restrict the upper - limit of the random interval to some maximum value. - - Example:: - - wait_random_exponential(multiplier=0.5, # initial window 0.5s - max=60) # max 60s timeout - - When waiting for an unavailable resource to become available again, as - opposed to trying to resolve contention for a shared resource, the - wait_exponential strategy (which uses a fixed interval) may be preferable. 
- - """ - - def __call__(self, retry_state: "RetryCallState") -> float: - high = super().__call__(retry_state=retry_state) - return random.uniform(0, high) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/urllib3/connectionpool.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/urllib3/connectionpool.py deleted file mode 100644 index 15bffcb23a902baa14f2332fddc83a416cc783b1..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/urllib3/connectionpool.py +++ /dev/null @@ -1,1108 +0,0 @@ -from __future__ import absolute_import - -import errno -import logging -import re -import socket -import sys -import warnings -from socket import error as SocketError -from socket import timeout as SocketTimeout - -from .connection import ( - BaseSSLError, - BrokenPipeError, - DummyConnection, - HTTPConnection, - HTTPException, - HTTPSConnection, - VerifiedHTTPSConnection, - port_by_scheme, -) -from .exceptions import ( - ClosedPoolError, - EmptyPoolError, - HeaderParsingError, - HostChangedError, - InsecureRequestWarning, - LocationValueError, - MaxRetryError, - NewConnectionError, - ProtocolError, - ProxyError, - ReadTimeoutError, - SSLError, - TimeoutError, -) -from .packages import six -from .packages.six.moves import queue -from .request import RequestMethods -from .response import HTTPResponse -from .util.connection import is_connection_dropped -from .util.proxy import connection_requires_http_tunnel -from .util.queue import LifoQueue -from .util.request import set_file_position -from .util.response import assert_header_parsing -from .util.retry import Retry -from .util.ssl_match_hostname import CertificateError -from .util.timeout import Timeout -from .util.url import Url, _encode_target -from .util.url import _normalize_host as normalize_host -from .util.url import get_host, parse_url - -xrange = six.moves.xrange - -log = logging.getLogger(__name__) - -_Default = object() - - -# Pool objects -class ConnectionPool(object): - """ - Base class for all connection pools, such as - :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. - - .. note:: - ConnectionPool.urlopen() does not normalize or percent-encode target URIs - which is useful if your target server doesn't support percent-encoded - target URIs. - """ - - scheme = None - QueueCls = LifoQueue - - def __init__(self, host, port=None): - if not host: - raise LocationValueError("No host specified.") - - self.host = _normalize_host(host, scheme=self.scheme) - self._proxy_host = host.lower() - self.port = port - - def __str__(self): - return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.close() - # Return False to re-raise any potential exceptions - return False - - def close(self): - """ - Close all pooled connections and disable the pool. - """ - pass - - -# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 -_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} - - -class HTTPConnectionPool(ConnectionPool, RequestMethods): - """ - Thread-safe connection pool for one host. - - :param host: - Host used for this HTTP Connection (e.g. "localhost"), passed into - :class:`http.client.HTTPConnection`. - - :param port: - Port used for this HTTP Connection (None is equivalent to 80), passed - into :class:`http.client.HTTPConnection`. 
-
-    :param strict:
-        Causes BadStatusLine to be raised if the status line can't be parsed
-        as a valid HTTP/1.0 or 1.1 status line, passed into
-        :class:`http.client.HTTPConnection`.
-
-        .. note::
-           Only works in Python 2. This parameter is ignored in Python 3.
-
-    :param timeout:
-        Socket timeout in seconds for each individual connection. This can
-        be a float or integer, which sets the timeout for the HTTP request,
-        or an instance of :class:`urllib3.util.Timeout` which gives you more
-        fine-grained control over request timeouts. Once the constructor has
-        run, this is always a `urllib3.util.Timeout` object.
-
-    :param maxsize:
-        Number of connections to save that can be reused. More than 1 is useful
-        in multithreaded situations. If ``block`` is set to False, more
-        connections will be created but they will not be saved once they've
-        been used.
-
-    :param block:
-        If set to True, no more than ``maxsize`` connections will be used at
-        a time. When no free connections are available, the call will block
-        until a connection has been released. This is a useful side effect for
-        particular multithreaded situations where one does not want to use more
-        than maxsize connections per host to prevent flooding.
-
-    :param headers:
-        Headers to include with all requests, unless other headers are given
-        explicitly.
-
-    :param retries:
-        Retry configuration to use by default with requests in this pool.
-
-    :param _proxy:
-        Parsed proxy URL, should not be used directly, instead, see
-        :class:`urllib3.ProxyManager`
-
-    :param _proxy_headers:
-        A dictionary with proxy headers, should not be used directly,
-        instead, see :class:`urllib3.ProxyManager`
-
-    :param \\**conn_kw:
-        Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
-        :class:`urllib3.connection.HTTPSConnection` instances.
-    """
-
-    scheme = "http"
-    ConnectionCls = HTTPConnection
-    ResponseCls = HTTPResponse
-
-    def __init__(
-        self,
-        host,
-        port=None,
-        strict=False,
-        timeout=Timeout.DEFAULT_TIMEOUT,
-        maxsize=1,
-        block=False,
-        headers=None,
-        retries=None,
-        _proxy=None,
-        _proxy_headers=None,
-        _proxy_config=None,
-        **conn_kw
-    ):
-        ConnectionPool.__init__(self, host, port)
-        RequestMethods.__init__(self, headers)
-
-        self.strict = strict
-
-        if not isinstance(timeout, Timeout):
-            timeout = Timeout.from_float(timeout)
-
-        if retries is None:
-            retries = Retry.DEFAULT
-
-        self.timeout = timeout
-        self.retries = retries
-
-        self.pool = self.QueueCls(maxsize)
-        self.block = block
-
-        self.proxy = _proxy
-        self.proxy_headers = _proxy_headers or {}
-        self.proxy_config = _proxy_config
-
-        # Fill the queue up so that doing get() on it will block properly
-        for _ in xrange(maxsize):
-            self.pool.put(None)
-
-        # These are mostly for testing and debugging purposes.
-        self.num_connections = 0
-        self.num_requests = 0
-        self.conn_kw = conn_kw
-
-        if self.proxy:
-            # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
-            # We cannot know if the user has added default socket options, so we cannot replace the
-            # list.
-            self.conn_kw.setdefault("socket_options", [])
-
-            self.conn_kw["proxy"] = self.proxy
-            self.conn_kw["proxy_config"] = self.proxy_config
-
-    def _new_conn(self):
-        """
-        Return a fresh :class:`HTTPConnection`.
- """ - self.num_connections += 1 - log.debug( - "Starting new HTTP connection (%d): %s:%s", - self.num_connections, - self.host, - self.port or "80", - ) - - conn = self.ConnectionCls( - host=self.host, - port=self.port, - timeout=self.timeout.connect_timeout, - strict=self.strict, - **self.conn_kw - ) - return conn - - def _get_conn(self, timeout=None): - """ - Get a connection. Will return a pooled connection if one is available. - - If no connections are available and :prop:`.block` is ``False``, then a - fresh connection is returned. - - :param timeout: - Seconds to wait before giving up and raising - :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and - :prop:`.block` is ``True``. - """ - conn = None - try: - conn = self.pool.get(block=self.block, timeout=timeout) - - except AttributeError: # self.pool is None - raise ClosedPoolError(self, "Pool is closed.") - - except queue.Empty: - if self.block: - raise EmptyPoolError( - self, - "Pool reached maximum size and no more connections are allowed.", - ) - pass # Oh well, we'll create a new connection then - - # If this is a persistent connection, check if it got disconnected - if conn and is_connection_dropped(conn): - log.debug("Resetting dropped connection: %s", self.host) - conn.close() - if getattr(conn, "auto_open", 1) == 0: - # This is a proxied connection that has been mutated by - # http.client._tunnel() and cannot be reused (since it would - # attempt to bypass the proxy) - conn = None - - return conn or self._new_conn() - - def _put_conn(self, conn): - """ - Put a connection back into the pool. - - :param conn: - Connection object for the current host and port as returned by - :meth:`._new_conn` or :meth:`._get_conn`. - - If the pool is already full, the connection is closed and discarded - because we exceeded maxsize. If connections are discarded frequently, - then maxsize should be increased. - - If the pool is closed, then the connection will be closed and discarded. - """ - try: - self.pool.put(conn, block=False) - return # Everything is dandy, done. - except AttributeError: - # self.pool is None. - pass - except queue.Full: - # This should never happen if self.block == True - log.warning( - "Connection pool is full, discarding connection: %s. Connection pool size: %s", - self.host, - self.pool.qsize(), - ) - # Connection never got put back into the pool, close it. - if conn: - conn.close() - - def _validate_conn(self, conn): - """ - Called right before a request is made, after the socket is created. - """ - pass - - def _prepare_proxy(self, conn): - # Nothing to do for HTTP connections. - pass - - def _get_timeout(self, timeout): - """Helper that always returns a :class:`urllib3.util.Timeout`""" - if timeout is _Default: - return self.timeout.clone() - - if isinstance(timeout, Timeout): - return timeout.clone() - else: - # User passed us an int/float. This is for backwards compatibility, - # can be removed later - return Timeout.from_float(timeout) - - def _raise_timeout(self, err, url, timeout_value): - """Is the error actually a timeout? Will raise a ReadTimeout or pass""" - - if isinstance(err, SocketTimeout): - raise ReadTimeoutError( - self, url, "Read timed out. (read timeout=%s)" % timeout_value - ) - - # See the above comment about EAGAIN in Python 3. In Python 2 we have - # to specifically catch it and throw the timeout error - if hasattr(err, "errno") and err.errno in _blocking_errnos: - raise ReadTimeoutError( - self, url, "Read timed out. 
(read timeout=%s)" % timeout_value - ) - - # Catch possible read timeouts thrown as SSL errors. If not the - # case, rethrow the original. We need to do this because of: - # http://bugs.python.org/issue10272 - if "timed out" in str(err) or "did not complete (read)" in str( - err - ): # Python < 2.7.4 - raise ReadTimeoutError( - self, url, "Read timed out. (read timeout=%s)" % timeout_value - ) - - def _make_request( - self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw - ): - """ - Perform a request on a given urllib connection object taken from our - pool. - - :param conn: - a connection from one of our connection pools - - :param timeout: - Socket timeout in seconds for the request. This can be a - float or integer, which will set the same timeout value for - the socket connect and the socket read, or an instance of - :class:`urllib3.util.Timeout`, which gives you more fine-grained - control over your timeouts. - """ - self.num_requests += 1 - - timeout_obj = self._get_timeout(timeout) - timeout_obj.start_connect() - conn.timeout = timeout_obj.connect_timeout - - # Trigger any extra validation we need to do. - try: - self._validate_conn(conn) - except (SocketTimeout, BaseSSLError) as e: - # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout. - self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) - raise - - # conn.request() calls http.client.*.request, not the method in - # urllib3.request. It also calls makefile (recv) on the socket. - try: - if chunked: - conn.request_chunked(method, url, **httplib_request_kw) - else: - conn.request(method, url, **httplib_request_kw) - - # We are swallowing BrokenPipeError (errno.EPIPE) since the server is - # legitimately able to close the connection after sending a valid response. - # With this behaviour, the received response is still readable. - except BrokenPipeError: - # Python 3 - pass - except IOError as e: - # Python 2 and macOS/Linux - # EPIPE and ESHUTDOWN are BrokenPipeError on Python 2, and EPROTOTYPE is needed on macOS - # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/ - if e.errno not in { - errno.EPIPE, - errno.ESHUTDOWN, - errno.EPROTOTYPE, - }: - raise - - # Reset the timeout for the recv() on the socket - read_timeout = timeout_obj.read_timeout - - # App Engine doesn't have a sock attr - if getattr(conn, "sock", None): - # In Python 3 socket.py will catch EAGAIN and return None when you - # try and read into the file pointer created by http.client, which - # instead raises a BadStatusLine exception. Instead of catching - # the exception and assuming all BadStatusLine exceptions are read - # timeouts, check for a zero timeout before making the request. - if read_timeout == 0: - raise ReadTimeoutError( - self, url, "Read timed out. (read timeout=%s)" % read_timeout - ) - if read_timeout is Timeout.DEFAULT_TIMEOUT: - conn.sock.settimeout(socket.getdefaulttimeout()) - else: # None or a value - conn.sock.settimeout(read_timeout) - - # Receive the response from the server - try: - try: - # Python 2.7, use buffering of HTTP responses - httplib_response = conn.getresponse(buffering=True) - except TypeError: - # Python 3 - try: - httplib_response = conn.getresponse() - except BaseException as e: - # Remove the TypeError from the exception chain in - # Python 3 (including for exceptions like SystemExit). - # Otherwise it looks like a bug in the code. 
-                    six.raise_from(e, None)
-        except (SocketTimeout, BaseSSLError, SocketError) as e:
-            self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
-            raise
-
-        # AppEngine doesn't have a version attr.
-        http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
-        log.debug(
-            '%s://%s:%s "%s %s %s" %s %s',
-            self.scheme,
-            self.host,
-            self.port,
-            method,
-            url,
-            http_version,
-            httplib_response.status,
-            httplib_response.length,
-        )
-
-        try:
-            assert_header_parsing(httplib_response.msg)
-        except (HeaderParsingError, TypeError) as hpe:  # Platform-specific: Python 3
-            log.warning(
-                "Failed to parse headers (url=%s): %s",
-                self._absolute_url(url),
-                hpe,
-                exc_info=True,
-            )
-
-        return httplib_response
-
-    def _absolute_url(self, path):
-        return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
-
-    def close(self):
-        """
-        Close all pooled connections and disable the pool.
-        """
-        if self.pool is None:
-            return
-        # Disable access to the pool
-        old_pool, self.pool = self.pool, None
-
-        try:
-            while True:
-                conn = old_pool.get(block=False)
-                if conn:
-                    conn.close()
-
-        except queue.Empty:
-            pass  # Done.
-
-    def is_same_host(self, url):
-        """
-        Check if the given ``url`` is a member of the same host as this
-        connection pool.
-        """
-        if url.startswith("/"):
-            return True
-
-        # TODO: Add optional support for socket.gethostbyname checking.
-        scheme, host, port = get_host(url)
-        if host is not None:
-            host = _normalize_host(host, scheme=scheme)
-
-        # Use explicit default port for comparison when none is given
-        if self.port and not port:
-            port = port_by_scheme.get(scheme)
-        elif not self.port and port == port_by_scheme.get(scheme):
-            port = None
-
-        return (scheme, host, port) == (self.scheme, self.host, self.port)
-
-    def urlopen(
-        self,
-        method,
-        url,
-        body=None,
-        headers=None,
-        retries=None,
-        redirect=True,
-        assert_same_host=True,
-        timeout=_Default,
-        pool_timeout=None,
-        release_conn=None,
-        chunked=False,
-        body_pos=None,
-        **response_kw
-    ):
-        """
-        Get a connection from the pool and perform an HTTP request. This is the
-        lowest level call for making a request, so you'll need to specify all
-        the raw details.
-
-        .. note::
-
-           More commonly, it's appropriate to use a convenience method provided
-           by :class:`.RequestMethods`, such as :meth:`request`.
-
-        .. note::
-
-           `release_conn` will only behave as expected if
-           `preload_content=False` because we want to make
-           `preload_content=False` the default behaviour someday soon without
-           breaking backwards compatibility.
-
-        :param method:
-            HTTP request method (such as GET, POST, PUT, etc.)
-
-        :param url:
-            The URL to perform the request on.
-
-        :param body:
-            Data to send in the request body, either :class:`str`, :class:`bytes`,
-            an iterable of :class:`str`/:class:`bytes`, or a file-like object.
-
-        :param headers:
-            Dictionary of custom headers to send, such as User-Agent,
-            If-None-Match, etc. If None, pool headers are used. If provided,
-            these headers completely replace any pool-specific headers.
-
-        :param retries:
-            Configure the number of retries to allow before raising a
-            :class:`~urllib3.exceptions.MaxRetryError` exception.
-
-            Pass ``None`` to retry until you receive a response. Pass a
-            :class:`~urllib3.util.retry.Retry` object for fine-grained control
-            over different types of retries.
-            Pass an integer number to retry connection errors that many times,
-            but no other types of errors. Pass zero to never retry.
-
-            If ``False``, then retries are disabled and any exception is raised
-            immediately. Also, instead of raising a MaxRetryError on redirects,
-            the redirect response will be returned.
-
-        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
-
-        :param redirect:
-            If True, automatically handle redirects (status codes 301, 302,
-            303, 307, 308). Each redirect counts as a retry. Disabling retries
-            will disable redirect, too.
-
-        :param assert_same_host:
-            If ``True``, will make sure that the host of the pool requests is
-            consistent else will raise HostChangedError. When ``False``, you can
-            use the pool on an HTTP proxy and request foreign hosts.
-
-        :param timeout:
-            If specified, overrides the default timeout for this one
-            request. It may be a float (in seconds) or an instance of
-            :class:`urllib3.util.Timeout`.
-
-        :param pool_timeout:
-            If set and the pool is set to block=True, then this method will
-            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
-            connection is available within the time period.
-
-        :param release_conn:
-            If False, then the urlopen call will not release the connection
-            back into the pool once a response is received (but will release if
-            you read the entire contents of the response such as when
-            `preload_content=True`). This is useful if you're not preloading
-            the response's content immediately. You will need to call
-            ``r.release_conn()`` on the response ``r`` to return the connection
-            back into the pool. If None, it takes the value of
-            ``response_kw.get('preload_content', True)``.
-
-        :param chunked:
-            If True, urllib3 will send the body using chunked transfer
-            encoding. Otherwise, urllib3 will send the body using the standard
-            content-length form. Defaults to False.
-
-        :param int body_pos:
-            Position to seek to in file-like body in the event of a retry or
-            redirect. Typically this won't need to be set because urllib3 will
-            auto-populate the value when needed.
-
-        :param \\**response_kw:
-            Additional parameters are passed to
-            :meth:`urllib3.response.HTTPResponse.from_httplib`
-        """
-
-        parsed_url = parse_url(url)
-        destination_scheme = parsed_url.scheme
-
-        if headers is None:
-            headers = self.headers
-
-        if not isinstance(retries, Retry):
-            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
-
-        if release_conn is None:
-            release_conn = response_kw.get("preload_content", True)
-
-        # Check host
-        if assert_same_host and not self.is_same_host(url):
-            raise HostChangedError(self, url, retries)
-
-        # Ensure that the URL we're connecting to is properly encoded
-        if url.startswith("/"):
-            url = six.ensure_str(_encode_target(url))
-        else:
-            url = six.ensure_str(parsed_url.url)
-
-        conn = None
-
-        # Track whether `conn` needs to be released before
-        # returning/raising/recursing. Update this variable if necessary, and
-        # leave `release_conn` constant throughout the function. That way, if
-        # the function recurses, the original value of `release_conn` will be
-        # passed down into the recursive call, and its value will be respected.
-        #
-        # See issue #651 [1] for details.
-        #
-        # [1] <https://github.com/urllib3/urllib3/issues/651>
-        release_this_conn = release_conn
-
-        http_tunnel_required = connection_requires_http_tunnel(
-            self.proxy, self.proxy_config, destination_scheme
-        )
-
-        # Merge the proxy headers. Only done when not using HTTP CONNECT. We
-        # have to copy the headers dict so we can safely change it without those
-        # changes being reflected in anyone else's copy.
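-        # (Editor's note, illustrative: when the destination is plain HTTP
-        # routed through a proxy there is no CONNECT tunnel, so headers such
-        # as Proxy-Authorization travel on the request itself and are merged
-        # in here; in the tunneled case they are sent by _prepare_proxy()
-        # through conn.set_tunnel() instead.)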
-        if not http_tunnel_required:
-            headers = headers.copy()
-            headers.update(self.proxy_headers)
-
-        # Must keep the exception bound to a separate variable or else Python 3
-        # complains about UnboundLocalError.
-        err = None
-
-        # Keep track of whether we cleanly exited the except block. This
-        # ensures we do proper cleanup in finally.
-        clean_exit = False
-
-        # Rewind body position, if needed. Record current position
-        # for future rewinds in the event of a redirect/retry.
-        body_pos = set_file_position(body, body_pos)
-
-        try:
-            # Request a connection from the queue.
-            timeout_obj = self._get_timeout(timeout)
-            conn = self._get_conn(timeout=pool_timeout)
-
-            conn.timeout = timeout_obj.connect_timeout
-
-            is_new_proxy_conn = self.proxy is not None and not getattr(
-                conn, "sock", None
-            )
-            if is_new_proxy_conn and http_tunnel_required:
-                self._prepare_proxy(conn)
-
-            # Make the request on the httplib connection object.
-            httplib_response = self._make_request(
-                conn,
-                method,
-                url,
-                timeout=timeout_obj,
-                body=body,
-                headers=headers,
-                chunked=chunked,
-            )
-
-            # If we're going to release the connection in ``finally:``, then
-            # the response doesn't need to know about the connection. Otherwise
-            # it will also try to release it and we'll have a double-release
-            # mess.
-            response_conn = conn if not release_conn else None
-
-            # Pass method to Response for length checking
-            response_kw["request_method"] = method
-
-            # Import httplib's response into our own wrapper object
-            response = self.ResponseCls.from_httplib(
-                httplib_response,
-                pool=self,
-                connection=response_conn,
-                retries=retries,
-                **response_kw
-            )
-
-            # Everything went great!
-            clean_exit = True
-
-        except EmptyPoolError:
-            # Didn't get a connection from the pool, no need to clean up
-            clean_exit = True
-            release_this_conn = False
-            raise
-
-        except (
-            TimeoutError,
-            HTTPException,
-            SocketError,
-            ProtocolError,
-            BaseSSLError,
-            SSLError,
-            CertificateError,
-        ) as e:
-            # Discard the connection for these exceptions. It will be
-            # replaced during the next _get_conn() call.
-            clean_exit = False
-
-            def _is_ssl_error_message_from_http_proxy(ssl_error):
-                # We're trying to detect the message 'WRONG_VERSION_NUMBER' but
-                # SSLErrors are kinda all over the place when it comes to the message,
-                # so we try to cover our bases here!
-                message = " ".join(re.split("[^a-z]", str(ssl_error).lower()))
-                return (
-                    "wrong version number" in message or "unknown protocol" in message
-                )
-
-            # Try to detect a common user error with proxies which is to
-            # set an HTTP proxy to be HTTPS when it should be 'http://'
-            # (ie {'http': 'http://proxy', 'https': 'https://proxy'})
-            # Instead we add a nice error message and point to a URL.
-            if (
-                isinstance(e, BaseSSLError)
-                and self.proxy
-                and _is_ssl_error_message_from_http_proxy(e)
-            ):
-                e = ProxyError(
-                    "Your proxy appears to only use HTTP and not HTTPS, "
-                    "try changing your proxy URL to be HTTP. See: "
-                    "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
-                    "#https-proxy-error-http-proxy",
-                    SSLError(e),
-                )
-            elif isinstance(e, (BaseSSLError, CertificateError)):
-                e = SSLError(e)
-            elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
-                e = ProxyError("Cannot connect to proxy.", e)
-            elif isinstance(e, (SocketError, HTTPException)):
-                e = ProtocolError("Connection aborted.", e)
-
-            retries = retries.increment(
-                method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
-            )
-            retries.sleep()
-
-            # Keep track of the error for the retry warning.
-            err = e
-
-        finally:
-            if not clean_exit:
-                # We hit some kind of exception, handled or otherwise. We need
-                # to throw the connection away unless explicitly told not to.
-                # Close the connection, set the variable to None, and make sure
-                # we put the None back in the pool to avoid leaking it.
-                conn = conn and conn.close()
-                release_this_conn = True
-
-            if release_this_conn:
-                # Put the connection back to be reused. If the connection is
-                # expired then it will be None, which will get replaced with a
-                # fresh connection during _get_conn.
-                self._put_conn(conn)
-
-        if not conn:
-            # Try again
-            log.warning(
-                "Retrying (%r) after connection broken by '%r': %s", retries, err, url
-            )
-            return self.urlopen(
-                method,
-                url,
-                body,
-                headers,
-                retries,
-                redirect,
-                assert_same_host,
-                timeout=timeout,
-                pool_timeout=pool_timeout,
-                release_conn=release_conn,
-                chunked=chunked,
-                body_pos=body_pos,
-                **response_kw
-            )
-
-        # Handle redirect?
-        redirect_location = redirect and response.get_redirect_location()
-        if redirect_location:
-            if response.status == 303:
-                method = "GET"
-
-            try:
-                retries = retries.increment(method, url, response=response, _pool=self)
-            except MaxRetryError:
-                if retries.raise_on_redirect:
-                    response.drain_conn()
-                    raise
-                return response
-
-            response.drain_conn()
-            retries.sleep_for_retry(response)
-            log.debug("Redirecting %s -> %s", url, redirect_location)
-            return self.urlopen(
-                method,
-                redirect_location,
-                body,
-                headers,
-                retries=retries,
-                redirect=redirect,
-                assert_same_host=assert_same_host,
-                timeout=timeout,
-                pool_timeout=pool_timeout,
-                release_conn=release_conn,
-                chunked=chunked,
-                body_pos=body_pos,
-                **response_kw
-            )
-
-        # Check if we should retry the HTTP response.
-        has_retry_after = bool(response.getheader("Retry-After"))
-        if retries.is_retry(method, response.status, has_retry_after):
-            try:
-                retries = retries.increment(method, url, response=response, _pool=self)
-            except MaxRetryError:
-                if retries.raise_on_status:
-                    response.drain_conn()
-                    raise
-                return response
-
-            response.drain_conn()
-            retries.sleep(response)
-            log.debug("Retry: %s", url)
-            return self.urlopen(
-                method,
-                url,
-                body,
-                headers,
-                retries=retries,
-                redirect=redirect,
-                assert_same_host=assert_same_host,
-                timeout=timeout,
-                pool_timeout=pool_timeout,
-                release_conn=release_conn,
-                chunked=chunked,
-                body_pos=body_pos,
-                **response_kw
-            )
-
-        return response
-
-
-class HTTPSConnectionPool(HTTPConnectionPool):
-    """
-    Same as :class:`.HTTPConnectionPool`, but HTTPS.
-
-    :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
-    ``assert_hostname`` and ``host`` in this order to verify connections.
-    If ``assert_hostname`` is False, no verification is done.
-
-    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
-    ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
-    is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
-    the connection socket into an SSL socket.
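-
-    Illustrative sketch (editor's addition, not part of the original file);
-    ``request()`` comes from :class:`.RequestMethods` and ``maxsize`` has the
-    meaning documented on :class:`.HTTPConnectionPool`::
-
-        pool = HTTPSConnectionPool("example.com", port=443, maxsize=4)
-        response = pool.request("GET", "/")
-        print(response.status)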
- """ - - scheme = "https" - ConnectionCls = HTTPSConnection - - def __init__( - self, - host, - port=None, - strict=False, - timeout=Timeout.DEFAULT_TIMEOUT, - maxsize=1, - block=False, - headers=None, - retries=None, - _proxy=None, - _proxy_headers=None, - key_file=None, - cert_file=None, - cert_reqs=None, - key_password=None, - ca_certs=None, - ssl_version=None, - assert_hostname=None, - assert_fingerprint=None, - ca_cert_dir=None, - **conn_kw - ): - - HTTPConnectionPool.__init__( - self, - host, - port, - strict, - timeout, - maxsize, - block, - headers, - retries, - _proxy, - _proxy_headers, - **conn_kw - ) - - self.key_file = key_file - self.cert_file = cert_file - self.cert_reqs = cert_reqs - self.key_password = key_password - self.ca_certs = ca_certs - self.ca_cert_dir = ca_cert_dir - self.ssl_version = ssl_version - self.assert_hostname = assert_hostname - self.assert_fingerprint = assert_fingerprint - - def _prepare_conn(self, conn): - """ - Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket` - and establish the tunnel if proxy is used. - """ - - if isinstance(conn, VerifiedHTTPSConnection): - conn.set_cert( - key_file=self.key_file, - key_password=self.key_password, - cert_file=self.cert_file, - cert_reqs=self.cert_reqs, - ca_certs=self.ca_certs, - ca_cert_dir=self.ca_cert_dir, - assert_hostname=self.assert_hostname, - assert_fingerprint=self.assert_fingerprint, - ) - conn.ssl_version = self.ssl_version - return conn - - def _prepare_proxy(self, conn): - """ - Establishes a tunnel connection through HTTP CONNECT. - - Tunnel connection is established early because otherwise httplib would - improperly set Host: header to proxy's IP:port. - """ - - conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers) - - if self.proxy.scheme == "https": - conn.tls_in_tls_required = True - - conn.connect() - - def _new_conn(self): - """ - Return a fresh :class:`http.client.HTTPSConnection`. - """ - self.num_connections += 1 - log.debug( - "Starting new HTTPS connection (%d): %s:%s", - self.num_connections, - self.host, - self.port or "443", - ) - - if not self.ConnectionCls or self.ConnectionCls is DummyConnection: - raise SSLError( - "Can't connect to HTTPS URL because the SSL module is not available." - ) - - actual_host = self.host - actual_port = self.port - if self.proxy is not None: - actual_host = self.proxy.host - actual_port = self.proxy.port - - conn = self.ConnectionCls( - host=actual_host, - port=actual_port, - timeout=self.timeout.connect_timeout, - strict=self.strict, - cert_file=self.cert_file, - key_file=self.key_file, - key_password=self.key_password, - **self.conn_kw - ) - - return self._prepare_conn(conn) - - def _validate_conn(self, conn): - """ - Called right before a request is made, after the socket is created. - """ - super(HTTPSConnectionPool, self)._validate_conn(conn) - - # Force connect early to allow us to validate the connection. - if not getattr(conn, "sock", None): # AppEngine might not have `.sock` - conn.connect() - - if not conn.is_verified: - warnings.warn( - ( - "Unverified HTTPS request is being made to host '%s'. " - "Adding certificate verification is strongly advised. See: " - "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" - "#ssl-warnings" % conn.host - ), - InsecureRequestWarning, - ) - - if getattr(conn, "proxy_is_verified", None) is False: - warnings.warn( - ( - "Unverified HTTPS connection done to an HTTPS proxy. " - "Adding certificate verification is strongly advised. 
See: " - "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" - "#ssl-warnings" - ), - InsecureRequestWarning, - ) - - -def connection_from_url(url, **kw): - """ - Given a url, return an :class:`.ConnectionPool` instance of its host. - - This is a shortcut for not having to parse out the scheme, host, and port - of the url before creating an :class:`.ConnectionPool` instance. - - :param url: - Absolute URL string that must include the scheme. Port is optional. - - :param \\**kw: - Passes additional parameters to the constructor of the appropriate - :class:`.ConnectionPool`. Useful for specifying things like - timeout, maxsize, headers, etc. - - Example:: - - >>> conn = connection_from_url('http://google.com/') - >>> r = conn.request('GET', '/') - """ - scheme, host, port = get_host(url) - port = port or port_by_scheme.get(scheme, 80) - if scheme == "https": - return HTTPSConnectionPool(host, port=port, **kw) - else: - return HTTPConnectionPool(host, port=port, **kw) - - -def _normalize_host(host, scheme): - """ - Normalize hosts for comparisons and use with sockets. - """ - - host = normalize_host(host, scheme) - - # httplib doesn't like it when we include brackets in IPv6 addresses - # Specifically, if we include brackets but also pass the port then - # httplib crazily doubles up the square brackets on the Host header. - # Instead, we need to make sure we never pass ``None`` as the port. - # However, for backward compatibility reasons we can't actually - # *assert* that. See http://bugs.python.org/issue28539 - if host.startswith("[") and host.endswith("]"): - host = host[1:-1] - return host diff --git a/spaces/propilot/seo-powered-by-ia/cases/monitoring.py b/spaces/propilot/seo-powered-by-ia/cases/monitoring.py deleted file mode 100644 index 4ed8e1998efa2d498aa736b800935c76735228da..0000000000000000000000000000000000000000 --- a/spaces/propilot/seo-powered-by-ia/cases/monitoring.py +++ /dev/null @@ -1,12 +0,0 @@ -import openai -from os import getenv - -openai.api_base = "https://oai.hconeai.com/v1" -HELICONE_API_KEY = getenv("HELICONE_API_KEY") - -HEADERS = { - "Helicone-Auth": f"Bearer {HELICONE_API_KEY}" if HELICONE_API_KEY else "", - "Helicone-Cache-Enabled": "true", - "Helicone-Property-App": "HuggingFace", - "Helicone-Property-DataSource": "SEO Powered by AI", -} diff --git a/spaces/pyInter/Liyuu_sovits4/vdecoder/hifigan/utils.py b/spaces/pyInter/Liyuu_sovits4/vdecoder/hifigan/utils.py deleted file mode 100644 index 9c93c996d3cc73c30d71c1fc47056e4230f35c0f..0000000000000000000000000000000000000000 --- a/spaces/pyInter/Liyuu_sovits4/vdecoder/hifigan/utils.py +++ /dev/null @@ -1,68 +0,0 @@ -import glob -import os -import matplotlib -import torch -from torch.nn.utils import weight_norm -# matplotlib.use("Agg") -import matplotlib.pylab as plt - - -def plot_spectrogram(spectrogram): - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - - fig.canvas.draw() - plt.close() - - return fig - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def apply_weight_norm(m): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - weight_norm(m) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def load_checkpoint(filepath, device): - assert os.path.isfile(filepath) - print("Loading '{}'".format(filepath)) - checkpoint_dict = 
torch.load(filepath, map_location=device) - print("Complete.") - return checkpoint_dict - - -def save_checkpoint(filepath, obj): - print("Saving checkpoint to {}".format(filepath)) - torch.save(obj, filepath) - print("Complete.") - - -def del_old_checkpoints(cp_dir, prefix, n_models=2): - pattern = os.path.join(cp_dir, prefix + '????????') - cp_list = glob.glob(pattern) # get checkpoint paths - cp_list = sorted(cp_list)# sort by iter - if len(cp_list) > n_models: # if more than n_models models are found - for cp in cp_list[:-n_models]:# delete the oldest models other than lastest n_models - open(cp, 'w').close()# empty file contents - os.unlink(cp)# delete file (move to trash when using Colab) - - -def scan_checkpoint(cp_dir, prefix): - pattern = os.path.join(cp_dir, prefix + '????????') - cp_list = glob.glob(pattern) - if len(cp_list) == 0: - return None - return sorted(cp_list)[-1] - diff --git a/spaces/pyodide-demo/self-hosted/numpy.js b/spaces/pyodide-demo/self-hosted/numpy.js deleted file mode 100644 index 563c0b2864f85f205756facc04040506abadcedd..0000000000000000000000000000000000000000 --- a/spaces/pyodide-demo/self-hosted/numpy.js +++ /dev/null @@ -1 +0,0 @@ -var Module=typeof globalThis.__pyodide_module!=="undefined"?globalThis.__pyodide_module:{};if(!Module.expectedDataFileDownloads){Module.expectedDataFileDownloads=0}Module.expectedDataFileDownloads++;(function(){var loadPackage=function(metadata){var PACKAGE_PATH="";if(typeof window==="object"){PACKAGE_PATH=window["encodeURIComponent"](window.location.pathname.toString().substring(0,window.location.pathname.toString().lastIndexOf("/"))+"/")}else if(typeof process==="undefined"&&typeof location!=="undefined"){PACKAGE_PATH=encodeURIComponent(location.pathname.toString().substring(0,location.pathname.toString().lastIndexOf("/"))+"/")}var PACKAGE_NAME="numpy.data";var REMOTE_PACKAGE_BASE="numpy.data";if(typeof Module["locateFilePackage"]==="function"&&!Module["locateFile"]){Module["locateFile"]=Module["locateFilePackage"];err("warning: you defined Module.locateFilePackage, that has been renamed to Module.locateFile (using your locateFilePackage for now)")}var REMOTE_PACKAGE_NAME=Module["locateFile"]?Module["locateFile"](REMOTE_PACKAGE_BASE,""):REMOTE_PACKAGE_BASE;var REMOTE_PACKAGE_SIZE=metadata["remote_package_size"];var PACKAGE_UUID=metadata["package_uuid"];function fetchRemotePackage(packageName,packageSize,callback,errback){if(typeof process==="object"){require("fs").readFile(packageName,(function(err,contents){if(err){errback(err)}else{callback(contents.buffer)}}));return}var xhr=new XMLHttpRequest;xhr.open("GET",packageName,true);xhr.responseType="arraybuffer";xhr.onprogress=function(event){var url=packageName;var size=packageSize;if(event.total)size=event.total;if(event.loaded){if(!xhr.addedTotal){xhr.addedTotal=true;if(!Module.dataFileDownloads)Module.dataFileDownloads={};Module.dataFileDownloads[url]={loaded:event.loaded,total:size}}else{Module.dataFileDownloads[url].loaded=event.loaded}var total=0;var loaded=0;var num=0;for(var download in Module.dataFileDownloads){var data=Module.dataFileDownloads[download];total+=data.total;loaded+=data.loaded;num++}total=Math.ceil(total*Module.expectedDataFileDownloads/num);if(Module["setStatus"])Module["setStatus"]("Downloading data... 
("+loaded+"/"+total+")")}else if(!Module.dataFileDownloads){if(Module["setStatus"])Module["setStatus"]("Downloading data...")}};xhr.onerror=function(event){throw new Error("NetworkError for: "+packageName)};xhr.onload=function(event){if(xhr.status==200||xhr.status==304||xhr.status==206||xhr.status==0&&xhr.response){var packageData=xhr.response;callback(packageData)}else{throw new Error(xhr.statusText+" : "+xhr.responseURL)}};xhr.send(null)}function handleError(error){console.error("package error:",error)}var fetchedCallback=null;var fetched=Module["getPreloadedPackage"]?Module["getPreloadedPackage"](REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE):null;if(!fetched)fetchRemotePackage(REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE,(function(data){if(fetchedCallback){fetchedCallback(data);fetchedCallback=null}else{fetched=data}}),handleError);function runWithFS(){function assert(check,msg){if(!check)throw msg+(new Error).stack}Module["FS_createPath"]("/","lib",true,true);Module["FS_createPath"]("/lib","python3.9",true,true);Module["FS_createPath"]("/lib/python3.9","site-packages",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","numpy",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy","compat",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy","core",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy/core","include",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy/core/include","numpy",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy/core/include/numpy","libdivide",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy/core/include/numpy","random",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy/core","lib",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy","distutils",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy/distutils","command",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy/distutils","fcompiler",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy/distutils","mingw",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy/distutils","checks",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy","doc",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy","f2py",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy/f2py","src",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy","fft",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy","lib",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy","linalg",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy","ma",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy","matrixlib",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy","polynomial",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy","random",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy/random","_examples",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy/random/_examples","cffi",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy/random/_examples","cython",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy/random/_examples","numba",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy/random","lib",true,true);Module["FS_createPath"]("/lib/python3.9/sit
e-packages/numpy","testing",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy/testing","_private",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/numpy","typing",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","numpy-1.21.4-py3.9.egg-info",true,true);Module["FS_createPath"]("/","bin",true,true);function processPackageData(arrayBuffer){assert(arrayBuffer,"Loading data file failed.");assert(arrayBuffer instanceof ArrayBuffer,"bad input to processPackageData");var byteArray=new Uint8Array(arrayBuffer);var curr;var compressedData={data:null,cachedOffset:7419419,cachedIndexes:[-1,-1],cachedChunks:[null,null],offsets:[0,1352,2845,4245,5379,6836,8025,9522,10935,12331,13662,15128,16633,17552,18713,19907,21174,22630,23973,25183,26507,27830,29074,30452,31610,32766,33816,34904,36238,37549,38757,39822,40916,42059,43130,44299,45348,46426,47297,48126,48820,49851,51044,51648,52138,52737,53630,54475,55486,56210,56816,57214,57723,58720,59629,60531,61346,62043,62869,63339,63980,64564,65133,65701,66318,66880,67480,68068,68658,69245,69854,70473,70940,71411,71934,72579,73089,73588,74110,74561,75471,76219,77e3,77744,78669,79658,80398,81086,82028,82988,84066,85096,86131,86907,87774,88683,89954,90877,91720,92906,93921,94766,95363,96184,96978,97977,98893,99622,100499,101632,102371,103321,103784,104886,106030,106872,108048,108988,109859,110774,111334,112209,113105,113949,114940,115635,116567,117650,118403,119216,119948,120936,122422,123703,125054,126188,127134,128342,129649,130533,131597,132439,133744,135002,136233,137350,138506,139769,140685,141913,143046,144223,145366,146507,147722,149066,150434,151723,153097,154477,155849,157304,158761,159979,161308,162500,163897,165248,166610,167869,169279,170678,171966,173341,174516,175808,177060,178435,179826,181223,182566,183912,185193,186503,187817,189067,190390,191665,192955,194228,195555,197003,198476,199849,201259,202595,203984,205399,206787,208080,209480,210775,212044,213353,214634,216017,217034,218502,219835,221280,222496,223915,225362,226689,228241,229430,230732,231927,233008,233779,234584,235594,236955,238032,239347,240320,241431,242730,243699,244954,246177,247518,248659,249737,251173,252320,253624,254670,255466,256743,257979,259265,260618,261740,263130,264474,265867,267174,268652,269977,271384,272718,274164,275573,276907,278203,279655,280619,281410,282725,284073,285238,286300,287479,288806,290002,291279,292690,293647,294436,295814,297144,298064,299211,300379,301711,303075,304189,305335,306475,307800,309035,310294,311667,312990,313978,315453,316739,318063,319466,320674,322027,323321,324611,325957,327303,328243,329274,330639,331978,333334,334648,336084,337376,338757,339920,341134,342242,343573,344767,346045,347108,348249,349324,350719,351945,353111,353584,354303,354656,355292,356709,358009,358664,359651,360821,361937,363068,364314,365444,366147,366771,367824,369030,370158,371233,372409,373598,374762,375794,376889,378018,379495,380738,381937,382444,383555,384365,385073,385891,386654,387664,388925,389973,391259,392712,394024,395082,396289,397455,398611,399827,401021,402366,403488,404808,405980,407048,408093,409059,410439,411640,412797,414015,415188,416436,417840,419258,420612,422113,423170,424010,425276,426448,427828,429141,430383,431587,432753,433940,434827,436013,437318,438240,439356,440387,441370,442572,443148,443963,444478,445075,445588,446324,446970,447780,449081,450159,451662,452956,454383,455658,456909,458016,458980,460448,461835,463076,464305,465460,466832,468203,469677,470982,472321,473763,475084,476401,477
424,478773,480099,481304,482453,483573,484871,486241,487666,488939,490335,491712,492995,494320,495725,497126,498568,499695,501043,502261,503503,504837,506231,507588,508824,509823,511247,512580,513932,515265,516706,517911,519286,520660,521919,523125,524514,525839,526963,528214,529465,530764,531978,533437,534727,536051,537272,538492,539724,540770,541549,542874,544088,545290,546616,547873,549081,549787,551115,552238,553302,554277,555603,556841,557990,559140,560307,561435,562741,564026,565344,566560,567761,569041,570133,571445,572556,573937,575296,576566,577815,579028,580258,581549,582862,584094,585356,586743,588161,589498,590677,592001,593038,594272,595498,596707,597947,599297,600589,601681,602890,603896,605163,606458,607676,608646,609964,611250,612612,613931,615221,616518,617845,618958,620356,621380,622669,623921,625117,626299,627564,628946,630260,631520,632598,633795,634868,636079,637540,638803,640066,641135,642336,643668,644999,646372,647746,649041,650333,651606,652832,654241,655456,656782,658190,659230,660410,661784,663019,664258,665422,666733,668022,669408,670832,672047,673383,674279,675150,676513,677806,678933,680113,681290,682564,683755,684631,685545,686540,687824,688816,690060,691214,692630,693883,694939,696093,697143,698592,699793,701303,702588,703887,705164,706721,707565,708834,710224,711130,712117,713584,715138,716499,717966,718816,719139,719844,721110,722749,724415,725971,727550,728953,730114,731505,732940,734382,735763,737252,738569,739426,739863,741117,742317,743696,745034,746225,747606,748845,750228,751551,752543,753419,754237,755682,757019,758173,759201,760612,762036,763414,764618,765788,766606,767852,769002,769840,770570,771699,772950,773986,775052,776064,776898,777863,778844,780146,781209,782237,783638,785026,786560,787993,789399,791310,793305,794534,795596,796658,797719,798779,799838,800901,801974,803040,804183,805233,806282,807443,808508,809513,810765,811837,812896,813956,815183,816280,817352,818410,819468,820515,821589,822654,823774,824894,826102,827290,828233,829335,830388,831528,832702,834183,835736,836529,837374,838206,839e3,840249,841805,843300,844149,845066,845766,846459,846784,847168,847477,847805,848126,848479,849066,849819,850331,851070,851806,852696,853819,855115,856709,857970,859313,860343,861717,862996,863928,865293,866572,867554,868803,870077,871164,872122,872780,874128,874616,875844,877313,877865,879171,880478,880999,882133,883610,884489,885677,886684,886978,888172,889726,891326,892774,894266,895870,897303,898874,900381,901926,903324,904863,906338,907828,908954,909345,910246,911795,912724,913964,915215,916561,918101,919512,921030,922545,924078,925512,926855,928211,929277,930862,932374,933651,935009,936333,937816,939153,940562,942020,943489,944934,946227,947648,949267,950550,951597,952344,953957,955500,956997,958465,959893,961327,962888,964343,965692,967167,968567,969874,971018,972517,973956,975178,976660,977859,979199,980623,982119,983556,984822,986283,987237,988859,990160,991454,992900,994503,995829,997156,998730,1000217,1001498,1002953,1004476,1005962,1007325,1008856,1010293,1011721,1013105,1014785,1016333,1017368,1018531,1019958,1021155,1022433,1023952,1025541,1026978,1028502,1029902,1031135,1032780,1034283,1035591,1037048,1038632,1039902,1041191,1042652,1044199,1045323,1046729,1047763,1048638,1049700,1050745,1052115,1052992,1053976,1055214,1056276,1057452,1058661,1059799,1060984,1062180,1063276,1064446,1065597,1066760,1067849,1068816,1069788,1070634,1071733,1073227,1074388,1075615,1077080,1078081,1079511,1081020,1082390,1083419,1084717,1086184,1087809,1089
465,1091058,1092346,1093733,1095235,1096624,1098207,1099837,1101263,1102724,1104155,1105551,1107153,1108730,1110285,1111744,1112741,1113562,1114180,1115084,1116141,1116700,1117320,1118099,1118714,1119639,1120605,1121841,1122628,1123438,1124405,1125287,1126287,1127254,1128217,1129168,1130130,1131092,1132053,1133028,1133977,1134940,1135599,1136223,1136837,1137613,1138288,1138929,1139608,1140364,1141070,1141825,1142609,1143313,1144016,1144794,1145531,1146167,1146890,1147594,1148246,1148928,1149639,1150467,1151116,1151855,1152600,1153397,1154188,1154882,1155425,1155907,1156513,1157126,1157616,1158190,1158713,1159201,1159902,1160590,1161230,1161702,1162163,1162712,1163222,1163696,1164316,1165039,1165737,1166329,1166916,1167622,1168456,1169037,1169599,1170153,1171039,1172175,1173452,1174732,1175606,1176723,1178009,1178863,1179983,1181418,1182925,1184507,1186067,1187736,1189305,1190769,1192279,1193873,1195282,1196646,1198044,1199614,1201143,1202092,1203402,1204763,1206362,1207334,1208820,1210150,1211612,1213225,1214767,1216130,1217556,1218920,1220373,1221681,1222394,1223222,1224272,1225369,1226590,1227717,1228809,1230021,1231091,1231974,1232906,1233787,1234605,1235366,1236598,1237762,1238702,1240290,1241172,1242379,1243739,1245256,1246656,1247961,1249400,1250827,1252289,1253702,1255031,1256191,1257307,1258500,1259799,1261016,1262216,1263246,1264458,1265684,1266922,1267963,1269109,1270844,1272368,1273789,1275247,1276719,1278105,1279288,1280591,1282131,1283564,1284936,1286064,1287570,1289086,1290597,1291941,1293440,1294911,1296288,1297787,1299185,1300739,1301985,1303235,1303827,1304192,1304908,1305946,1307171,1307851,1308952,1310169,1311614,1312990,1314457,1315841,1317022,1318489,1319804,1321213,1322504,1323772,1324237,1324728,1325281,1326001,1327157,1328525,1329675,1331146,1332290,1333447,1334859,1335688,1336914,1338119,1339264,1340618,1340916,1342021,1343042,1344489,1346046,1347245,1348433,1349733,1351052,1352407,1353793,1355269,1356731,1358195,1359611,1360881,1362282,1363488,1364741,1365875,1367240,1368375,1369952,1370872,1371806,1372533,1373540,1375019,1375845,1377320,1378415,1379396,1380827,1382185,1383873,1385537,1387238,1388902,1390597,1392211,1393866,1395470,1397112,1398713,1400346,1401961,1403597,1405213,1406851,1408494,1410057,1411588,1413082,1414586,1416251,1417848,1419471,1421056,1422513,1423937,1425421,1426820,1428281,1429684,1431232,1432521,1433991,1435510,1436972,1438363,1439664,1441095,1442378,1443496,1444823,1446387,1447956,1449444,1450432,1451230,1452136,1453466,1454659,1455793,1456851,1457889,1459324,1460681,1462125,1463434,1464557,1465742,1466980,1468118,1469425,1470616,1471803,1473230,1474834,1476438,1478016,1479626,1481223,1482752,1484311,1485879,1487415,1488964,1490491,1492049,1493600,1495158,1496693,1498261,1499785,1501298,1502757,1504142,1505620,1507146,1508595,1510095,1511429,1512930,1514306,1515656,1517050,1518282,1519712,1520683,1522027,1523227,1524653,1525576,1526952,1528110,1529205,1529856,1530570,1531590,1533e3,1534171,1535464,1536813,1537840,1539340,1540637,1541564,1542971,1543858,1544809,1545586,1546519,1547459,1548496,1549474,1550389,1551331,1552285,1553370,1554234,1555223,1556131,1556987,1557909,1558739,1559646,1560574,1561528,1562567,1563480,1564357,1565318,1566458,1567296,1568326,1569362,1570304,1571192,1572044,1572940,1573969,1574998,1575930,1576946,1577788,1578661,1579690,1580633,1581751,1582603,1583578,1584607,1585521,1586424,1587324,1588354,1589174,1590091,1591021,1592072,1593014,1593910,1594867,1595858,1596877,1597777,1598703,1599641,1600496,1601470,1602354,
1603442,1604337,1605332,1606255,1607171,1608120,1609197,1610102,1611078,1612030,1613055,1614005,1614890,1615818,1616757,1617722,1618665,1619616,1620566,1621439,1622409,1623248,1624237,1625245,1626220,1627163,1628121,1629075,1630110,1631231,1632400,1633622,1634712,1635653,1636741,1637426,1637978,1638771,1639944,1640914,1641409,1642239,1643034,1644116,1644604,1645440,1646446,1647370,1648761,1649708,1650393,1651225,1652257,1653089,1653946,1654837,1655613,1656543,1657733,1658532,1659607,1660616,1661931,1663168,1663985,1664982,1666172,1667505,1668749,1670039,1671229,1672269,1673395,1674540,1675604,1677108,1678626,1680012,1681530,1683028,1684408,1685930,1687381,1688926,1690361,1691706,1693302,1694722,1696158,1697655,1699282,1700838,1702263,1703563,1705077,1706469,1707084,1707598,1708368,1709160,1709597,1710359,1711127,1712051,1712816,1713672,1714294,1715008,1716022,1716825,1717417,1718436,1719100,1719755,1720842,1721609,1722305,1723106,1723831,1724806,1725430,1726135,1727118,1727837,1728769,1729912,1730615,1731397,1732407,1733097,1733809,1734687,1735479,1736581,1737226,1738433,1739029,1740212,1741129,1742357,1743288,1744621,1745489,1746660,1748129,1749646,1751077,1752555,1753581,1754567,1756106,1757371,1758935,1760463,1762120,1763448,1764972,1766597,1768146,1769460,1770737,1771111,1772154,1773557,1774988,1776383,1777730,1779181,1780483,1781358,1781819,1783138,1784145,1785267,1786741,1787975,1789278,1790807,1792302,1793825,1795161,1796584,1797994,1799078,1800211,1801487,1802893,1804334,1805087,1805112,1805153,1805178,1805226,1805251,1805307,1805332,1805409,1805434,1805547,1805572,1805759,1805784,1806122,1806147,1806778,1806803,1807210,1808042,1808748,1809854,1811181,1812498,1813876,1815332,1816971,1818462,1820104,1821619,1823231,1824613,1825857,1827524,1829113,1830678,1832281,1833856,1835332,1836828,1838468,1839891,1841222,1842653,1843806,1845158,1846489,1847618,1849067,1850310,1851713,1852972,1854179,1855679,1856634,1858087,1859563,1860983,1862454,1863924,1865330,1866833,1868174,1869507,1870717,1872105,1873483,1874932,1876366,1877741,1879121,1880484,1881990,1883443,1884605,1885791,1887364,1888953,1890536,1892069,1893441,1894625,1895803,1897092,1898443,1899924,1901346,1902723,1904130,1905351,1906446,1907771,1909155,1910578,1911831,1912995,1914443,1915929,1917212,1918638,1919586,1921143,1922643,1923931,1925372,1926528,1928043,1929591,1930807,1932207,1933042,1934363,1935531,1936506,1937968,1939493,1940553,1942105,1943530,1944894,1944919,1945050,1946594,1948050,1949390,1950762,1952161,1953185,1954205,1955812,1957365,1958510,1959181,1960388,1961576,1962445,1962943,1964675,1965326,1966625,1968050,1969012,1969908,1970851,1971040,1971164,1971267,1972090,1973157,1974192,1974679,1974704,1974729,1974754,1974779,1974804,1974829,1974854,1974879,1974904,1974929,1974954,1974979,1975004,1975029,1975054,1975079,1975104,1975129,1975154,1975179,1975204,1975229,1975254,1975279,1975304,1975329,1975354,1975379,1975404,1975429,1975454,1975479,1975504,1975529,1975554,1975579,1975604,1975629,1975654,1975679,1975704,1975729,1975754,1976107,1977570,1978730,1980065,1981121,1982184,1982984,1983814,1985070,1986293,1987499,1988732,1989649,1990645,1991800,1992496,1993166,1994411,1995833,1996716,1998105,1999360,2000459,2001874,2003079,2004490,2005976,2007196,2007601,2008801,2009304,2010147,2011307,2012495,2013351,2014046,2014765,2015554,2016453,2017720,2018805,2019430,2020408,2021560,2022327,2023667,2024757,2025647,2026394,2027184,2028487,2029707,2030682,2031917,2033050,2033978,2035210,2036281,2037215,2038123,2039118,2040124,204
1465,2042702,2043615,2044679,2045758,2046937,2048011,2048954,2049919,2050660,2051775,2052651,2053709,2054695,2055742,2056799,2057672,2058790,2059500,2060368,2061118,2061907,2062683,2063471,2064272,2064986,2065747,2066590,2067242,2068129,2069145,2070348,2071373,2072705,2073560,2074345,2075196,2076019,2077238,2078317,2079289,2080194,2080782,2081690,2082176,2082692,2083645,2084347,2085154,2086463,2087631,2088807,2089918,2091127,2092385,2093381,2094355,2095435,2096662,2097971,2099409,2100684,2101909,2102700,2103640,2104657,2105844,2107033,2107910,2108558,2109387,2110211,2111214,2112302,2113554,2114258,2115097,2115811,2116427,2117310,2118327,2119631,2120944,2122181,2123177,2124278,2125273,2125872,2126731,2127856,2129230,2130408,2131788,2133141,2134211,2135359,2136708,2137926,2139241,2140532,2141799,2142805,2143990,2145321,2146326,2147446,2148124,2149060,2149920,2150950,2152010,2152840,2153810,2154443,2155438,2156410,2157350,2158413,2159233,2160152,2161289,2162545,2163558,2164604,2166076,2166664,2167248,2167848,2168483,2169107,2170206,2171404,2172394,2173431,2174451,2175920,2177687,2179006,2180343,2181751,2183011,2184359,2185654,2186930,2188024,2188572,2189107,2190987,2192506,2193609,2195175,2196421,2197776,2199068,2200229,2201569,2202592,2203812,2204936,2205873,2207071,2208190,2209351,2210577,2212037,2213146,2214342,2215624,2216437,2217557,2218738,2220073,2221362,2222576,2223824,2225221,2226653,2227988,2229331,2230757,2231785,2233214,2234522,2235833,2237229,2238654,2239985,2241308,2242576,2243871,2245149,2246511,2247902,2249258,2250511,2251548,2252634,2253851,2254875,2256145,2257178,2258494,2259694,2260649,2261813,2262754,2263742,2264711,2265895,2266876,2267871,2269052,2270060,2271056,2272144,2273438,2274547,2275735,2276924,2278080,2279221,2280283,2281302,2282392,2283455,2284481,2285462,2286644,2287940,2289320,2290555,2291657,2292739,2294014,2295138,2296570,2297960,2299268,2300577,2301785,2303143,2304296,2305609,2306816,2307958,2308958,2310019,2311283,2312745,2313583,2315137,2316494,2317674,2318849,2319969,2321114,2321950,2322702,2323602,2324584,2325697,2326755,2327909,2328963,2330029,2331115,2332227,2333202,2333985,2335133,2336222,2337152,2338295,2339130,2340487,2341960,2343094,2344261,2345390,2346264,2347366,2348291,2349221,2350414,2351630,2352718,2353853,2354893,2355854,2357197,2358394,2359409,2360779,2361833,2362792,2363882,2364889,2365816,2366530,2367361,2368699,2370128,2371279,2372546,2373611,2374913,2376173,2376785,2377326,2378095,2378913,2379737,2380261,2381350,2381946,2382697,2384057,2385371,2386661,2387875,2388942,2390351,2391604,2392765,2394057,2394953,2395871,2396589,2397267,2398085,2398754,2399957,2401025,2402320,2403669,2404626,2405853,2406868,2407973,2409151,2410233,2411373,2412514,2413630,2414647,2415680,2416726,2417847,2418969,2420206,2421297,2422234,2423404,2424472,2425437,2426353,2427293,2428252,2429140,2430375,2431576,2432754,2433693,2434567,2435568,2436582,2437763,2438875,2439839,2441045,2442343,2443584,2444578,2445659,2446787,2447947,2448970,2449994,2450948,2451822,2452890,2454004,2454887,2456105,2457329,2458366,2459542,2460612,2461603,2462609,2463753,2464743,2465707,2466618,2467512,2468504,2469558,2470499,2471637,2472491,2473586,2474729,2475938,2477078,2478272,2479262,2480302,2481278,2482369,2483290,2484421,2485038,2485826,2486840,2487935,2488868,2489990,2491127,2491980,2492681,2493799,2494962,2495909,2496754,2497408,2498268,2499239,2500267,2501261,2502386,2503537,2504877,2506e3,2506972,2508235,2509372,2510477,2511481,2512739,2513896,2514820,2516109,2517183,2518424,2519492
[… several thousand integer entries elided: the "offsets" and "sizes" arrays of a minified data-loader script (chunk offset/size metadata for a packaged binary asset) …]
5,1296,1320,1290,1352,1379,1307,1279,1286,1378,1341,1250,1349,1361,1338,1211,1312,1363,1290,1311,1252,1349,1502,1575,1382,1422,1418,1435,1383,1410,1415,1538,1410,1426,1394,1097,1337,1403,1462,1475,1338,1126,1370,1363,1523,1400,1334,1290,1482,1285,1571,1372,1187,1314,1457,1310,1514,1407,1325,1465,1449,1368,1400,1441,1588,1443,1359,1233,1447,1327,1564,1377,1272,1322,1501,1293,1521,1337,1098,1330,1385,1442,1469,1437,1251,1162,1603,1365,1401,1112,1045,1576,1284,1003,1416,1465,1392,1246,1489,1327,658,671,1225,1091,1223,869,1935,2048,2055,2048,2048,2057,2048,2054,2048,1740,424,25,1166,1275,1233,1218,1360,1276,1291,1042,1014,1014,999,1011,996,1071,1231,956,1424,786,1052,1007,1061,1404,756,759,861,809,791,842,803,789,910,790,1172,1596,1414,1357,1393,1484,1409,1404,1414,1105,1428,1355,1547,1418,1431,1187,864,1073,1405,1441,1421,1318,1386,1191,1348,1368,1188,1162,1315,1270,1293,1428,1340,1315,1265,1250,1322,1392,1336,1331,1418,1363,1528,1306,1282,1342,1159,1155,736,1321,1276,1405,1363,1498,1515,1373,1497,1477,1462,1524,990,1451,1389,1149,1541,1515,1295,1310,1429,1323,1322,1378,1552,1456,1358,1441,1437,1299,1472,1422,1445,1439,1477,1239,1184,1449,1212,1294,1420,1273,1198,1204,1133,1340,1239,1167,1606,1397,1419,1114,1035,1340,965,1401,1431,1417,1340,1463,1315,841,476,1221,1101,1203,1386,1326,1397,1376,1354,1160,1326,1339,1346,1304,1378,1192,1348,1233,1215,1271,1413,1402,1335,1248,1284,1231,1386,1433,1270,1347,1347,1290,1296,1215,1270,1261,1368,1405,1288,1459,1195,1374,1366,1126,1255,1215,1326,1272,1223,1344,1273,1430,1084,1336,1324,1434,1406,1479,1220,1308,1297,1307,1411,1367,1289,1345,1370,1450,1400,787,1086,1239,2048,2057,2048,2057,2057,2048,2048,2048,1837,1101,1066,1114,1066,1390,1060,1405,1250,1301,1124,1240,991,1322,1149,1306,1143,1329,1262,1309,1409,1343,1375,1305,1295,1282,1346,1317,1322,1379,1053,1430,1361,1413,1392,1337,1294,1397,1298,1350,1365,1166,1381,1258,1387,1235,1316,1227,1345,1386,1348,1386,1288,1292,1379,1241,1312,1311,1364,1371,1201,1267,1377,1385,1322,1374,1385,1343,1349,984,513,25,507,814,678,757,606,752,1099,780,712,482,570,427,633,630,536,464,589,654,1146,874,545,441,510,632,513,466,590,757,1121,1121,1108,1040,1072,1119,1261,1216,1105,1322,1226,1056,1206,1730,1044,1242,914,1491,1329,1296,1063,1114,1788,2048,2052,2050,2048,2050,2048,2052,2048,1605,1598,1624,1590,1321,1117,1145,1189,1144,1090,1337,895,1347,1081,1360,1057,1131,1301,1223,1070,1286,1310,1108,1214,1097,1261,1427,1086,1318,1156,1251,1368,1313,1400,1337,1378,1204,1263,1306,1288,620,1360,1215,1505,1285,948,1155,889,1301,1313,1288,1336,1420,1311,1247,1329,1171,1258,1153,1296,1389,1190,1064,1209,1306,1246,1298,1248,1440,1344,1233,1411,1266,1481,1470,1091,1343,1231,1106,1319,1140,1348,1294,1296,1178,1074,1188,1225,1150,1120,1382,1388,1544,1447,958,1067,1047,1382,998,1015,970,798,808,1361,990,589,519,474,474,661,1469,667,695,661,644,1070,1263,618,773,1004,536,525,355,450,403,459,395,398,383,687,753,554,625,460,458,693,890,719,644,534,541,594,574,317,699,611,629,782,754,708,753,680,643,789,566,818,981,672,215],successes:[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,0,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,1,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]};compressedData["data"]=byteArray;assert(typeof Module.LZ4==="object","LZ4 not present - was your app build with -s LZ4=1 
?");Module.LZ4.loadPackage({metadata:metadata,compressedData:compressedData},true);Module["removeRunDependency"]("datafile_numpy.data")}Module["addRunDependency"]("datafile_numpy.data");if(!Module.preloadResults)Module.preloadResults={};Module.preloadResults[PACKAGE_NAME]={fromCache:false};if(fetched){processPackageData(fetched);fetched=null}else{fetchedCallback=processPackageData}}if(Module["calledRun"]){runWithFS()}else{if(!Module["preRun"])Module["preRun"]=[];Module["preRun"].push(runWithFS)}};loadPackage({files:[{filename:"/lib/python3.9/site-packages/numpy/setup.py",start:0,end:983,audio:0},{filename:"/lib/python3.9/site-packages/numpy/__init__.py",start:983,end:16882,audio:0},{filename:"/lib/python3.9/site-packages/numpy/dual.py",start:16882,end:19096,audio:0},{filename:"/lib/python3.9/site-packages/numpy/_globals.py",start:19096,end:22045,audio:0},{filename:"/lib/python3.9/site-packages/numpy/_distributor_init.py",start:22045,end:22376,audio:0},{filename:"/lib/python3.9/site-packages/numpy/matlib.py",start:22376,end:32741,audio:0},{filename:"/lib/python3.9/site-packages/numpy/version.py",start:32741,end:33112,audio:0},{filename:"/lib/python3.9/site-packages/numpy/_pytesttester.py",start:33112,end:39414,audio:0},{filename:"/lib/python3.9/site-packages/numpy/ctypeslib.py",start:39414,end:56635,audio:0},{filename:"/lib/python3.9/site-packages/numpy/__config__.py",start:56635,end:59675,audio:0},{filename:"/lib/python3.9/site-packages/numpy/_version.py",start:59675,end:60173,audio:0},{filename:"/lib/python3.9/site-packages/numpy/char.pyi",start:60173,end:61888,audio:0},{filename:"/lib/python3.9/site-packages/numpy/__init__.pyi",start:61888,end:195813,audio:0},{filename:"/lib/python3.9/site-packages/numpy/ctypeslib.pyi",start:195813,end:196264,audio:0},{filename:"/lib/python3.9/site-packages/numpy/py.typed",start:196264,end:196264,audio:0},{filename:"/lib/python3.9/site-packages/numpy/rec.pyi",start:196264,end:197235,audio:0},{filename:"/lib/python3.9/site-packages/numpy/__init__.pxd",start:197235,end:231841,audio:0},{filename:"/lib/python3.9/site-packages/numpy/__init__.cython-30.pxd",start:231841,end:268079,audio:0},{filename:"/lib/python3.9/site-packages/numpy/LICENSE.txt",start:268079,end:270258,audio:0},{filename:"/lib/python3.9/site-packages/numpy/compat/setup.py",start:270258,end:270593,audio:0},{filename:"/lib/python3.9/site-packages/numpy/compat/__init__.py",start:270593,end:271025,audio:0},{filename:"/lib/python3.9/site-packages/numpy/compat/_inspect.py",start:271025,end:278472,audio:0},{filename:"/lib/python3.9/site-packages/numpy/compat/py3k.py",start:278472,end:281957,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_add_newdocs_scalars.py",start:281957,end:290757,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_internal.py",start:290757,end:318130,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/fromnumeric.py",start:318130,end:440907,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_string_helpers.py",start:440907,end:443762,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/getlimits.py",start:443762,end:463536,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_add_newdocs.py",start:463536,end:655446,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/defchararray.py",start:655446,end:725181,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_type_aliases.py",start:725181,end:733020,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/einsumfunc.py",start:733020,end:784465,audio:0},{file
name:"/lib/python3.9/site-packages/numpy/core/setup.py",start:784465,end:829260,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_asarray.py",start:829260,end:833435,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/__init__.py",start:833435,end:838797,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/records.py",start:838797,end:876253,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/memmap.py",start:876253,end:887941,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/function_base.py",start:887941,end:906960,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/multiarray.py",start:906960,end:962265,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/arrayprint.py",start:962265,end:1023890,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/overrides.py",start:1023890,end:1032024,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/numeric.py",start:1032024,end:1108751,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_dtype_ctypes.py",start:1108751,end:1112424,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_dtype.py",start:1112424,end:1122267,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/cversions.py",start:1122267,end:1122614,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/umath.py",start:1122614,end:1124654,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/numerictypes.py",start:1124654,end:1141971,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/shape_base.py",start:1141971,end:1170972,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_exceptions.py",start:1170972,end:1177102,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/umath_tests.py",start:1177102,end:1177491,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_ufunc_config.py",start:1177491,end:1190878,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/setup_common.py",start:1190878,end:1210595,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/machar.py",start:1210595,end:1221410,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_methods.py",start:1221410,end:1232204,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/generate_numpy_api.py",start:1232204,end:1239313,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_multiarray_tests.so",start:1239313,end:1318876,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_multiarray_umath.so",start:1318876,end:3589736,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_umath_tests.so",start:3589736,end:3606664,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_rational_tests.so",start:3606664,end:3638463,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_struct_ufunc_tests.so",start:3638463,end:3641622,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_operand_flag_tests.so",start:3641622,end:3644190,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_simd.so",start:3644190,end:3648975,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/numeric.pyi",start:3648975,end:3653845,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/numerictypes.pyi",start:3653845,end:3656768,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/fromnumeric.pyi",start:3656768,end:3664790,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/__init__.pyi",start:3664790,end:3664916,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/shape_base.pyi",start:3664916,end:3665985,audio:0},{filename:"/lib/
python3.9/site-packages/numpy/core/_asarray.pyi",start:3665985,end:3667925,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_internal.pyi",start:3667925,end:3669298,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_ufunc_config.pyi",start:3669298,end:3670548,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/_type_aliases.pyi",start:3670548,end:3671068,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/einsumfunc.pyi",start:3671068,end:3674773,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/function_base.pyi",start:3674773,end:3676246,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/arrayprint.pyi",start:3676246,end:3680920,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/npy_interrupt.h",start:3680920,end:3682787,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/numpyconfig.h",start:3682787,end:3684613,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/old_defines.h",start:3684613,end:3690919,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/ufuncobject.h",start:3690919,end:3703770,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/noprefix.h",start:3703770,end:3710556,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/npy_cpu.h",start:3710556,end:3714984,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/npy_os.h",start:3714984,end:3715801,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/npy_3kcompat.h",start:3715801,end:3731632,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/utils.h",start:3731632,end:3732756,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/arrayscalars.h",start:3732756,end:3736486,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/npy_common.h",start:3736486,end:3774831,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/npy_endian.h",start:3774831,end:3777472,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h",start:3777472,end:3779333,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h",start:3779333,end:3783602,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/oldnumeric.h",start:3783602,end:3784310,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/ndarrayobject.h",start:3784310,end:3794998,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/npy_math.h",start:3794998,end:3815795,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h",start:3815795,end:3816362,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/arrayobject.h",start:3816362,end:3816526,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/ndarraytypes.h",start:3816526,end:3887032,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/halffloat.h",start:3887032,end:3888910,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/libdivide/libdivide.h",start:3888910,end:3968937,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/libdivide/LICENSE.txt",start:3968937,end:3969955,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/include/numpy/random/distributions.h",start:3969955,end:3979726,audio:0},{filename:"/lib/python3.9/site-packages/numpy/cor
e/include/numpy/random/bitgen.h",start:3979726,end:3980115,audio:0},{filename:"/lib/python3.9/site-packages/numpy/core/lib/libnpymath.a",start:3980115,end:4023451,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/exec_command.py",start:4023451,end:4033803,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/core.py",start:4033803,end:4041962,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/npy_pkg_config.py",start:4041962,end:4054934,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/mingw32ccompiler.py",start:4054934,end:4080352,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/setup.py",start:4080352,end:4080986,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/__init__.py",start:4080986,end:4082545,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/conv_template.py",start:4082545,end:4092081,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/unixccompiler.py",start:4092081,end:4097478,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/from_template.py",start:4097478,end:4105391,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/numpy_distribution.py",start:4105391,end:4106025,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/misc_util.py",start:4106025,end:4192654,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/msvc9compiler.py",start:4192654,end:4194846,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/pathccompiler.py",start:4194846,end:4195559,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/ccompiler.py",start:4195559,end:4222783,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/system_info.py",start:4222783,end:4332340,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/lib2def.py",start:4332340,end:4335984,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/intelccompiler.py",start:4335984,end:4340218,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/cpuinfo.py",start:4340218,end:4362875,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/_shell_utils.py",start:4362875,end:4365488,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/msvccompiler.py",start:4365488,end:4367416,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/log.py",start:4367416,end:4369979,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/line_endings.py",start:4369979,end:4372011,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/ccompiler_opt.py",start:4372011,end:4468810,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/extension.py",start:4468810,end:4472168,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/__config__.py",start:4472168,end:4475208,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/__init__.pyi",start:4475208,end:4475327,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/sdist.py",start:4475327,end:4476060,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/config_compiler.py",start:4476060,end:4480429,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/build_scripts.py",start:4480429,end:4482094,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/config.py",start:4482094,end:4502818,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/autodist.py",start:4502818,end:4506536,audio:0},{filename:"/lib/python3.9/site-packages/n
umpy/distutils/command/build_src.py",start:4506536,end:4537708,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/__init__.py",start:4537708,end:4538740,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/install.py",start:4538740,end:4541818,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/build_py.py",start:4541818,end:4542962,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/install_data.py",start:4542962,end:4543810,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/build_clib.py",start:4543810,end:4562332,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/build_ext.py",start:4562332,end:4593900,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/bdist_rpm.py",start:4593900,end:4594609,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/install_headers.py",start:4594609,end:4595528,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/egg_info.py",start:4595528,end:4596449,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/build.py",start:4596449,end:4599014,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/develop.py",start:4599014,end:4599589,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/command/install_clib.py",start:4599589,end:4601114,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/nag.py",start:4601114,end:4603654,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/pathf95.py",start:4603654,end:4604715,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/intel.py",start:4604715,end:4611261,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/none.py",start:4611261,end:4612019,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/__init__.py",start:4612019,end:4652120,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/vast.py",start:4652120,end:4653787,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/lahey.py",start:4653787,end:4655114,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/mips.py",start:4655114,end:4656828,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/hpux.py",start:4656828,end:4658181,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/nv.py",start:4658181,end:4659753,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/sun.py",start:4659753,end:4661330,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/environment.py",start:4661330,end:4664410,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/pg.py",start:4664410,end:4667978,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/gnu.py",start:4667978,end:4688229,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/g95.py",start:4688229,end:4689559,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/compaq.py",start:4689559,end:4693462,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/fujitsu.py",start:4693462,end:4694795,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/absoft.py",start:4694795,end:4700294,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/fcompiler/ibm.py",start:4700294,end:4703833,audio:0},{filename:"/lib/pytho
n3.9/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c",start:4703833,end:4703910,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse2.c",start:4703910,end:4704607,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knl.c",start:4704607,end:4705563,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_xop.c",start:4705563,end:4705797,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx3.c",start:4705797,end:4706047,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx2.c",start:4706047,end:4706796,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_ssse3.c",start:4706796,end:4707501,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c",start:4707501,end:4708137,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx.c",start:4708137,end:4708615,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c",start:4708615,end:4710210,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_f16c.c",start:4710210,end:4711078,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_skx.c",start:4711078,end:4712088,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimddp.c",start:4712088,end:4712468,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_fp16.c",start:4712468,end:4712719,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse.c",start:4712719,end:4713405,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma4.c",start:4713405,end:4713706,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimd.c",start:4713706,end:4714410,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_popcnt.c",start:4714410,end:4715459,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c",start:4715459,end:4716407,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_icl.c",start:4716407,end:4717411,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c",start:4717411,end:4717904,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knm.c",start:4717904,end:4719036,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse41.c",start:4719036,end:4719711,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon.c",start:4719711,end:4720083,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_clx.c",start:4720083,end:4720925,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse3.c",start:4720925,end:4721614,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/test_flags.c",start:4721614,end:4721630,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx2.c",start:4721630,end:4721893,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512cd.c",start:4721893,end:4722652,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma3.c",start:4722652,end:4723469,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse42.c",start:4723469,end:4724161,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx_
asm.c",start:4724161,end:4725106,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx.c",start:4725106,end:4725885,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdhp.c",start:4725885,end:4726214,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c",start:4726214,end:4726718,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512f.c",start:4726718,end:4727473,audio:0},{filename:"/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdfhm.c",start:4727473,end:4727904,audio:0},{filename:"/lib/python3.9/site-packages/numpy/doc/__init__.py",start:4727904,end:4728412,audio:0},{filename:"/lib/python3.9/site-packages/numpy/doc/constants.py",start:4728412,end:4737591,audio:0},{filename:"/lib/python3.9/site-packages/numpy/doc/ufuncs.py",start:4737591,end:4742951,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/f2py_testing.py",start:4742951,end:4744408,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/setup.py",start:4744408,end:4746868,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/__init__.py",start:4746868,end:4752632,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/crackfortran.py",start:4752632,end:4884619,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/f2py2e.py",start:4884619,end:4908960,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/diagnose.py",start:4908960,end:4914190,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/cb_rules.py",start:4914190,end:4938436,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/cfuncs.py",start:4938436,end:4985187,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/f90mod_rules.py",start:4985187,end:4994998,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/auxfuncs.py",start:4994998,end:5016842,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/__main__.py",start:5016842,end:5016927,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/func2subr.py",start:5016927,end:5026282,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/use_rules.py",start:5026282,end:5029869,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/__version__.py",start:5029869,end:5029903,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/capi_maps.py",start:5029903,end:5061312,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/common_rules.py",start:5061312,end:5066233,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/rules.py",start:5066233,end:5124995,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/__init__.pyi",start:5124995,end:5125287,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/src/fortranobject.h",start:5125287,end:5129808,audio:0},{filename:"/lib/python3.9/site-packages/numpy/f2py/src/fortranobject.c",start:5129808,end:5166416,audio:0},{filename:"/lib/python3.9/site-packages/numpy/fft/_pocketfft.py",start:5166416,end:5219313,audio:0},{filename:"/lib/python3.9/site-packages/numpy/fft/setup.py",start:5219313,end:5220041,audio:0},{filename:"/lib/python3.9/site-packages/numpy/fft/__init__.py",start:5220041,end:5228216,audio:0},{filename:"/lib/python3.9/site-packages/numpy/fft/helper.py",start:5228216,end:5234370,audio:0},{filename:"/lib/python3.9/site-packages/numpy/fft/_pocketfft_internal.so",start:5234370,end:5281520,audio:0},{filename:"/lib/python3.9/site-packages/numpy/fft/__init__.pyi",start:5281520,end:5282313,audio:0},{filename:"/lib/python3.9/site-packages/nump
y/lib/_datasource.py",start:5282313,end:5304981,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/arrayterator.py",start:5304981,end:5312044,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/utils.py",start:5312044,end:5345184,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/_version.py",start:5345184,end:5350039,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/scimath.py",start:5350039,end:5364914,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/ufunclike.py",start:5364914,end:5372945,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/setup.py",start:5372945,end:5373350,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/__init__.py",start:5373350,end:5375129,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/nanfunctions.py",start:5375129,end:5434275,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/_iotools.py",start:5434275,end:5465211,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/function_base.py",start:5465211,end:5627220,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/npyio.py",start:5627220,end:5716710,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/polynomial.py",start:5716710,end:5760523,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/user_array.py",start:5760523,end:5768244,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/arraysetops.py",start:5768244,end:5794720,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/twodim_base.py",start:5794720,end:5823661,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/index_tricks.py",start:5823661,end:5854299,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/shape_base.py",start:5854299,end:5892670,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/stride_tricks.py",start:5892670,end:5910514,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/type_check.py",start:5910514,end:5931292,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/histograms.py",start:5931292,end:5971507,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/arraypad.py",start:5971507,end:6002733,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/format.py",start:6002733,end:6034182,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/mixins.py",start:6034182,end:6041234,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/recfunctions.py",start:6041234,end:6097759,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/arrayterator.pyi",start:6097759,end:6099314,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/arraypad.pyi",start:6099314,end:6099410,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/ufunclike.pyi",start:6099410,end:6100721,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/format.pyi",start:6100721,end:6101600,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/index_tricks.pyi",start:6101600,end:6106628,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/mixins.pyi",start:6106628,end:6108781,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/shape_base.pyi",start:6108781,end:6109507,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/_version.pyi",start:6109507,end:6110208,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/utils.pyi",start:6110208,end:6112738,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/arraysetops.pyi",start:6112738,end:6113234,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/type_check.pyi",start:6113234,end:6113695,audio:0},{filename:"/lib/pyt
hon3.9/site-packages/numpy/lib/polynomial.pyi",start:6113695,end:6114110,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/scimath.pyi",start:6114110,end:6114322,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/npyio.pyi",start:6114322,end:6116547,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/function_base.pyi",start:6116547,end:6118538,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/twodim_base.pyi",start:6118538,end:6119397,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/__init__.pyi",start:6119397,end:6124714,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/histograms.pyi",start:6124714,end:6124994,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/nanfunctions.pyi",start:6124994,end:6126066,audio:0},{filename:"/lib/python3.9/site-packages/numpy/lib/stride_tricks.pyi",start:6126066,end:6126576,audio:0},{filename:"/lib/python3.9/site-packages/numpy/linalg/__init__.py",start:6126576,end:6128389,audio:0},{filename:"/lib/python3.9/site-packages/numpy/linalg/linalg.py",start:6128389,end:6217995,audio:0},{filename:"/lib/python3.9/site-packages/numpy/linalg/setup.py",start:6217995,end:6220873,audio:0},{filename:"/lib/python3.9/site-packages/numpy/linalg/lapack_lite.so",start:6220873,end:8124644,audio:0},{filename:"/lib/python3.9/site-packages/numpy/linalg/_umath_linalg.so",start:8124644,end:10141120,audio:0},{filename:"/lib/python3.9/site-packages/numpy/linalg/__init__.pyi",start:10141120,end:10141816,audio:0},{filename:"/lib/python3.9/site-packages/numpy/ma/core.py",start:10141816,end:10406150,audio:0},{filename:"/lib/python3.9/site-packages/numpy/ma/timer_comparison.py",start:10406150,end:10421808,audio:0},{filename:"/lib/python3.9/site-packages/numpy/ma/setup.py",start:10421808,end:10422226,audio:0},{filename:"/lib/python3.9/site-packages/numpy/ma/testutils.py",start:10422226,end:10432465,audio:0},{filename:"/lib/python3.9/site-packages/numpy/ma/bench.py",start:10432465,end:10437348,audio:0},{filename:"/lib/python3.9/site-packages/numpy/ma/__init__.py",start:10437348,end:10438752,audio:0},{filename:"/lib/python3.9/site-packages/numpy/ma/extras.py",start:10438752,end:10497066,audio:0},{filename:"/lib/python3.9/site-packages/numpy/ma/mrecords.py",start:10497066,end:10523752,audio:0},{filename:"/lib/python3.9/site-packages/numpy/ma/__init__.pyi",start:10523752,end:10529750,audio:0},{filename:"/lib/python3.9/site-packages/numpy/ma/extras.pyi",start:10529750,end:10532348,audio:0},{filename:"/lib/python3.9/site-packages/numpy/ma/core.pyi",start:10532348,end:10546474,audio:0},{filename:"/lib/python3.9/site-packages/numpy/ma/mrecords.pyi",start:10546474,end:10548344,audio:0},{filename:"/lib/python3.9/site-packages/numpy/matrixlib/setup.py",start:10548344,end:10548770,audio:0},{filename:"/lib/python3.9/site-packages/numpy/matrixlib/__init__.py",start:10548770,end:10548988,audio:0},{filename:"/lib/python3.9/site-packages/numpy/matrixlib/defmatrix.py",start:10548988,end:10579655,audio:0},{filename:"/lib/python3.9/site-packages/numpy/matrixlib/__init__.pyi",start:10579655,end:10579841,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/hermite.py",start:10579841,end:10631967,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/setup.py",start:10631967,end:10632340,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/__init__.py",start:10632340,end:10639128,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/laguerre.py",start:10639128,end:10689582,audio:0},{filename:"/lib/python3
.9/site-packages/numpy/polynomial/legendre.py",start:10689582,end:10740747,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/polynomial.py",start:10740747,end:10789326,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/chebyshev.py",start:10789326,end:10851706,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/polyutils.py",start:10851706,end:10873813,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/hermite_e.py",start:10873813,end:10926063,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/_polybase.py",start:10926063,end:10962461,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/legendre.pyi",start:10962461,end:10963645,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/hermite.pyi",start:10963645,end:10964868,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/__init__.pyi",start:10964868,end:10965509,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/polynomial.pyi",start:10965509,end:10966647,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/polyutils.pyi",start:10966647,end:10966899,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/chebyshev.pyi",start:10966899,end:10968292,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/laguerre.pyi",start:10968292,end:10969476,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/_polybase.pyi",start:10969476,end:10971729,audio:0},{filename:"/lib/python3.9/site-packages/numpy/polynomial/hermite_e.pyi",start:10971729,end:10972973,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/setup.py",start:10972973,end:10979148,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/__init__.py",start:10979148,end:10986654,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_pickle.py",start:10986654,end:10988956,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_mt19937.so",start:10988956,end:11088942,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_philox.so",start:11088942,end:11181361,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_pcg64.so",start:11181361,end:11291272,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_sfc64.so",start:11291272,end:11365032,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_common.so",start:11365032,end:11601763,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/bit_generator.so",start:11601763,end:11779240,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_generator.so",start:11779240,end:12466718,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_bounded_integers.so",start:12466718,end:12755274,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/mtrand.so",start:12755274,end:13358264,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/__init__.pxd",start:13358264,end:13358695,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_common.pxd",start:13358695,end:13363440,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/c_distributions.pxd",start:13363440,end:13369473,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_philox.pyi",start:13369473,end:13370589,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_bounded_integers.pxd",start:13370589,end:13372258,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_generator.pyi",start:13372258,end:13394466,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/bit_generator.pxd",s
tart:13394466,end:13395473,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/mtrand.pyi",start:13395473,end:13415590,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_pcg64.pyi",start:13415590,end:13416812,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/__init__.pyi",start:13416812,end:13418807,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/bit_generator.pyi",start:13418807,end:13422400,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_mt19937.pyi",start:13422400,end:13423250,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_sfc64.pyi",start:13423250,end:13424085,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_examples/cffi/extending.py",start:13424085,end:13424965,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_examples/cffi/parse.py",start:13424965,end:13426794,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_examples/cython/extending.pyx",start:13426794,end:13429087,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_examples/cython/setup.py",start:13429087,end:13430480,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_examples/cython/extending_distributions.pyx",start:13430480,end:13434350,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_examples/numba/extending_distributions.py",start:13434350,end:13436384,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/_examples/numba/extending.py",start:13436384,end:13438341,audio:0},{filename:"/lib/python3.9/site-packages/numpy/random/lib/libnpyrandom.a",start:13438341,end:13486197,audio:0},{filename:"/lib/python3.9/site-packages/numpy/testing/utils.py",start:13486197,end:13487429,audio:0},{filename:"/lib/python3.9/site-packages/numpy/testing/setup.py",start:13487429,end:13488094,audio:0},{filename:"/lib/python3.9/site-packages/numpy/testing/__init__.py",start:13488094,end:13488660,audio:0},{filename:"/lib/python3.9/site-packages/numpy/testing/print_coercion_tables.py",start:13488660,end:13494828,audio:0},{filename:"/lib/python3.9/site-packages/numpy/testing/decorators.py",start:13494828,end:13494876,audio:0},{filename:"/lib/python3.9/site-packages/numpy/testing/__init__.pyi",start:13494876,end:13497934,audio:0},{filename:"/lib/python3.9/site-packages/numpy/testing/_private/utils.py",start:13497934,end:13583187,audio:0},{filename:"/lib/python3.9/site-packages/numpy/testing/_private/__init__.py",start:13583187,end:13583187,audio:0},{filename:"/lib/python3.9/site-packages/numpy/testing/_private/nosetester.py",start:13583187,end:13602622,audio:0},{filename:"/lib/python3.9/site-packages/numpy/testing/_private/noseclasses.py",start:13602622,end:13617138,audio:0},{filename:"/lib/python3.9/site-packages/numpy/testing/_private/decorators.py",start:13617138,end:13628539,audio:0},{filename:"/lib/python3.9/site-packages/numpy/testing/_private/parameterized.py",start:13628539,end:13644700,audio:0},{filename:"/lib/python3.9/site-packages/numpy/typing/mypy_plugin.py",start:13644700,end:13649108,audio:0},{filename:"/lib/python3.9/site-packages/numpy/typing/_generic_alias.py",start:13649108,end:13655442,audio:0},{filename:"/lib/python3.9/site-packages/numpy/typing/_array_like.py",start:13655442,end:13658909,audio:0},{filename:"/lib/python3.9/site-packages/numpy/typing/_add_docstring.py",start:13658909,end:13662724,audio:0},{filename:"/lib/python3.9/site-packages/numpy/typing/setup.py",start:13662724,end:13663133,audio:0},{filename:"/lib/python3.9/site-packages/numpy/typing/__init__.py",start:
13663133,end:13674422,audio:0},{filename:"/lib/python3.9/site-packages/numpy/typing/_extended_precision.py",start:13674422,end:13675538,audio:0},{filename:"/lib/python3.9/site-packages/numpy/typing/_dtype_like.py",start:13675538,end:13681395,audio:0},{filename:"/lib/python3.9/site-packages/numpy/typing/_char_codes.py",start:13681395,end:13688898,audio:0},{filename:"/lib/python3.9/site-packages/numpy/typing/_nbit.py",start:13688898,end:13689243,audio:0},{filename:"/lib/python3.9/site-packages/numpy/typing/_shape.py",start:13689243,end:13689623,audio:0},{filename:"/lib/python3.9/site-packages/numpy/typing/_scalars.py",start:13689623,end:13690580,audio:0},{filename:"/lib/python3.9/site-packages/numpy/typing/_callable.py",start:13690580,end:13703317,audio:0},{filename:"/lib/python3.9/site-packages/numpy/typing/_ufunc.pyi",start:13703317,end:13714812,audio:0},{filename:"/lib/python3.9/site-packages/numpy-1.21.4-py3.9.egg-info/PKG-INFO",start:13714812,end:13716939,audio:0},{filename:"/lib/python3.9/site-packages/numpy-1.21.4-py3.9.egg-info/not-zip-safe",start:13716939,end:13716940,audio:0},{filename:"/lib/python3.9/site-packages/numpy-1.21.4-py3.9.egg-info/dependency_links.txt",start:13716940,end:13716941,audio:0},{filename:"/lib/python3.9/site-packages/numpy-1.21.4-py3.9.egg-info/entry_points.txt",start:13716941,end:13717054,audio:0},{filename:"/lib/python3.9/site-packages/numpy-1.21.4-py3.9.egg-info/top_level.txt",start:13717054,end:13717060,audio:0},{filename:"/lib/python3.9/site-packages/numpy-1.21.4-py3.9.egg-info/SOURCES.txt",start:13717060,end:13798860,audio:0},{filename:"/bin/f2py",start:13798860,end:13799814,audio:0},{filename:"/bin/f2py3",start:13799814,end:13800770,audio:0},{filename:"/bin/f2py3.9",start:13800770,end:13801730,audio:0}],remote_package_size:7423515,package_uuid:"021d2b74-773f-4d0a-8bf1-7eab75e95c1e"})})(); \ No newline at end of file diff --git a/spaces/qingxu98/gpt-academic/crazy_functions/test_project/cpp/cppipc/shm.cpp b/spaces/qingxu98/gpt-academic/crazy_functions/test_project/cpp/cppipc/shm.cpp deleted file mode 100644 index 593ce3129dc1574dbc8fc8b088cf595df215de93..0000000000000000000000000000000000000000 --- a/spaces/qingxu98/gpt-academic/crazy_functions/test_project/cpp/cppipc/shm.cpp +++ /dev/null @@ -1,103 +0,0 @@ - -#include -#include - -#include "libipc/shm.h" - -#include "libipc/utility/pimpl.h" -#include "libipc/memory/resource.h" - -namespace ipc { -namespace shm { - -class handle::handle_ : public pimpl { -public: - shm::id_t id_ = nullptr; - void* m_ = nullptr; - - ipc::string n_; - std::size_t s_ = 0; -}; - -handle::handle() - : p_(p_->make()) { -} - -handle::handle(char const * name, std::size_t size, unsigned mode) - : handle() { - acquire(name, size, mode); -} - -handle::handle(handle&& rhs) - : handle() { - swap(rhs); -} - -handle::~handle() { - release(); - p_->clear(); -} - -void handle::swap(handle& rhs) { - std::swap(p_, rhs.p_); -} - -handle& handle::operator=(handle rhs) { - swap(rhs); - return *this; -} - -bool handle::valid() const noexcept { - return impl(p_)->m_ != nullptr; -} - -std::size_t handle::size() const noexcept { - return impl(p_)->s_; -} - -char const * handle::name() const noexcept { - return impl(p_)->n_.c_str(); -} - -std::int32_t handle::ref() const noexcept { - return shm::get_ref(impl(p_)->id_); -} - -void handle::sub_ref() noexcept { - shm::sub_ref(impl(p_)->id_); -} - -bool handle::acquire(char const * name, std::size_t size, unsigned mode) { - release(); - impl(p_)->id_ = shm::acquire((impl(p_)->n_ = name).c_str(), 
size, mode); - impl(p_)->m_ = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_)); - return valid(); -} - -std::int32_t handle::release() { - if (impl(p_)->id_ == nullptr) return -1; - return shm::release(detach()); -} - -void* handle::get() const { - return impl(p_)->m_; -} - -void handle::attach(id_t id) { - if (id == nullptr) return; - release(); - impl(p_)->id_ = id; - impl(p_)->m_ = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_)); -} - -id_t handle::detach() { - auto old = impl(p_)->id_; - impl(p_)->id_ = nullptr; - impl(p_)->m_ = nullptr; - impl(p_)->s_ = 0; - impl(p_)->n_.clear(); - return old; -} - -} // namespace shm -} // namespace ipc diff --git a/spaces/quidiaMuxgu/Expedit-SAM/ARCHICAD 17 For Win 7-64 Latest Hot Fix AC17-5005-HOTFIX4-WIN64 Patch.md b/spaces/quidiaMuxgu/Expedit-SAM/ARCHICAD 17 For Win 7-64 Latest Hot Fix AC17-5005-HOTFIX4-WIN64 Patch.md deleted file mode 100644 index 667b79639dccb4ccbedc9bfb9b770f4bb40949b6..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/ARCHICAD 17 For Win 7-64 Latest Hot Fix AC17-5005-HOTFIX4-WIN64 Patch.md +++ /dev/null @@ -1,23 +0,0 @@ -
    -

    How to Install the Latest Hot Fix for ARCHICAD 17 on Windows 7-64

    -

    ARCHICAD 17 is a powerful architectural design software that offers many features and improvements for architects and designers. However, like any software, it may have some bugs and issues that need to be fixed. That's why GRAPHISOFT, the developer of ARCHICAD, releases hot fixes periodically to address these problems and enhance the performance and stability of the software.

    -

    The latest hot fix for ARCHICAD 17 is Hotfix 4 (Build 5005), which was released on September 7, 2022. It resolves a large number of ARCHICAD 17 bugs, such as frequent crashes, energy evaluation miscalculations, BIMx bugs, and editing problems. For more details, you can see the list of fixed bugs on the GRAPHISOFT website[^1^].

    -

    ARCHICAD 17 For Win 7-64 Latest Hot Fix AC17-5005-HOTFIX4-WIN64 Patch


    Download Zip ——— https://geags.com/2uCspj



    -

    The hot fix applies to ARCHICAD 17 (including MEP Modeler, Energy Evaluation and all GRAPHISOFT Add-Ons), GRAPHISOFT BIM Server (including the BIM Server Manager), and standalone BIM Server Manager. It applies to all license types (Commercial, Educational and Trial).

    -

    If you are using ARCHICAD 17 on Windows 7-64, you may wonder how to install the latest hot fix on your computer. Here are the steps you need to follow:

    -
      -
    1. Make sure you have administrator rights on your computer.
    2. Disable any virus checker for the time of the installation.
    3. Download the hot fix installer from the GRAPHISOFT website[^1^]. The file name is AC17-5005-HOTFIX4-WIN64.exe.
    4. Run the installer and follow the instructions on the screen. The installer will automatically search your computer for instances of ARCHICAD 17, GRAPHISOFT BIM Server and standalone BIM Server Manager, and update them if they are not up-to-date.
    5. If you are using Teamwork, make sure that all client ARCHICADs and the BIM Server have the same version number (or build number). If not, you need to update them all to the same hot fix level (a quick way to compare build numbers across machines is sketched after this list).
    6. Restart your computer after the installation is complete.
    -
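
    Step 5 matters most on larger Teamwork setups, where a single machine left on an older build will block collaboration. As a quick illustration (not a GRAPHISOFT tool), here is a minimal Python sketch that compares build numbers you have collected from each machine by hand; the host names and build values are placeholders:

```python
# Minimal sketch: check that every Teamwork machine reports the same build.
# Fill in the mapping yourself from each machine (Hotfix 4 is build 5005).
builds = {
    "workstation-1": 5005,
    "workstation-2": 5005,
    "bim-server": 5005,
}

unique_builds = set(builds.values())
if len(unique_builds) == 1:
    print(f"OK: all machines are on build {unique_builds.pop()}")
else:
    for host, build in sorted(builds.items()):
        print(f"{host}: build {build}")
    print("Mismatch: update every machine to the same hot fix level.")
```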

    Congratulations! You have successfully installed the latest hot fix for ARCHICAD 17 on Windows 7-64. You can now enjoy a more stable and reliable ARCHICAD experience.

    - -

    If you encounter any problems or errors during or after the installation of the hot fix, you can contact the GRAPHISOFT Technical Support team for assistance. You can also visit the GRAPHISOFT Help Center for more information and troubleshooting tips.

    -

    It is recommended that you always keep your ARCHICAD software up-to-date with the latest hot fixes and updates from GRAPHISOFT. This way, you can ensure that you are using the most advanced and reliable version of ARCHICAD, and benefit from the latest features and improvements.

    -

    -

    ARCHICAD 17 is a powerful and versatile architectural design software that allows you to create and manage complex projects with ease and efficiency. With the latest hot fix installed, you can enjoy a smoother and faster ARCHICAD experience, and focus on your creative work.

    -
    -
    \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Kira Lia Drunk REPACK.md b/spaces/quidiaMuxgu/Expedit-SAM/Kira Lia Drunk REPACK.md deleted file mode 100644 index 4493ca667dc3e1e6ef81aedc50ad2f5140acca0d..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Kira Lia Drunk REPACK.md +++ /dev/null @@ -1,6 +0,0 @@ -

    kira lia drunk


    Download File –––––>>> https://geags.com/2uCq9G



    - -Favourite XNXX clips4sale kira lia drunk girl movies: Clips4sale kira lia drunk girl || XXNX Pornhub, Clips4sale kira lia drunk girl || XXNX Xhamster, Clips4sale ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_pack/modules/F0Predictor/HarvestF0Predictor.py b/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_pack/modules/F0Predictor/HarvestF0Predictor.py deleted file mode 100644 index abaf6cc777bc5173d858cdbefc9a12464c03532f..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_pack/modules/F0Predictor/HarvestF0Predictor.py +++ /dev/null @@ -1,86 +0,0 @@ -from lib.infer.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import pyworld -import numpy as np - - -class HarvestF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def resize_f0(self, x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * target_len, len(source)) / target_len, - np.arange(0, len(source)), - source, - ) - res = np.nan_to_num(target) - return res - - def compute_f0(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.harvest( - wav.astype(np.double), - fs=self.hop_length, - f0_ceil=self.f0_max, - f0_floor=self.f0_min, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.fs) - return self.interpolate_f0(self.resize_f0(f0, p_len))[0] - - def compute_f0_uv(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.harvest( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/spaces/r3gm/RVC_HF/infer/lib/uvr5_pack/lib_v5/spec_utils.py b/spaces/r3gm/RVC_HF/infer/lib/uvr5_pack/lib_v5/spec_utils.py deleted file mode 100644 index a9634fd51ff47bf90211839231774719154c37cf..0000000000000000000000000000000000000000 --- a/spaces/r3gm/RVC_HF/infer/lib/uvr5_pack/lib_v5/spec_utils.py +++ /dev/null @@ -1,672 +0,0 @@ -import hashlib -import json -import math -import os - -import librosa -import numpy as np -import soundfile as sf -from tqdm import tqdm - - -def crop_center(h1, h2): - h1_shape = h1.size() - h2_shape = h2.size() - - if h1_shape[3] == h2_shape[3]: - return h1 - elif h1_shape[3] < h2_shape[3]: - raise ValueError("h1_shape[3] must be greater than h2_shape[3]") - - # s_freq = (h2_shape[2] - h1_shape[2]) // 2 - # e_freq = s_freq + 
h1_shape[2] - s_time = (h1_shape[3] - h2_shape[3]) // 2 - e_time = s_time + h2_shape[3] - h1 = h1[:, :, :, s_time:e_time] - - return h1 - - -def wave_to_spectrogram( - wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False -): - if reverse: - wave_left = np.flip(np.asfortranarray(wave[0])) - wave_right = np.flip(np.asfortranarray(wave[1])) - elif mid_side: - wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1])) - elif mid_side_b2: - wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5)) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5)) - else: - wave_left = np.asfortranarray(wave[0]) - wave_right = np.asfortranarray(wave[1]) - - spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length) - spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length) - - spec = np.asfortranarray([spec_left, spec_right]) - - return spec - - -def wave_to_spectrogram_mt( - wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False -): - import threading - - if reverse: - wave_left = np.flip(np.asfortranarray(wave[0])) - wave_right = np.flip(np.asfortranarray(wave[1])) - elif mid_side: - wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1])) - elif mid_side_b2: - wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5)) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5)) - else: - wave_left = np.asfortranarray(wave[0]) - wave_right = np.asfortranarray(wave[1]) - - def run_thread(**kwargs): - global spec_left - spec_left = librosa.stft(**kwargs) - - thread = threading.Thread( - target=run_thread, - kwargs={"y": wave_left, "n_fft": n_fft, "hop_length": hop_length}, - ) - thread.start() - spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length) - thread.join() - - spec = np.asfortranarray([spec_left, spec_right]) - - return spec - - -def combine_spectrograms(specs, mp): - l = min([specs[i].shape[2] for i in specs]) - spec_c = np.zeros(shape=(2, mp.param["bins"] + 1, l), dtype=np.complex64) - offset = 0 - bands_n = len(mp.param["band"]) - - for d in range(1, bands_n + 1): - h = mp.param["band"][d]["crop_stop"] - mp.param["band"][d]["crop_start"] - spec_c[:, offset : offset + h, :l] = specs[d][ - :, mp.param["band"][d]["crop_start"] : mp.param["band"][d]["crop_stop"], :l - ] - offset += h - - if offset > mp.param["bins"]: - raise ValueError("Too much bins") - - # lowpass fiter - if ( - mp.param["pre_filter_start"] > 0 - ): # and mp.param['band'][bands_n]['res_type'] in ['scipy', 'polyphase']: - if bands_n == 1: - spec_c = fft_lp_filter( - spec_c, mp.param["pre_filter_start"], mp.param["pre_filter_stop"] - ) - else: - gp = 1 - for b in range( - mp.param["pre_filter_start"] + 1, mp.param["pre_filter_stop"] - ): - g = math.pow( - 10, -(b - mp.param["pre_filter_start"]) * (3.5 - gp) / 20.0 - ) - gp = g - spec_c[:, b, :] *= g - - return np.asfortranarray(spec_c) - - -def spectrogram_to_image(spec, mode="magnitude"): - if mode == "magnitude": - if np.iscomplexobj(spec): - y = np.abs(spec) - else: - y = spec - y = np.log10(y**2 + 1e-8) - elif mode == "phase": - if np.iscomplexobj(spec): - y = np.angle(spec) - else: - y = spec - - y -= y.min() - y *= 255 / y.max() - img = np.uint8(y) - - if y.ndim == 3: - img = img.transpose(1, 2, 0) - img = np.concatenate([np.max(img, axis=2, keepdims=True), img], axis=2) - - return img - - -def reduce_vocal_aggressively(X, y, softmask): - v = X - 
y - y_mag_tmp = np.abs(y) - v_mag_tmp = np.abs(v) - - v_mask = v_mag_tmp > y_mag_tmp - y_mag = np.clip(y_mag_tmp - v_mag_tmp * v_mask * softmask, 0, np.inf) - - return y_mag * np.exp(1.0j * np.angle(y)) - - -def mask_silence(mag, ref, thres=0.2, min_range=64, fade_size=32): - if min_range < fade_size * 2: - raise ValueError("min_range must be >= fade_area * 2") - - mag = mag.copy() - - idx = np.where(ref.mean(axis=(0, 1)) < thres)[0] - starts = np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0]) - ends = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1]) - uninformative = np.where(ends - starts > min_range)[0] - if len(uninformative) > 0: - starts = starts[uninformative] - ends = ends[uninformative] - old_e = None - for s, e in zip(starts, ends): - if old_e is not None and s - old_e < fade_size: - s = old_e - fade_size * 2 - - if s != 0: - weight = np.linspace(0, 1, fade_size) - mag[:, :, s : s + fade_size] += weight * ref[:, :, s : s + fade_size] - else: - s -= fade_size - - if e != mag.shape[2]: - weight = np.linspace(1, 0, fade_size) - mag[:, :, e - fade_size : e] += weight * ref[:, :, e - fade_size : e] - else: - e += fade_size - - mag[:, :, s + fade_size : e - fade_size] += ref[ - :, :, s + fade_size : e - fade_size - ] - old_e = e - - return mag - - -def align_wave_head_and_tail(a, b): - l = min([a[0].size, b[0].size]) - - return a[:l, :l], b[:l, :l] - - -def cache_or_load(mix_path, inst_path, mp): - mix_basename = os.path.splitext(os.path.basename(mix_path))[0] - inst_basename = os.path.splitext(os.path.basename(inst_path))[0] - - cache_dir = "mph{}".format( - hashlib.sha1(json.dumps(mp.param, sort_keys=True).encode("utf-8")).hexdigest() - ) - mix_cache_dir = os.path.join("cache", cache_dir) - inst_cache_dir = os.path.join("cache", cache_dir) - - os.makedirs(mix_cache_dir, exist_ok=True) - os.makedirs(inst_cache_dir, exist_ok=True) - - mix_cache_path = os.path.join(mix_cache_dir, mix_basename + ".npy") - inst_cache_path = os.path.join(inst_cache_dir, inst_basename + ".npy") - - if os.path.exists(mix_cache_path) and os.path.exists(inst_cache_path): - X_spec_m = np.load(mix_cache_path) - y_spec_m = np.load(inst_cache_path) - else: - X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} - - for d in range(len(mp.param["band"]), 0, -1): - bp = mp.param["band"][d] - - if d == len(mp.param["band"]): # high-end band - X_wave[d], _ = librosa.load( - mix_path, bp["sr"], False, dtype=np.float32, res_type=bp["res_type"] - ) - y_wave[d], _ = librosa.load( - inst_path, - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - else: # lower bands - X_wave[d] = librosa.resample( - X_wave[d + 1], - mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - y_wave[d] = librosa.resample( - y_wave[d + 1], - mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - - X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d]) - - X_spec_s[d] = wave_to_spectrogram( - X_wave[d], - bp["hl"], - bp["n_fft"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - y_spec_s[d] = wave_to_spectrogram( - y_wave[d], - bp["hl"], - bp["n_fft"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - - del X_wave, y_wave - - X_spec_m = combine_spectrograms(X_spec_s, mp) - y_spec_m = combine_spectrograms(y_spec_s, mp) - - if X_spec_m.shape != y_spec_m.shape: - raise ValueError("The combined spectrograms are different: " + mix_path) - - _, ext = os.path.splitext(mix_path) - - 
np.save(mix_cache_path, X_spec_m) - np.save(inst_cache_path, y_spec_m) - - return X_spec_m, y_spec_m - - -def spectrogram_to_wave(spec, hop_length, mid_side, mid_side_b2, reverse): - spec_left = np.asfortranarray(spec[0]) - spec_right = np.asfortranarray(spec[1]) - - wave_left = librosa.istft(spec_left, hop_length=hop_length) - wave_right = librosa.istft(spec_right, hop_length=hop_length) - - if reverse: - return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)]) - elif mid_side: - return np.asfortranarray( - [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)] - ) - elif mid_side_b2: - return np.asfortranarray( - [ - np.add(wave_right / 1.25, 0.4 * wave_left), - np.subtract(wave_left / 1.25, 0.4 * wave_right), - ] - ) - else: - return np.asfortranarray([wave_left, wave_right]) - - -def spectrogram_to_wave_mt(spec, hop_length, mid_side, reverse, mid_side_b2): - import threading - - spec_left = np.asfortranarray(spec[0]) - spec_right = np.asfortranarray(spec[1]) - - def run_thread(**kwargs): - global wave_left - wave_left = librosa.istft(**kwargs) - - thread = threading.Thread( - target=run_thread, kwargs={"stft_matrix": spec_left, "hop_length": hop_length} - ) - thread.start() - wave_right = librosa.istft(spec_right, hop_length=hop_length) - thread.join() - - if reverse: - return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)]) - elif mid_side: - return np.asfortranarray( - [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)] - ) - elif mid_side_b2: - return np.asfortranarray( - [ - np.add(wave_right / 1.25, 0.4 * wave_left), - np.subtract(wave_left / 1.25, 0.4 * wave_right), - ] - ) - else: - return np.asfortranarray([wave_left, wave_right]) - - -def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None): - wave_band = {} - bands_n = len(mp.param["band"]) - offset = 0 - - for d in range(1, bands_n + 1): - bp = mp.param["band"][d] - spec_s = np.ndarray( - shape=(2, bp["n_fft"] // 2 + 1, spec_m.shape[2]), dtype=complex - ) - h = bp["crop_stop"] - bp["crop_start"] - spec_s[:, bp["crop_start"] : bp["crop_stop"], :] = spec_m[ - :, offset : offset + h, : - ] - - offset += h - if d == bands_n: # higher - if extra_bins_h: # if --high_end_process bypass - max_bin = bp["n_fft"] // 2 - spec_s[:, max_bin - extra_bins_h : max_bin, :] = extra_bins[ - :, :extra_bins_h, : - ] - if bp["hpf_start"] > 0: - spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1) - if bands_n == 1: - wave = spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - else: - wave = np.add( - wave, - spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ), - ) - else: - sr = mp.param["band"][d + 1]["sr"] - if d == 1: # lower - spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"]) - wave = librosa.resample( - spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ), - bp["sr"], - sr, - res_type="sinc_fastest", - ) - else: # mid - spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1) - spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"]) - wave2 = np.add( - wave, - spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ), - ) - # wave = librosa.core.resample(wave2, bp['sr'], sr, res_type="sinc_fastest") - wave = librosa.core.resample(wave2, 
bp["sr"], sr, res_type="scipy") - - return wave.T - - -def fft_lp_filter(spec, bin_start, bin_stop): - g = 1.0 - for b in range(bin_start, bin_stop): - g -= 1 / (bin_stop - bin_start) - spec[:, b, :] = g * spec[:, b, :] - - spec[:, bin_stop:, :] *= 0 - - return spec - - -def fft_hp_filter(spec, bin_start, bin_stop): - g = 1.0 - for b in range(bin_start, bin_stop, -1): - g -= 1 / (bin_start - bin_stop) - spec[:, b, :] = g * spec[:, b, :] - - spec[:, 0 : bin_stop + 1, :] *= 0 - - return spec - - -def mirroring(a, spec_m, input_high_end, mp): - if "mirroring" == a: - mirror = np.flip( - np.abs( - spec_m[ - :, - mp.param["pre_filter_start"] - - 10 - - input_high_end.shape[1] : mp.param["pre_filter_start"] - - 10, - :, - ] - ), - 1, - ) - mirror = mirror * np.exp(1.0j * np.angle(input_high_end)) - - return np.where( - np.abs(input_high_end) <= np.abs(mirror), input_high_end, mirror - ) - - if "mirroring2" == a: - mirror = np.flip( - np.abs( - spec_m[ - :, - mp.param["pre_filter_start"] - - 10 - - input_high_end.shape[1] : mp.param["pre_filter_start"] - - 10, - :, - ] - ), - 1, - ) - mi = np.multiply(mirror, input_high_end * 1.7) - - return np.where(np.abs(input_high_end) <= np.abs(mi), input_high_end, mi) - - -def ensembling(a, specs): - for i in range(1, len(specs)): - if i == 1: - spec = specs[0] - - ln = min([spec.shape[2], specs[i].shape[2]]) - spec = spec[:, :, :ln] - specs[i] = specs[i][:, :, :ln] - - if "min_mag" == a: - spec = np.where(np.abs(specs[i]) <= np.abs(spec), specs[i], spec) - if "max_mag" == a: - spec = np.where(np.abs(specs[i]) >= np.abs(spec), specs[i], spec) - - return spec - - -def stft(wave, nfft, hl): - wave_left = np.asfortranarray(wave[0]) - wave_right = np.asfortranarray(wave[1]) - spec_left = librosa.stft(wave_left, nfft, hop_length=hl) - spec_right = librosa.stft(wave_right, nfft, hop_length=hl) - spec = np.asfortranarray([spec_left, spec_right]) - - return spec - - -def istft(spec, hl): - spec_left = np.asfortranarray(spec[0]) - spec_right = np.asfortranarray(spec[1]) - - wave_left = librosa.istft(spec_left, hop_length=hl) - wave_right = librosa.istft(spec_right, hop_length=hl) - wave = np.asfortranarray([wave_left, wave_right]) - - -if __name__ == "__main__": - import argparse - import sys - import time - - import cv2 - from model_param_init import ModelParameters - - p = argparse.ArgumentParser() - p.add_argument( - "--algorithm", - "-a", - type=str, - choices=["invert", "invert_p", "min_mag", "max_mag", "deep", "align"], - default="min_mag", - ) - p.add_argument( - "--model_params", - "-m", - type=str, - default=os.path.join("modelparams", "1band_sr44100_hl512.json"), - ) - p.add_argument("--output_name", "-o", type=str, default="output") - p.add_argument("--vocals_only", "-v", action="store_true") - p.add_argument("input", nargs="+") - args = p.parse_args() - - start_time = time.time() - - if args.algorithm.startswith("invert") and len(args.input) != 2: - raise ValueError("There should be two input files.") - - if not args.algorithm.startswith("invert") and len(args.input) < 2: - raise ValueError("There must be at least two input files.") - - wave, specs = {}, {} - mp = ModelParameters(args.model_params) - - for i in range(len(args.input)): - spec = {} - - for d in range(len(mp.param["band"]), 0, -1): - bp = mp.param["band"][d] - - if d == len(mp.param["band"]): # high-end band - wave[d], _ = librosa.load( - args.input[i], - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - - if len(wave[d].shape) == 1: # mono to stereo - wave[d] = 
np.array([wave[d], wave[d]]) - else: # lower bands - wave[d] = librosa.resample( - wave[d + 1], - mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - - spec[d] = wave_to_spectrogram( - wave[d], - bp["hl"], - bp["n_fft"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - - specs[i] = combine_spectrograms(spec, mp) - - del wave - - if args.algorithm == "deep": - d_spec = np.where(np.abs(specs[0]) <= np.abs(spec[1]), specs[0], spec[1]) - v_spec = d_spec - specs[1] - sf.write( - os.path.join("{}.wav".format(args.output_name)), - cmb_spectrogram_to_wave(v_spec, mp), - mp.param["sr"], - ) - - if args.algorithm.startswith("invert"): - ln = min([specs[0].shape[2], specs[1].shape[2]]) - specs[0] = specs[0][:, :, :ln] - specs[1] = specs[1][:, :, :ln] - - if "invert_p" == args.algorithm: - X_mag = np.abs(specs[0]) - y_mag = np.abs(specs[1]) - max_mag = np.where(X_mag >= y_mag, X_mag, y_mag) - v_spec = specs[1] - max_mag * np.exp(1.0j * np.angle(specs[0])) - else: - specs[1] = reduce_vocal_aggressively(specs[0], specs[1], 0.2) - v_spec = specs[0] - specs[1] - - if not args.vocals_only: - X_mag = np.abs(specs[0]) - y_mag = np.abs(specs[1]) - v_mag = np.abs(v_spec) - - X_image = spectrogram_to_image(X_mag) - y_image = spectrogram_to_image(y_mag) - v_image = spectrogram_to_image(v_mag) - - cv2.imwrite("{}_X.png".format(args.output_name), X_image) - cv2.imwrite("{}_y.png".format(args.output_name), y_image) - cv2.imwrite("{}_v.png".format(args.output_name), v_image) - - sf.write( - "{}_X.wav".format(args.output_name), - cmb_spectrogram_to_wave(specs[0], mp), - mp.param["sr"], - ) - sf.write( - "{}_y.wav".format(args.output_name), - cmb_spectrogram_to_wave(specs[1], mp), - mp.param["sr"], - ) - - sf.write( - "{}_v.wav".format(args.output_name), - cmb_spectrogram_to_wave(v_spec, mp), - mp.param["sr"], - ) - else: - if not args.algorithm == "deep": - sf.write( - os.path.join("ensembled", "{}.wav".format(args.output_name)), - cmb_spectrogram_to_wave(ensembling(args.algorithm, specs), mp), - mp.param["sr"], - ) - - if args.algorithm == "align": - trackalignment = [ - { - "file1": '"{}"'.format(args.input[0]), - "file2": '"{}"'.format(args.input[1]), - } - ] - - for i, e in tqdm(enumerate(trackalignment), desc="Performing Alignment..."): - os.system(f"python lib/align_tracks.py {e['file1']} {e['file2']}") - - # print('Total time: {0:.{1}f}s'.format(time.time() - start_time, 1)) diff --git a/spaces/radames/UserControllableLT-Latent-Transformer/training/__init__.py b/spaces/radames/UserControllableLT-Latent-Transformer/training/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Clyo System Software The Benefits of Using the Crack Serial Key.md b/spaces/raedeXanto/academic-chatgpt-beta/Clyo System Software The Benefits of Using the Crack Serial Key.md deleted file mode 100644 index 29c8581be89c1c3ed6290e9002104add0475e5ee..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Clyo System Software The Benefits of Using the Crack Serial Key.md +++ /dev/null @@ -1,158 +0,0 @@ - -

    Greek WPA Finder V3.5 Free Download For Pc

    -

    Do you want to find out the default passwords of WiFi networks around you? Do you want to test your own network security and change your password if needed? Do you want to do all that from your PC? If yes, then you might be interested in Greek WPA Finder V3.5, a free Android app that can help you do all that and more.

    -

    Greek WPA Finder is an app that can recover default WPA/WPS keys of specific router brands that are popular in Greece and other countries. It can also help you check your network security and change your password if it is too weak or common. In this article, we will show you how to download and use Greek WPA Finder on your PC, as well as how to contribute to the app and its developer.

    -

    Greek Wpa Finder V3.5 Free Download For Pc


    Download Zip · https://tinourl.com/2uL3kb



    -

    What is Greek WPA Finder?

    -

    Greek WPA Finder is an app that was originally intended for use inside Greece, as it can recover the default WPA/WPS keys of specific router brands that are popular there, such as Thomson, Connex, Cyta, etc. However, nowadays there is an ongoing effort to gradually support more routers around the world, so you can still try your luck with this app even if you are not located in Greece.

    -

    The app works by scanning for WiFi networks in your vicinity and showing you which ones are supported by its database of default passwords. You can then try to connect to those networks by using the passwords provided by the app. Of course, this only works if the owners of those networks have not changed their passwords from the factory settings.

    -
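
    If you are curious what the app is reading, Windows itself can list the same networks from the command line. Below is a minimal sketch, assuming a Windows PC with the WLAN service running; it shells out to the standard `netsh wlan show networks mode=bssid` command, and the regular expressions are best-effort because netsh labels vary between Windows versions and locales:

```python
import re
import subprocess

# Ask Windows for every visible network, including per-BSSID details.
output = subprocess.run(
    ["netsh", "wlan", "show", "networks", "mode=bssid"],
    capture_output=True, text=True, check=True,
).stdout

# Best-effort parsing of the human-readable report.
ssids = re.findall(r"^SSID \d+\s*: (.+)$", output, re.MULTILINE)
bssids = re.findall(r"BSSID \d+\s*: ([0-9A-Fa-f:]{17})", output)
signals = re.findall(r"Signal\s*: (\d+)%", output)

print("Networks in range:", ssids)
for bssid, signal in zip(bssids, signals):
    print(f"  {bssid} ({signal}% signal)")
```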

    -

    The app also allows you to test your own network security by showing you if your password is too weak or common. If that is the case, you can easily change your password from within the app by following some simple steps. The app also gives you some tips on how to create a strong and unique password for your WiFi network.

    -
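
    The check the app performs boils down to "is this password short, predictable, or on a known list?". As a rough illustration of that idea (the word list below is a tiny sample for the sketch, not the database the app actually uses):

```python
# Toy model of a "too weak or common" password check.
COMMON_PASSWORDS = {"12345678", "password", "qwertyuiop", "11111111"}

def is_weak(password: str) -> bool:
    if len(password) < 12:
        return True                 # too short to resist brute force
    if password.lower() in COMMON_PASSWORDS:
        return True                 # appears on common-password lists
    classes = [
        any(c.islower() for c in password),
        any(c.isupper() for c in password),
        any(c.isdigit() for c in password),
        any(not c.isalnum() for c in password),
    ]
    return sum(classes) < 3         # not enough character variety

print(is_weak("12345678"))                 # True
print(is_weak("T4k3-a-L0ng-Passphrase!"))  # False
```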

    The app is free to download and use, but it contains ads and in-app purchases. You can remove the ads by making a donation to the developer or by purchasing some premium features, such as offline mode, auto-connect mode, etc.

    -

    How to install and use Greek WPA Finder on PC?

    -

    Since Greek WPA Finder is an Android app, you cannot install it directly on your PC. However, you can still use it on your PC with an Android emulator, which is software that allows you to run Android apps on your computer.

    -

    There are many Android emulators available online, but we recommend using BlueStacks, which is one of the most popular and reliable ones. Here are the steps on how to install and use Greek WPA Finder on PC using BlueStacks:

    -

    Downloading the app

    -
      -
    1. Go to https://www.bluestacks.com/ and download BlueStacks for your PC.
    2. Run the installer and follow the instructions to install BlueStacks on your PC.
    3. Launch BlueStacks and sign in with your Google account.
    4. Go to https://play.google.com/store/apps/details?id=com.Fisherman.Greekwpa and download Greek WPA Finder from Google Play Store.
    5. Alternatively, you can also download the APK file of Greek WPA Finder from https://apkpure.com/gwpa-finder/com.Fisherman.Greekwpa or any other reliable source.
    6. If you downloaded the APK file, drag and drop it onto BlueStacks or double-click on it to install it.
    -

    Installing the app

    -
      -
    1. Once you have downloaded Greek WPA Finder from Google Play Store or as an APK file, you will see its icon on the BlueStacks home screen.
    2. Click on the icon to launch the app.
    3. The first time you open the app, it will ask you for some permissions, such as location access, WiFi access, etc. Grant all the permissions for the app to work properly.
    -

    Using the app

    -
      -
    1. To use Greek WPA Finder on PC, you need to have a WiFi adapter or dongle connected to your PC.
    2. Make sure your WiFi adapter or dongle is enabled and working properly.
    3. In BlueStacks settings, go to Preferences > Engine > Performance > Custom > Memory > 4096 MB (or higher) > Save Changes > Restart Now. This will increase the memory allocated for BlueStacks and improve its performance.
    4. In BlueStacks settings, go to Preferences > Engine > Graphics Mode > OpenGL > Save Changes > Restart Now. This will enable OpenGL mode for BlueStacks and improve its graphics quality.
    5. In BlueStacks settings, go to Preferences > Advanced > Location Access > High Accuracy > Save Changes. This will enable high accuracy mode for location access in BlueStacks and improve its accuracy.
    6. In BlueStacks settings, go to Preferences > Advanced > Device Profile > Samsung Galaxy S20 Ultra 5G (or any other device with high specifications) > Save Changes. This will change the device profile for BlueStacks and improve its compatibility with apps.
    7. In BlueStacks settings, go to Preferences > Advanced > Native Gamepad Support > Enable Native Gamepad Support > Save Changes. This will enable native gamepad support in BlueStacks and allow you to use a gamepad or controller with apps.
    8. In BlueStacks settings, go to Preferences > Advanced > Enable High Frame Rates > Save Changes. This will enable high frame rates in BlueStacks and improve its smoothness.
    9. In BlueStacks settings, go to Preferences > Advanced > Enable VSync (Vertical Synchronization) > Save Changes. This will enable VSync in BlueStacks and reduce screen tearing and stuttering.
    10. In BlueStacks settings, go to Preferences > Advanced > Enable ASTC Texture (Adaptive Scalable Texture Compression) > Save Changes. This will enable ASTC texture in BlueStacks and reduce texture size and bandwidth usage.
    11. In BlueStacks settings, go to Preferences > Advanced > Enable FPS (Frames Per Second) Counter > Save Changes. This will show you how many frames per second BlueStacks is rendering.
    12. In BlueStacks settings, go to Preferences > Advanced > Enable CPU Usage Counter > Save Changes. This will show you how much CPU power BlueStacks is using.
    13. Open Greek WPA Finder and tap on the Scan button to scan for WiFi networks in your vicinity.
    14. The app will show you a list of WiFi networks with their names, signal strengths, and icons.
    15. The icons indicate whether the network is supported by the app or not. A green icon means that the network is supported and the app can provide you with a password. A red icon means that the network is not supported and the app cannot provide you with a password. A yellow icon means that the network is partially supported and the app can provide you with some possible passwords.
    16. To connect to a supported network, tap on it and select Connect from the menu that appears.
    17. The app will show you the default password of the network and ask you if you want to copy it or connect automatically.
    18. If you choose to copy it, you can paste it in the WiFi settings of your PC and connect manually.
    19. If you choose to connect automatically, the app will try to connect to the network using the password. If the connection is successful, you will see a message saying "Connected". If the connection fails, you will see a message saying "Connection Failed".
    20. In case of a partially supported network, tap on it and select Show Passwords from the menu that appears.
    21. The app will show you a list of possible passwords for the network and ask you to try them one by one until you find the correct one.
    22. To try a password, tap on it and select Copy or Connect from the menu that appears.
    23. Follow the same steps as above to copy or connect automatically using the password.
    -

    How to test and improve your WiFi security with Greek WPA Finder?

    -

    Greek WPA Finder can also help you test and improve your own WiFi security by showing you if your password is too weak or common. If that is the case, you can easily change your password from within the app by following some simple steps. Here is how to do it:

    -
      -
    1. Open Greek WPA Finder and tap on the Scan button to scan for WiFi networks in your vicinity.
    2. Find your own network in the list and tap on it.
    3. If your network is supported by the app, it means that your password is too weak or common and anyone can access it using the app.
    4. To change your password, tap on Change Password from the menu that appears.
    5. The app will show you some tips on how to create a strong and unique password for your WiFi network. Read them carefully and follow them.
    6. Enter your new password in the text field and tap on Change Password.
    7. The app will try to change your password using a web browser. If it succeeds, you will see a message saying "Password Changed". If it fails, you will see a message saying "Password Change Failed".
    8. In case of a password change failure, you can try to change your password manually by accessing your router's web interface using its IP address. You can find your router's IP address in the WiFi settings of your PC or by using an app like IP Scanner (a command-line way to read it is sketched just after this list).
    -
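
    For the manual route in the last step, the router's address is normally your PC's default gateway. Here is a minimal sketch that reads it from the output of the standard `ipconfig` command; the parsing assumes an English-language Windows installation, since the label text is localized:

```python
import re
import subprocess

# On Windows, the router normally acts as the default gateway.
output = subprocess.run(["ipconfig"], capture_output=True, text=True).stdout

# Best-effort parse; "Default Gateway" is the English label only.
gateways = re.findall(r"Default Gateway[ .]*: (\d+\.\d+\.\d+\.\d+)", output)
if gateways:
    print("Router web interface is probably at: http://" + gateways[0])
else:
    print("No IPv4 default gateway found; check your adapter status.")
```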

    How to contribute to the app and its developer?

    -

    Greek WPA Finder is a free app that relies on user feedback and support to improve its functionality and compatibility. If you like the app and want to contribute to its development, here are some ways you can do it:

    -
      -
    • Give a positive rating and review for the app on Google Play Store. This will help more people discover and use the app.
    • Report any bugs or errors that you encounter while using the app. You can do this by tapping on Report Bug from the menu that appears when you tap on More Options (three dots) in the top right corner of the app.
    • Send feedback or suggestions for improving the app. You can do this by tapping on Send Feedback from the menu that appears when you tap on More Options (three dots) in the top right corner of the app.
    • Donate to the developer to support his work and remove ads from the app. You can do this by tapping on Donate from the menu that appears when you tap on More Options (three dots) in the top right corner of the app.
    • Purchase some premium features from within the app to enhance your experience. You can do this by tapping on Premium Features from the menu that appears when you tap on More Options (three dots) in the top right corner of the app.
    -

    Conclusion

    -

    Greek WPA Finder is a useful app that can help you find default passwords of WiFi networks around you and test your own network security. It can also help you change your password if it is too weak or common. You can use the app on your PC by using an Android emulator like BlueStacks. You can also contribute to the app and its developer by giving feedback, reporting bugs, donating, or purchasing premium features.

    -

    If you are looking for a free and easy way to access WiFi networks or improve your WiFi security, you should definitely give Greek WPA Finder a try. However, remember to use the app responsibly and ethically. Do not use the app to access networks without the owners' permission or consent. Do not use the app for illegal or malicious purposes. Do inform the owners of any network that you access using the app about their security vulnerability and help them change their password.

    -

    FAQs

    -
      -
    1. What routers are supported by Greek WPA Finder?

      Greek WPA Finder supports routers from various brands that are popular in Greece and other countries, such as Thomson, Connex, Cyta, NetFaster, SpeedTouch, Huawei, OTE, etc. You can check the full list of supported routers in the app's description on Google Play Store.

    2. How accurate is Greek WPA Finder?

      Greek WPA Finder is very accurate in finding default passwords of supported routers. However, it cannot guarantee that the passwords will work in every case. Some factors that may affect the accuracy of the app are:

      • The owners of the networks may have changed their passwords from the factory settings.
      • The routers may have different firmware versions or configurations that may affect the default passwords.
      • The app may not have updated its database of default passwords for some routers.

    3. Is Greek WPA Finder legal and safe?

      Greek WPA Finder is legal and safe as long as you use it for legitimate and ethical purposes. The app does not hack or crack any network or password. It only recovers default passwords that are publicly available online or in router manuals. The app does not collect or store any personal or sensitive data from your device or network. The app does not transmit or share any data with third parties.

    4. How can I remove ads from Greek WPA Finder?

      You can remove ads from Greek WPA Finder by making a donation to the developer or by purchasing some premium features from within the app. You can also remove ads by using an ad blocker on your PC.

    5. How can I contact the developer of Greek WPA Finder?

      You can contact the developer of Greek WPA Finder by sending an email to thanosfisherman@gmail.com. You can also follow him on Twitter @thanosfisherman or visit his website https://thanosfisherman.github.io/.

      -
    -

    -
    -
    \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Download Jajantaram Mamantram Man Movie In Hindi 720p High Quality.md b/spaces/raedeXanto/academic-chatgpt-beta/Download Jajantaram Mamantram Man Movie In Hindi 720p High Quality.md deleted file mode 100644 index db2f7ad24cd7bc6e80b2cbdf7b4f54614d2f5be4..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Download Jajantaram Mamantram Man Movie In Hindi 720p High Quality.md +++ /dev/null @@ -1,15 +0,0 @@ -
    -

    Download Jajantaram Mamantram Man Movie In Hindi 720p: A Fun-Filled Fantasy Comedy Based on Gulliver's Travels

    -

    If you are looking for a Bollywood movie that will make you laugh and entertain you with its imaginative plot and special effects, then you should download Jajantaram Mamantram Man movie in Hindi 720p. This movie is a 2003 fantasy action comedy film directed by Soumitra Ranade and produced by Arunima Roy. It is based on Jonathan Swift's 1726 novel Gulliver's Travels and the tale of Bakasura from Indian mythology.

    -

    Download Jajantaram Mamantram Man Movie In Hindi 720p


    DOWNLOAD > https://tinourl.com/2uL537



    -

    The movie stars Jaaved Jaffrey as Aditya, a young man who gets shipwrecked on a mysterious island where he encounters a tribe of tiny people called Jhamunda. He becomes their friend and protector against the evil giant Chattan Singh (Gulshan Grover), who wants to capture them and eat them. Along the way, he also falls in love with the beautiful princess Amolhi (Madhura Velankar) and meets a friendly mermaid (Dipannita Sharma).

    -

    The movie is full of humor, adventure, romance and action. It showcases the talent of Jaaved Jaffrey, who is known for his comic timing and mimicry skills. He also sings one of the songs in the movie, "Gumsum Gumsum". The movie also features some impressive visual effects and animation, which were nominated for the Best Special Effects Award at the Zee Cine Awards. The movie has a catchy soundtrack composed by Three Brothers And A Violin, which includes songs like "Mil Gaye Yaaro", "Rambam" and "Chaal Hain".

    -

    You can download Jajantaram Mamantram Man movie in Hindi 720p from various online platforms like Prime Video, YouTube and others. You can also watch it online or stream it on your devices. This movie is a perfect family entertainer that will make you smile and enjoy the magic of cinema.

    - -

    The movie is directed by Soumitra Ranade, who is also the writer and the executive producer. He is known for his animation and illustration work, as well as his films like Goopi Gawaiya Bagha Bajaiya and Albert Pinto Ko Gussa Kyun Aata Hai. He has also written books like The Adventures of Toto The Auto and The Adventures of Phat Phat.

    -

    -

    The movie features a talented cast of actors who have done justice to their roles. Jaaved Jaffrey is the main protagonist who plays Aditya, a brave and kind-hearted man who helps the Jhamundas. He is one of the most versatile actors in Bollywood, who has done comedy, action, drama and musicals. He is also a dancer, singer, voice artist and television host. He has appeared in movies like Salaam Namaste, Dhamaal, Singh Is Kinng and 3 Idiots.

    -

    Gulshan Grover is the main antagonist who plays Chattan Singh, a cruel and greedy giant who wants to destroy the Jhamundas. He is one of the most famous villains in Bollywood, who has played negative roles in over 400 films. He is also known as the "Bad Man" of Indian cinema. He has appeared in movies like Ram Lakhan, Mohra, Hera Pheri and Khiladiyon Ka Khiladi.

    -

    The movie also has other supporting actors like Manav Kaul, Joy Fernandes, Madhura Velankar, Kavita Murkar, Dipannita Sharma and others who have done a commendable job in their respective roles. The movie also has some cameo appearances by Naseeruddin Shah and Raghuvir Yadav.

    -
    -
    \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Dumpper v.80.8 26 What You Need to Know Before Downloading This Software.md b/spaces/raedeXanto/academic-chatgpt-beta/Dumpper v.80.8 26 What You Need to Know Before Downloading This Software.md deleted file mode 100644 index 664769638d78d808ec6191a0be82dda89f68ebb7..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Dumpper v.80.8 26 What You Need to Know Before Downloading This Software.md +++ /dev/null @@ -1,122 +0,0 @@ -
    -

    Dumpper v.80.8 26: A Portable and Free Software for Wireless Network Management

    -

    Introduction

    -

    Wireless networks are everywhere these days, from homes and offices to cafes and airports. They provide us with convenient and fast access to the internet, but they also pose some challenges and risks for users and administrators. How can we manage our wireless networks effectively? How can we protect them from unauthorized access and attacks? How can we troubleshoot them when they have problems?

    -

    Dumpper v.80.8 26


    Download File ››››› https://tinourl.com/2uL417



    -

    One possible solution is to use a software tool that can help us with wireless network management and security. One such tool is Dumpper v.80.8 26, a portable and free software that is focused on wireless network management in Windows. It also incorporates several methods to display and check some security flaws discovered in both the WPS protocol and the default WPA/WPA2 key generation based on the BSSID and ESSID.

    -

    In this article, we will explore what Dumpper v.80.8 26 is, what it can do, how to download and install it on Windows, and what are its advantages and disadvantages.

    -

    Features of Dumpper v.80.8 26

    -

    Dumpper v.80.8 26 has several features that can help us with wireless network management and security.

    -

    How to scan and connect to wireless networks using Dumpper v.80.8 26?

    -

    The main feature of Dumpper v.80.8 26 is that it can scan and connect to wireless networks within range of our computer's wireless adapter.

    -


    -

    To scan for wireless networks, we need to open Dumpper v.80.8 26 and go to the Redes tab (Networks in English). Here we will see a list of all the available wireless networks, along with their ESSID (network name), BSSID (MAC address), RSSI (signal strength), channel, encryption type, WPS status, vendor, country code, etc.

    -

    We can sort the list by any of these criteria by clicking on the column headers, or filter it by typing in the search box at the top right corner.

    -
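
    Conceptually, the Redes tab is a sortable, filterable table of scan records like the one in this toy model; the field names and sample values are illustrative, not Dumpper's internal data structures:

```python
from dataclasses import dataclass

@dataclass
class Network:
    essid: str       # network name
    bssid: str       # MAC address of the access point
    rssi: int        # signal strength in dBm (closer to 0 is stronger)
    channel: int
    encryption: str

scan = [
    Network("CafeWiFi", "AA:BB:CC:00:00:01", -71, 6, "WPA2"),
    Network("HomeNet",  "AA:BB:CC:00:00:02", -45, 11, "WPA2"),
    Network("OpenSpot", "AA:BB:CC:00:00:03", -60, 1, "Open"),
]

# "Click the RSSI column header": strongest signal first.
for net in sorted(scan, key=lambda n: n.rssi, reverse=True):
    print(f"{net.essid:10} {net.bssid} {net.rssi} dBm ch{net.channel} {net.encryption}")

# "Type in the search box": a simple substring filter on the ESSID.
print([n.essid for n in scan if "wifi" in n.essid.lower()])
```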

    We can also refresh the list by clicking on the Actualizar button (Update in English) at the bottom right corner.

    -

    To connect to a wireless network, we need to select it from the list and click on the Conectar button (Connect in English) at the bottom left corner.

    -

    If the network is open (no encryption), we will be connected automatically.

    -

    If the network is encrypted (WEP, WPA/WPA2), we will need to enter the password in the pop-up window that appears.

    -

    If we don't know the password, we can try to use some of the other features of Dumpper v.80.8 26 that we will discuss later.

    -

    How to use the WPS module to test and exploit security vulnerabilities in wireless networks?

    -

    Another feature of Dumpper v.80.8 26 is that it can use the WPS module to test and exploit security vulnerabilities in wireless networks that have WPS enabled.

    -

    WPS stands for Wi-Fi Protected Setup, a feature that allows users to connect to a wireless network by pressing a button on their router or entering a PIN code on their device.

    -

    However, WPS also has some security flaws that can be exploited by attackers to gain access to a wireless network without knowing its password.

    -
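
    The best-known flaw is in how the router confirms a PIN: the 8-digit PIN is validated in two halves, and its last digit is only a checksum, so an attacker needs at most 10^4 + 10^3 = 11,000 guesses instead of 10^7. The checksum calculation below comes from the public WPS specification, not from Dumpper itself; this is a minimal sketch of it:

```python
def wps_pin_checksum(first7: int) -> int:
    """Checksum digit for the first 7 digits of a WPS PIN (per the WPS spec)."""
    accum, pin = 0, first7
    while pin:
        accum += 3 * (pin % 10)   # alternate digits are weighted by 3
        pin //= 10
        accum += pin % 10
        pin //= 10
    return (10 - accum % 10) % 10

body = 1234567
print(f"Full PIN: {body:07d}{wps_pin_checksum(body)}")  # prints 12345670

# The protocol confirms each half independently, so the search space is
# 10**4 guesses for the first half plus 10**3 for the second (the 8th
# digit is derived), i.e. at most 11,000 attempts:
print(10**4 + 10**3)
```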

    To use the WPS module, we need to open Dumpper v.80.8 26 and go to the Wps tab.

    Here we will see a list of all the available wireless networks that have WPS enabled, along with the same details as in the Redes tab (ESSID, BSSID, RSSI, channel, encryption type, vendor, country code, etc.). As before, we can sort, filter and refresh this list.

    To test a wireless network for WPS vulnerabilities, we need to select it from the list and click on one of these buttons at the bottom left corner:

    • Iniciar Sesion (Log In in English): This will try to connect to the selected network using a default or generic PIN code that may work for some routers.
    • Iniciar Ataque (Start Attack in English): This will try to connect to the selected network using a brute force attack that will try all possible PIN codes until it finds one that works or until it reaches a limit set by us.
    • Iniciar Ataque Pixie Dust (Start Pixie Dust Attack in English): This will try to connect to the selected network using a pixie dust attack that will try to extract some information from the router's response that may reveal its PIN code or password.

    If any of these attacks succeed, we will see a pop-up window that shows us the PIN code and/or password of the selected network.


    How to use the PIN generator to create and save WPS PINs for wireless networks?


    A third feature of Dumpper v.80.8 26 is that it can use the PIN generator to create and save WPS PINs for wireless networks.


    The PIN generator is a tool that can generate random or customized WPS PINs based on the BSSID and ESSID of a wireless network.


    To use the PIN generator, we need to open Dumpper v.80.8 26 and go to the Generador Pin tab (PIN Generator in English).

    Here we will see the same list of available wireless networks as in the Redes tab, including each network's WPS status, and we can sort, filter and refresh it in the same way.

    To generate a WPS PIN for a wireless network, we need to select it from the list and click on one of these buttons at the bottom left corner:

    • Generar Pin Aleatorio (Generate Random PIN in English): This will generate a random 8-digit WPS PIN for the selected network.
    • Generar Pin Personalizado (Generate Customized PIN in English): This will generate a customized 8-digit WPS PIN for the selected network based on some parameters that we can set, such as vendor, algorithm, length, etc.

    We can see the generated WPS PIN in the text box below the buttons.


    We can also save the generated WPS PIN to a file by clicking on the Guardar Pin button (Save PIN in English) at the bottom right corner.


    We can then use this WPS PIN to connect to the network using Dumpper v.80.8 26 or any other software or device that supports WPS.
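
    Two well-documented facts make such generators possible: the eighth digit of every WPS PIN is a standard checksum over the first seven, and many routers derive their factory-default PIN from the access point's MAC address. The sketch below implements the standard checksum and one widely published derivation (the ComputePIN approach); whether Dumpper uses exactly these formulas is an assumption, and the function names are invented for illustration:

```python
def wps_checksum_digit(pin7: int) -> int:
    """Standard WPS checksum over the first 7 PIN digits."""
    accum = 0
    while pin7:
        accum += 3 * (pin7 % 10)
        pin7 //= 10
        accum += pin7 % 10
        pin7 //= 10
    return (10 - accum % 10) % 10

def default_pin_from_bssid(bssid: str) -> str:
    """ComputePIN-style default PIN: derive 7 digits from the low
    24 bits of the MAC address, then append the checksum digit."""
    mac_low = int(bssid.replace(":", "")[-6:], 16)   # last three octets
    body = mac_low % 10_000_000                      # keep 7 decimal digits
    return f"{body:07d}{wps_checksum_digit(body)}"

print(default_pin_from_bssid("aa:bb:cc:dd:ee:ff"))   # prints 45446399
```

    If a router's default PIN follows such a formula, the generated value can be tried directly from the Wps tab instead of brute-forcing all 11,000 candidates.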


    How to use the JumpStart module to connect to wireless networks without knowing the password?


    A fourth feature of Dumpper v.80.8 26 is that it can use the JumpStart module to connect to wireless networks without knowing the password.


    JumpStart is a program that can connect to a wireless network over WPS by entering a valid PIN code.


    To use the JumpStart module, we need to open Dumpper v.80.8 26 and go to the JumpStart tab.

    Here we will see the same list of WPS-enabled wireless networks as in the Wps tab, and we can sort, filter and refresh it in the same way.

    To connect to a wireless network using JumpStart, we need to select it from the list and click on one of these buttons at the bottom left corner:

    • Iniciar Sesion (Log In in English): This will try to connect to the selected network using a default or generic PIN code that may work for some routers.
    • Iniciar Ataque (Start Attack in English): This will try to connect to the selected network using a brute force attack that will try all possible PIN codes until it finds one that works or until it reaches a limit set by us.

      Conclusion

      In summary, Dumpper v.80.8 26 is portable and free, easy and fast, versatile and powerful, and compatible and flexible, but it can also be illegal and unethical, unreliable and risky, and harmful and dangerous if misused.

      Therefore, we should use Dumpper v.80.8 26 with caution and responsibility, and only for legitimate and educational purposes.


      Tips and Recommendations


      Here are some tips and recommendations for using Dumpper v.80.8 26 safely and ethically:

      • Use it only on your own wireless network or with the permission of the network owner. Do not hack other people's wireless networks without their consent or knowledge.
      • Use it only for testing and improving your wireless network security. Do not use it for malicious or illegal purposes such as stealing data, disrupting service, harming devices, etc.
      • Use it only for learning and experimenting with wireless network management and security. Do not use it for cheating or gaining unfair advantages over others.
      • Use it only with the latest version of Windows and wireless adapter drivers. Do not use it with outdated or incompatible software or hardware that may cause errors or problems.
      • Use it only with a reliable and secure internet connection. Do not use it with a public or untrusted network that may expose you to other hackers or malicious users.
      • Use it only with a backup and antivirus software. Do not use it without protecting your computer and data from potential damage or infection.

      FAQs


      Here are some common questions and answers about Dumpper v.80.8 26:

      1. What is Dumpper v.80.8 26?
        Dumpper v.80.8 26 is a portable and free software tool focused on wireless network management in Windows. It has several features that can help us with wireless network management and security.
      2. Where can I download Dumpper v.80.8 26?
        You can download Dumpper v.80.8 26 from various sources on the internet, such as Google Drive, Blogger, npm, etc.
      3. How can I install Dumpper v.80.8 26?
        You don't need to install Dumpper v.80.8 26 on your computer. You can just download it and run it from any location.
      4. What are the requirements for using Dumpper v.80.8 26?
        You need a Windows operating system from XP to 10, a wireless adapter that supports monitor mode and injection mode, Microsoft .NET Framework 4, WinPcap, JumpStart, and a reliable and secure internet connection.
      5. Is Dumpper v.80.8 26 safe and legal to use?
        Dumpper v.80.8 26 is safe and legal to use only on your own wireless network or with the permission of the network owner, only for testing and improving your wireless network security, and only for learning and experimenting, with up-to-date Windows and wireless adapter drivers, a reliable and secure internet connection, and backup and antivirus software in place.

      I hope you enjoyed this article on Dumpper v.80.8 26 and learned something new from it.


      If you have any questions or feedback, please feel free to leave a comment below.


      If you want to learn more about wireless network management and security, please subscribe to my blog or follow me on social media.


      Thank you for reading!


      \ No newline at end of file diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/worker_threads.d.ts b/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/worker_threads.d.ts deleted file mode 100644 index 52f438487805daf0ade7a680a3f373a1b0746d7d..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/worker_threads.d.ts +++ /dev/null @@ -1,689 +0,0 @@ -/** - * The `worker_threads` module enables the use of threads that execute JavaScript - * in parallel. To access it: - * - * ```js - * const worker = require('worker_threads'); - * ``` - * - * Workers (threads) are useful for performing CPU-intensive JavaScript operations. - * They do not help much with I/O-intensive work. The Node.js built-in - * asynchronous I/O operations are more efficient than Workers can be. - * - * Unlike `child_process` or `cluster`, `worker_threads` can share memory. They do - * so by transferring `ArrayBuffer` instances or sharing `SharedArrayBuffer`instances. - * - * ```js - * const { - * Worker, isMainThread, parentPort, workerData - * } = require('worker_threads'); - * - * if (isMainThread) { - * module.exports = function parseJSAsync(script) { - * return new Promise((resolve, reject) => { - * const worker = new Worker(__filename, { - * workerData: script - * }); - * worker.on('message', resolve); - * worker.on('error', reject); - * worker.on('exit', (code) => { - * if (code !== 0) - * reject(new Error(`Worker stopped with exit code ${code}`)); - * }); - * }); - * }; - * } else { - * const { parse } = require('some-js-parsing-library'); - * const script = workerData; - * parentPort.postMessage(parse(script)); - * } - * ``` - * - * The above example spawns a Worker thread for each `parseJSAsync()` call. In - * practice, use a pool of Workers for these kinds of tasks. Otherwise, the - * overhead of creating Workers would likely exceed their benefit. - * - * When implementing a worker pool, use the `AsyncResource` API to inform - * diagnostic tools (e.g. to provide asynchronous stack traces) about the - * correlation between tasks and their outcomes. See `"Using AsyncResource for a Worker thread pool"` in the `async_hooks` documentation for an example implementation. - * - * Worker threads inherit non-process-specific options by default. Refer to `Worker constructor options` to know how to customize worker thread options, - * specifically `argv` and `execArgv` options. - * @see [source](https://github.com/nodejs/node/blob/v18.0.0/lib/worker_threads.js) - */ -declare module 'worker_threads' { - import { Blob } from 'node:buffer'; - import { Context } from 'node:vm'; - import { EventEmitter } from 'node:events'; - import { EventLoopUtilityFunction } from 'node:perf_hooks'; - import { FileHandle } from 'node:fs/promises'; - import { Readable, Writable } from 'node:stream'; - import { URL } from 'node:url'; - import { X509Certificate } from 'node:crypto'; - const isMainThread: boolean; - const parentPort: null | MessagePort; - const resourceLimits: ResourceLimits; - const SHARE_ENV: unique symbol; - const threadId: number; - const workerData: any; - /** - * Instances of the `worker.MessageChannel` class represent an asynchronous, - * two-way communications channel. - * The `MessageChannel` has no methods of its own. `new MessageChannel()`yields an object with `port1` and `port2` properties, which refer to linked `MessagePort` instances. 
- * - * ```js - * const { MessageChannel } = require('worker_threads'); - * - * const { port1, port2 } = new MessageChannel(); - * port1.on('message', (message) => console.log('received', message)); - * port2.postMessage({ foo: 'bar' }); - * // Prints: received { foo: 'bar' } from the `port1.on('message')` listener - * ``` - * @since v10.5.0 - */ - class MessageChannel { - readonly port1: MessagePort; - readonly port2: MessagePort; - } - interface WorkerPerformance { - eventLoopUtilization: EventLoopUtilityFunction; - } - type TransferListItem = ArrayBuffer | MessagePort | FileHandle | X509Certificate | Blob; - /** - * Instances of the `worker.MessagePort` class represent one end of an - * asynchronous, two-way communications channel. It can be used to transfer - * structured data, memory regions and other `MessagePort`s between different `Worker` s. - * - * This implementation matches [browser `MessagePort`](https://developer.mozilla.org/en-US/docs/Web/API/MessagePort) s. - * @since v10.5.0 - */ - class MessagePort extends EventEmitter { - /** - * Disables further sending of messages on either side of the connection. - * This method can be called when no further communication will happen over this`MessagePort`. - * - * The `'close' event` is emitted on both `MessagePort` instances that - * are part of the channel. - * @since v10.5.0 - */ - close(): void; - /** - * Sends a JavaScript value to the receiving side of this channel.`value` is transferred in a way which is compatible with - * the [HTML structured clone algorithm](https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Structured_clone_algorithm). - * - * In particular, the significant differences to `JSON` are: - * - * * `value` may contain circular references. - * * `value` may contain instances of builtin JS types such as `RegExp`s,`BigInt`s, `Map`s, `Set`s, etc. - * * `value` may contain typed arrays, both using `ArrayBuffer`s - * and `SharedArrayBuffer`s. - * * `value` may contain [`WebAssembly.Module`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WebAssembly/Module) instances. - * * `value` may not contain native (C++-backed) objects other than: - * - * ```js - * const { MessageChannel } = require('worker_threads'); - * const { port1, port2 } = new MessageChannel(); - * - * port1.on('message', (message) => console.log(message)); - * - * const circularData = {}; - * circularData.foo = circularData; - * // Prints: { foo: [Circular] } - * port2.postMessage(circularData); - * ``` - * - * `transferList` may be a list of [`ArrayBuffer`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/ArrayBuffer), `MessagePort` and `FileHandle` objects. - * After transferring, they are not usable on the sending side of the channel - * anymore (even if they are not contained in `value`). Unlike with `child processes`, transferring handles such as network sockets is currently - * not supported. - * - * If `value` contains [`SharedArrayBuffer`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/SharedArrayBuffer) instances, those are accessible - * from either thread. They cannot be listed in `transferList`. - * - * `value` may still contain `ArrayBuffer` instances that are not in`transferList`; in that case, the underlying memory is copied rather than moved. 
- * - * ```js - * const { MessageChannel } = require('worker_threads'); - * const { port1, port2 } = new MessageChannel(); - * - * port1.on('message', (message) => console.log(message)); - * - * const uint8Array = new Uint8Array([ 1, 2, 3, 4 ]); - * // This posts a copy of `uint8Array`: - * port2.postMessage(uint8Array); - * // This does not copy data, but renders `uint8Array` unusable: - * port2.postMessage(uint8Array, [ uint8Array.buffer ]); - * - * // The memory for the `sharedUint8Array` is accessible from both the - * // original and the copy received by `.on('message')`: - * const sharedUint8Array = new Uint8Array(new SharedArrayBuffer(4)); - * port2.postMessage(sharedUint8Array); - * - * // This transfers a freshly created message port to the receiver. - * // This can be used, for example, to create communication channels between - * // multiple `Worker` threads that are children of the same parent thread. - * const otherChannel = new MessageChannel(); - * port2.postMessage({ port: otherChannel.port1 }, [ otherChannel.port1 ]); - * ``` - * - * The message object is cloned immediately, and can be modified after - * posting without having side effects. - * - * For more information on the serialization and deserialization mechanisms - * behind this API, see the `serialization API of the v8 module`. - * @since v10.5.0 - */ - postMessage(value: any, transferList?: ReadonlyArray): void; - /** - * Opposite of `unref()`. Calling `ref()` on a previously `unref()`ed port does _not_ let the program exit if it's the only active handle left (the default - * behavior). If the port is `ref()`ed, calling `ref()` again has no effect. - * - * If listeners are attached or removed using `.on('message')`, the port - * is `ref()`ed and `unref()`ed automatically depending on whether - * listeners for the event exist. - * @since v10.5.0 - */ - ref(): void; - /** - * Calling `unref()` on a port allows the thread to exit if this is the only - * active handle in the event system. If the port is already `unref()`ed calling`unref()` again has no effect. - * - * If listeners are attached or removed using `.on('message')`, the port is`ref()`ed and `unref()`ed automatically depending on whether - * listeners for the event exist. - * @since v10.5.0 - */ - unref(): void; - /** - * Starts receiving messages on this `MessagePort`. When using this port - * as an event emitter, this is called automatically once `'message'`listeners are attached. - * - * This method exists for parity with the Web `MessagePort` API. In Node.js, - * it is only useful for ignoring messages when no event listener is present. - * Node.js also diverges in its handling of `.onmessage`. Setting it - * automatically calls `.start()`, but unsetting it lets messages queue up - * until a new handler is set or the port is discarded. 
- * @since v10.5.0 - */ - start(): void; - addListener(event: 'close', listener: () => void): this; - addListener(event: 'message', listener: (value: any) => void): this; - addListener(event: 'messageerror', listener: (error: Error) => void): this; - addListener(event: string | symbol, listener: (...args: any[]) => void): this; - emit(event: 'close'): boolean; - emit(event: 'message', value: any): boolean; - emit(event: 'messageerror', error: Error): boolean; - emit(event: string | symbol, ...args: any[]): boolean; - on(event: 'close', listener: () => void): this; - on(event: 'message', listener: (value: any) => void): this; - on(event: 'messageerror', listener: (error: Error) => void): this; - on(event: string | symbol, listener: (...args: any[]) => void): this; - once(event: 'close', listener: () => void): this; - once(event: 'message', listener: (value: any) => void): this; - once(event: 'messageerror', listener: (error: Error) => void): this; - once(event: string | symbol, listener: (...args: any[]) => void): this; - prependListener(event: 'close', listener: () => void): this; - prependListener(event: 'message', listener: (value: any) => void): this; - prependListener(event: 'messageerror', listener: (error: Error) => void): this; - prependListener(event: string | symbol, listener: (...args: any[]) => void): this; - prependOnceListener(event: 'close', listener: () => void): this; - prependOnceListener(event: 'message', listener: (value: any) => void): this; - prependOnceListener(event: 'messageerror', listener: (error: Error) => void): this; - prependOnceListener(event: string | symbol, listener: (...args: any[]) => void): this; - removeListener(event: 'close', listener: () => void): this; - removeListener(event: 'message', listener: (value: any) => void): this; - removeListener(event: 'messageerror', listener: (error: Error) => void): this; - removeListener(event: string | symbol, listener: (...args: any[]) => void): this; - off(event: 'close', listener: () => void): this; - off(event: 'message', listener: (value: any) => void): this; - off(event: 'messageerror', listener: (error: Error) => void): this; - off(event: string | symbol, listener: (...args: any[]) => void): this; - } - interface WorkerOptions { - /** - * List of arguments which would be stringified and appended to - * `process.argv` in the worker. This is mostly similar to the `workerData` - * but the values will be available on the global `process.argv` as if they - * were passed as CLI options to the script. - */ - argv?: any[] | undefined; - env?: NodeJS.Dict | typeof SHARE_ENV | undefined; - eval?: boolean | undefined; - workerData?: any; - stdin?: boolean | undefined; - stdout?: boolean | undefined; - stderr?: boolean | undefined; - execArgv?: string[] | undefined; - resourceLimits?: ResourceLimits | undefined; - /** - * Additional data to send in the first worker message. - */ - transferList?: TransferListItem[] | undefined; - /** - * @default true - */ - trackUnmanagedFds?: boolean | undefined; - } - interface ResourceLimits { - /** - * The maximum size of a heap space for recently created objects. - */ - maxYoungGenerationSizeMb?: number | undefined; - /** - * The maximum size of the main heap in MB. - */ - maxOldGenerationSizeMb?: number | undefined; - /** - * The size of a pre-allocated memory range used for generated code. - */ - codeRangeSizeMb?: number | undefined; - /** - * The default maximum stack size for the thread. Small values may lead to unusable Worker instances. 
- * @default 4 - */ - stackSizeMb?: number | undefined; - } - /** - * The `Worker` class represents an independent JavaScript execution thread. - * Most Node.js APIs are available inside of it. - * - * Notable differences inside a Worker environment are: - * - * * The `process.stdin`, `process.stdout` and `process.stderr` may be redirected by the parent thread. - * * The `require('worker_threads').isMainThread` property is set to `false`. - * * The `require('worker_threads').parentPort` message port is available. - * * `process.exit()` does not stop the whole program, just the single thread, - * and `process.abort()` is not available. - * * `process.chdir()` and `process` methods that set group or user ids - * are not available. - * * `process.env` is a copy of the parent thread's environment variables, - * unless otherwise specified. Changes to one copy are not visible in other - * threads, and are not visible to native add-ons (unless `worker.SHARE_ENV` is passed as the `env` option to the `Worker` constructor). - * * `process.title` cannot be modified. - * * Signals are not delivered through `process.on('...')`. - * * Execution may stop at any point as a result of `worker.terminate()` being invoked. - * * IPC channels from parent processes are not accessible. - * * The `trace_events` module is not supported. - * * Native add-ons can only be loaded from multiple threads if they fulfill `certain conditions`. - * - * Creating `Worker` instances inside of other `Worker`s is possible. - * - * Like [Web Workers](https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API) and the `cluster module`, two-way communication can be - * achieved through inter-thread message passing. Internally, a `Worker` has a - * built-in pair of `MessagePort` s that are already associated with each other - * when the `Worker` is created. While the `MessagePort` object on the parent side - * is not directly exposed, its functionalities are exposed through `worker.postMessage()` and the `worker.on('message')` event - * on the `Worker` object for the parent thread. - * - * To create custom messaging channels (which is encouraged over using the default - * global channel because it facilitates separation of concerns), users can create - * a `MessageChannel` object on either thread and pass one of the`MessagePort`s on that `MessageChannel` to the other thread through a - * pre-existing channel, such as the global one. - * - * See `port.postMessage()` for more information on how messages are passed, - * and what kind of JavaScript values can be successfully transported through - * the thread barrier. - * - * ```js - * const assert = require('assert'); - * const { - * Worker, MessageChannel, MessagePort, isMainThread, parentPort - * } = require('worker_threads'); - * if (isMainThread) { - * const worker = new Worker(__filename); - * const subChannel = new MessageChannel(); - * worker.postMessage({ hereIsYourPort: subChannel.port1 }, [subChannel.port1]); - * subChannel.port2.on('message', (value) => { - * console.log('received:', value); - * }); - * } else { - * parentPort.once('message', (value) => { - * assert(value.hereIsYourPort instanceof MessagePort); - * value.hereIsYourPort.postMessage('the worker is sending this'); - * value.hereIsYourPort.close(); - * }); - * } - * ``` - * @since v10.5.0 - */ - class Worker extends EventEmitter { - /** - * If `stdin: true` was passed to the `Worker` constructor, this is a - * writable stream. 
The data written to this stream will be made available in - * the worker thread as `process.stdin`. - * @since v10.5.0 - */ - readonly stdin: Writable | null; - /** - * This is a readable stream which contains data written to `process.stdout` inside the worker thread. If `stdout: true` was not passed to the `Worker` constructor, then data is piped to the - * parent thread's `process.stdout` stream. - * @since v10.5.0 - */ - readonly stdout: Readable; - /** - * This is a readable stream which contains data written to `process.stderr` inside the worker thread. If `stderr: true` was not passed to the `Worker` constructor, then data is piped to the - * parent thread's `process.stderr` stream. - * @since v10.5.0 - */ - readonly stderr: Readable; - /** - * An integer identifier for the referenced thread. Inside the worker thread, - * it is available as `require('worker_threads').threadId`. - * This value is unique for each `Worker` instance inside a single process. - * @since v10.5.0 - */ - readonly threadId: number; - /** - * Provides the set of JS engine resource constraints for this Worker thread. - * If the `resourceLimits` option was passed to the `Worker` constructor, - * this matches its values. - * - * If the worker has stopped, the return value is an empty object. - * @since v13.2.0, v12.16.0 - */ - readonly resourceLimits?: ResourceLimits | undefined; - /** - * An object that can be used to query performance information from a worker - * instance. Similar to `perf_hooks.performance`. - * @since v15.1.0, v14.17.0, v12.22.0 - */ - readonly performance: WorkerPerformance; - /** - * @param filename The path to the Worker’s main script or module. - * Must be either an absolute path or a relative path (i.e. relative to the current working directory) starting with ./ or ../, - * or a WHATWG URL object using file: protocol. If options.eval is true, this is a string containing JavaScript code rather than a path. - */ - constructor(filename: string | URL, options?: WorkerOptions); - /** - * Send a message to the worker that is received via `require('worker_threads').parentPort.on('message')`. - * See `port.postMessage()` for more details. - * @since v10.5.0 - */ - postMessage(value: any, transferList?: ReadonlyArray): void; - /** - * Opposite of `unref()`, calling `ref()` on a previously `unref()`ed worker does _not_ let the program exit if it's the only active handle left (the default - * behavior). If the worker is `ref()`ed, calling `ref()` again has - * no effect. - * @since v10.5.0 - */ - ref(): void; - /** - * Calling `unref()` on a worker allows the thread to exit if this is the only - * active handle in the event system. If the worker is already `unref()`ed calling`unref()` again has no effect. - * @since v10.5.0 - */ - unref(): void; - /** - * Stop all JavaScript execution in the worker thread as soon as possible. - * Returns a Promise for the exit code that is fulfilled when the `'exit' event` is emitted. - * @since v10.5.0 - */ - terminate(): Promise; - /** - * Returns a readable stream for a V8 snapshot of the current state of the Worker. - * See `v8.getHeapSnapshot()` for more details. - * - * If the Worker thread is no longer running, which may occur before the `'exit' event` is emitted, the returned `Promise` is rejected - * immediately with an `ERR_WORKER_NOT_RUNNING` error. 
- * @since v13.9.0, v12.17.0 - * @return A promise for a Readable Stream containing a V8 heap snapshot - */ - getHeapSnapshot(): Promise; - addListener(event: 'error', listener: (err: Error) => void): this; - addListener(event: 'exit', listener: (exitCode: number) => void): this; - addListener(event: 'message', listener: (value: any) => void): this; - addListener(event: 'messageerror', listener: (error: Error) => void): this; - addListener(event: 'online', listener: () => void): this; - addListener(event: string | symbol, listener: (...args: any[]) => void): this; - emit(event: 'error', err: Error): boolean; - emit(event: 'exit', exitCode: number): boolean; - emit(event: 'message', value: any): boolean; - emit(event: 'messageerror', error: Error): boolean; - emit(event: 'online'): boolean; - emit(event: string | symbol, ...args: any[]): boolean; - on(event: 'error', listener: (err: Error) => void): this; - on(event: 'exit', listener: (exitCode: number) => void): this; - on(event: 'message', listener: (value: any) => void): this; - on(event: 'messageerror', listener: (error: Error) => void): this; - on(event: 'online', listener: () => void): this; - on(event: string | symbol, listener: (...args: any[]) => void): this; - once(event: 'error', listener: (err: Error) => void): this; - once(event: 'exit', listener: (exitCode: number) => void): this; - once(event: 'message', listener: (value: any) => void): this; - once(event: 'messageerror', listener: (error: Error) => void): this; - once(event: 'online', listener: () => void): this; - once(event: string | symbol, listener: (...args: any[]) => void): this; - prependListener(event: 'error', listener: (err: Error) => void): this; - prependListener(event: 'exit', listener: (exitCode: number) => void): this; - prependListener(event: 'message', listener: (value: any) => void): this; - prependListener(event: 'messageerror', listener: (error: Error) => void): this; - prependListener(event: 'online', listener: () => void): this; - prependListener(event: string | symbol, listener: (...args: any[]) => void): this; - prependOnceListener(event: 'error', listener: (err: Error) => void): this; - prependOnceListener(event: 'exit', listener: (exitCode: number) => void): this; - prependOnceListener(event: 'message', listener: (value: any) => void): this; - prependOnceListener(event: 'messageerror', listener: (error: Error) => void): this; - prependOnceListener(event: 'online', listener: () => void): this; - prependOnceListener(event: string | symbol, listener: (...args: any[]) => void): this; - removeListener(event: 'error', listener: (err: Error) => void): this; - removeListener(event: 'exit', listener: (exitCode: number) => void): this; - removeListener(event: 'message', listener: (value: any) => void): this; - removeListener(event: 'messageerror', listener: (error: Error) => void): this; - removeListener(event: 'online', listener: () => void): this; - removeListener(event: string | symbol, listener: (...args: any[]) => void): this; - off(event: 'error', listener: (err: Error) => void): this; - off(event: 'exit', listener: (exitCode: number) => void): this; - off(event: 'message', listener: (value: any) => void): this; - off(event: 'messageerror', listener: (error: Error) => void): this; - off(event: 'online', listener: () => void): this; - off(event: string | symbol, listener: (...args: any[]) => void): this; - } - interface BroadcastChannel extends NodeJS.RefCounted {} - /** - * Instances of `BroadcastChannel` allow asynchronous one-to-many communication - * 
with all other `BroadcastChannel` instances bound to the same channel name. - * - * ```js - * 'use strict'; - * - * const { - * isMainThread, - * BroadcastChannel, - * Worker - * } = require('worker_threads'); - * - * const bc = new BroadcastChannel('hello'); - * - * if (isMainThread) { - * let c = 0; - * bc.onmessage = (event) => { - * console.log(event.data); - * if (++c === 10) bc.close(); - * }; - * for (let n = 0; n < 10; n++) - * new Worker(__filename); - * } else { - * bc.postMessage('hello from every worker'); - * bc.close(); - * } - * ``` - * @since v15.4.0 - */ - class BroadcastChannel { - readonly name: string; - /** - * Invoked with a single \`MessageEvent\` argument when a message is received. - * @since v15.4.0 - */ - onmessage: (message: unknown) => void; - /** - * Invoked with a received message cannot be deserialized. - * @since v15.4.0 - */ - onmessageerror: (message: unknown) => void; - constructor(name: string); - /** - * Closes the `BroadcastChannel` connection. - * @since v15.4.0 - */ - close(): void; - /** - * @since v15.4.0 - * @param message Any cloneable JavaScript value. - */ - postMessage(message: unknown): void; - } - /** - * Mark an object as not transferable. If `object` occurs in the transfer list of - * a `port.postMessage()` call, it is ignored. - * - * In particular, this makes sense for objects that can be cloned, rather than - * transferred, and which are used by other objects on the sending side. - * For example, Node.js marks the `ArrayBuffer`s it uses for its `Buffer pool` with this. - * - * This operation cannot be undone. - * - * ```js - * const { MessageChannel, markAsUntransferable } = require('worker_threads'); - * - * const pooledBuffer = new ArrayBuffer(8); - * const typedArray1 = new Uint8Array(pooledBuffer); - * const typedArray2 = new Float64Array(pooledBuffer); - * - * markAsUntransferable(pooledBuffer); - * - * const { port1 } = new MessageChannel(); - * port1.postMessage(typedArray1, [ typedArray1.buffer ]); - * - * // The following line prints the contents of typedArray1 -- it still owns - * // its memory and has been cloned, not transferred. Without - * // `markAsUntransferable()`, this would print an empty Uint8Array. - * // typedArray2 is intact as well. - * console.log(typedArray1); - * console.log(typedArray2); - * ``` - * - * There is no equivalent to this API in browsers. - * @since v14.5.0, v12.19.0 - */ - function markAsUntransferable(object: object): void; - /** - * Transfer a `MessagePort` to a different `vm` Context. The original `port`object is rendered unusable, and the returned `MessagePort` instance - * takes its place. - * - * The returned `MessagePort` is an object in the target context and - * inherits from its global `Object` class. Objects passed to the [`port.onmessage()`](https://developer.mozilla.org/en-US/docs/Web/API/MessagePort/onmessage) listener are also created in the - * target context - * and inherit from its global `Object` class. - * - * However, the created `MessagePort` no longer inherits from [`EventTarget`](https://developer.mozilla.org/en-US/docs/Web/API/EventTarget), and only - * [`port.onmessage()`](https://developer.mozilla.org/en-US/docs/Web/API/MessagePort/onmessage) can be used to receive - * events using it. - * @since v11.13.0 - * @param port The message port to transfer. - * @param contextifiedSandbox A `contextified` object as returned by the `vm.createContext()` method. 
- */ - function moveMessagePortToContext(port: MessagePort, contextifiedSandbox: Context): MessagePort; - /** - * Receive a single message from a given `MessagePort`. If no message is available,`undefined` is returned, otherwise an object with a single `message` property - * that contains the message payload, corresponding to the oldest message in the`MessagePort`’s queue. - * - * ```js - * const { MessageChannel, receiveMessageOnPort } = require('worker_threads'); - * const { port1, port2 } = new MessageChannel(); - * port1.postMessage({ hello: 'world' }); - * - * console.log(receiveMessageOnPort(port2)); - * // Prints: { message: { hello: 'world' } } - * console.log(receiveMessageOnPort(port2)); - * // Prints: undefined - * ``` - * - * When this function is used, no `'message'` event is emitted and the`onmessage` listener is not invoked. - * @since v12.3.0 - */ - function receiveMessageOnPort(port: MessagePort): - | { - message: any; - } - | undefined; - type Serializable = string | object | number | boolean | bigint; - /** - * Within a worker thread, `worker.getEnvironmentData()` returns a clone - * of data passed to the spawning thread's `worker.setEnvironmentData()`. - * Every new `Worker` receives its own copy of the environment data - * automatically. - * - * ```js - * const { - * Worker, - * isMainThread, - * setEnvironmentData, - * getEnvironmentData, - * } = require('worker_threads'); - * - * if (isMainThread) { - * setEnvironmentData('Hello', 'World!'); - * const worker = new Worker(__filename); - * } else { - * console.log(getEnvironmentData('Hello')); // Prints 'World!'. - * } - * ``` - * @since v15.12.0, v14.18.0 - * @param key Any arbitrary, cloneable JavaScript value that can be used as a {Map} key. - */ - function getEnvironmentData(key: Serializable): Serializable; - /** - * The `worker.setEnvironmentData()` API sets the content of`worker.getEnvironmentData()` in the current thread and all new `Worker`instances spawned from the current context. - * @since v15.12.0, v14.18.0 - * @param key Any arbitrary, cloneable JavaScript value that can be used as a {Map} key. - * @param value Any arbitrary, cloneable JavaScript value that will be cloned and passed automatically to all new `Worker` instances. If `value` is passed as `undefined`, any previously set value - * for the `key` will be deleted. - */ - function setEnvironmentData(key: Serializable, value: Serializable): void; - - import { - BroadcastChannel as _BroadcastChannel, - MessageChannel as _MessageChannel, - MessagePort as _MessagePort, - } from 'worker_threads'; - global { - /** - * `BroadcastChannel` class is a global reference for `require('worker_threads').BroadcastChannel` - * https://nodejs.org/api/globals.html#broadcastchannel - * @since v18.0.0 - */ - var BroadcastChannel: typeof globalThis extends { - onmessage: any; - BroadcastChannel: infer T; - } - ? T - : typeof _BroadcastChannel; - - /** - * `MessageChannel` class is a global reference for `require('worker_threads').MessageChannel` - * https://nodejs.org/api/globals.html#messagechannel - * @since v15.0.0 - */ - var MessageChannel: typeof globalThis extends { - onmessage: any; - MessageChannel: infer T; - } - ? T - : typeof _MessageChannel; - - /** - * `MessagePort` class is a global reference for `require('worker_threads').MessagePort` - * https://nodejs.org/api/globals.html#messageport - * @since v15.0.0 - */ - var MessagePort: typeof globalThis extends { - onmessage: any; - MessagePort: infer T; - } - ? 
T - : typeof _MessagePort; - } -} -declare module 'node:worker_threads' { - export * from 'worker_threads'; -} diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Game Over Full Movie Download 720p Hd.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Game Over Full Movie Download 720p Hd.md deleted file mode 100644 index 9e289e6c578cff6137873f63b6fe6830351d420d..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Game Over Full Movie Download 720p Hd.md +++ /dev/null @@ -1,21 +0,0 @@ - -

      How to Download Game Over Full Movie in 720p HD Quality


      Game Over is a 2019 Indian thriller film starring Taapsee Pannu as a woman who is haunted by a mysterious entity in her home. The film received critical acclaim for its gripping storyline, suspenseful atmosphere and Pannu's performance. If you are looking for a way to watch Game Over full movie in 720p HD quality, you have come to the right place.


      In this article, we will show you how to download Game Over full movie in 720p HD quality from a reliable and safe source. You will need a torrent client such as BitTorrent or uTorrent to download the movie file. Follow these simple steps to get started:


      1. Go to 1337x.to, one of the most popular torrent sites on the internet.
      2. In the search box, type "Game Over 2019 Hindi 720p" and hit enter. You will see a list of results matching your query.
      3. Select the result that has the most seeders and leechers. This indicates that the file is active and has a high download speed.
      4. Click on the torrent name to open its details page. You will see information such as file size, quality, language, subtitles and screenshots.
      5. Click on the magnet link or the download button to start downloading the torrent file. You will be prompted to open it with your torrent client.
      6. Once you open the torrent file with your torrent client, it will start downloading the movie file to your device. You can monitor the progress and speed of the download in your torrent client.
      7. After the download is complete, you can enjoy watching Game Over full movie in 720p HD quality on your device.

      Note: Downloading movies from torrent sites may be illegal in some countries. We do not condone or promote piracy in any way. This article is for educational purposes only. Please use your own discretion and follow the laws of your country before downloading any content from torrent sites.


      Game Over is a film that will keep you on the edge of your seat from start to finish. The film is directed by Ashwin Saravanan and written by him and Kaavya Ramkumar. The film is a co-production between Y NOT Studios and Reliance Entertainment. The film was released in Hindi, Tamil and Telugu languages on 14 June 2019.


      The film revolves around Swapna (Taapsee Pannu), a video game designer who suffers from post-traumatic stress disorder (PTSD) and nyctophobia (fear of the dark) after a horrific incident in her past. She lives alone in a secluded house with her maid Kalamma (Vinodhini Vaidyanathan). On New Year's Eve, she gets a tattoo on her wrist that says "Game Over" as a symbol of overcoming her fears. However, she soon realizes that the tattoo has a sinister connection to a serial killer who targets women with tattoos.


      As the night progresses, Swapna finds herself trapped in her house with the killer and his accomplices. She has to use her wits and skills to survive the night and fight back against the intruders. She also has to face her inner demons and overcome her fears. The film is a thrilling ride that explores themes such as trauma, survival, identity and empowerment.

      \ No newline at end of file diff --git a/spaces/rendchevi/nix-tts/elements/tts.py b/spaces/rendchevi/nix-tts/elements/tts.py deleted file mode 100644 index 5a86a885a5a6a60711fde19e50774940ee726688..0000000000000000000000000000000000000000 --- a/spaces/rendchevi/nix-tts/elements/tts.py +++ /dev/null @@ -1,45 +0,0 @@ -# Utils -import os -import timeit -import soundfile as sf - -# Streamlit -import streamlit as st - -# Custom elements -from elements.component import ( - centered_text, -) - -def generate_voice( - input_text, -): - # TTS Inference - start_time = timeit.default_timer() - c, c_length, phoneme = st.session_state.TTS.tokenize(input_text) - tok_time = timeit.default_timer() - start_time - - start_time = timeit.default_timer() - voice = st.session_state.TTS.vocalize(c, c_length) - tts_time = timeit.default_timer() - start_time - - # Time stats - total_infer_time = tts_time + tok_time - audio_time = voice.shape[-1] / 22050 - rtf = total_infer_time / audio_time - rt_ratio = 1 / rtf - - # Save audio (bug in Streamlit, can't play numpy array directly) - sf.write(f"cache_sound/{st.session_state.random_str}.wav", voice[0,0], 22050) - - # Play audio - st.audio(f"cache_sound/{st.session_state.random_str}.wav", format = "audio/wav") - os.remove(f"cache_sound/{st.session_state.random_str}.wav") - st.caption("Generated Voice") - - st.code( - f"💬 Output Audio: {str(audio_time)[:6]} sec.\n\n⏳ Elapsed time for:\n => Tokenization: {str(tok_time)[:6]} sec.\n => Model Inference: {str(tts_time)[:6]} sec.\n\n⏰ Real-time Factor (RTF): {str(rtf)[:6]}\n\n🏃 The model runs {str(rt_ratio)[:6]} x faster than real-time \ - ", - language = "bash", - ) - st.caption("Elapsed Time Stats") \ No newline at end of file diff --git a/spaces/rizam/literature-research-tool/inference_hf/__init__.py b/spaces/rizam/literature-research-tool/inference_hf/__init__.py deleted file mode 100644 index fc0d43df0f7739b74e5b4c53b898bc2467717d24..0000000000000000000000000000000000000000 --- a/spaces/rizam/literature-research-tool/inference_hf/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from ._inference import InferenceHF \ No newline at end of file diff --git a/spaces/rizam/literature-research-tool/lrt/clustering/models/keyBartPlus.py b/spaces/rizam/literature-research-tool/lrt/clustering/models/keyBartPlus.py deleted file mode 100644 index 7b74a5726fcbde6f292a8e4e33829a35b0ce1e90..0000000000000000000000000000000000000000 --- a/spaces/rizam/literature-research-tool/lrt/clustering/models/keyBartPlus.py +++ /dev/null @@ -1,411 +0,0 @@ -from typing import Optional, List, Union, Tuple -import torch -import torch.nn as nn -import random -from torch.nn import CrossEntropyLoss - -from transformers.utils import ( -add_start_docstrings_to_model_forward, -add_end_docstrings, -replace_return_docstrings -) - -from transformers import AutoModelForSeq2SeqLM -from transformers.models.bart.modeling_bart import ( - BartForConditionalGeneration, - _expand_mask, logger, - shift_tokens_right, - BartPretrainedModel, - BART_INPUTS_DOCSTRING, - _CONFIG_FOR_DOC, - BART_GENERATION_EXAMPLE, - BartModel, - BartDecoder - -) -from .adapter import Adapter -from transformers.modeling_outputs import ( - BaseModelOutputWithPastAndCrossAttentions, - Seq2SeqModelOutput, - BaseModelOutput, - Seq2SeqLMOutput -) - - -class KeyBartAdapter(BartForConditionalGeneration): - def __init__(self,adapter_hid_dim:int) -> None: - keyBart = AutoModelForSeq2SeqLM.from_pretrained("bloomberg/KeyBART") - self.__fix_weights__(keyBart) - - super().__init__(keyBart.model.config) - 
self.lm_head = keyBart.lm_head - self.model = BartPlus(keyBart, adapter_hid_dim) - self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) - - - def __fix_weights__(self,keyBart:BartForConditionalGeneration): - for i in keyBart.model.parameters(): - i.requires_grad = False - for i in keyBart.lm_head.parameters(): - i.requires_grad = False - - @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) - @add_end_docstrings(BART_GENERATION_EXAMPLE) - def forward( - self, - input_ids: torch.LongTensor = None, - attention_mask: Optional[torch.Tensor] = None, - decoder_input_ids: Optional[torch.LongTensor] = None, - decoder_attention_mask: Optional[torch.LongTensor] = None, - head_mask: Optional[torch.Tensor] = None, - decoder_head_mask: Optional[torch.Tensor] = None, - cross_attn_head_mask: Optional[torch.Tensor] = None, - encoder_outputs: Optional[List[torch.FloatTensor]] = None, - past_key_values: Optional[List[torch.FloatTensor]] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - decoder_inputs_embeds: Optional[torch.FloatTensor] = None, - labels: Optional[torch.LongTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, Seq2SeqLMOutput]: - r""" - labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., - config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored - (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
- Returns: - """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if labels is not None: - if use_cache: - logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") - use_cache = False - if decoder_input_ids is None and decoder_inputs_embeds is None: - decoder_input_ids = shift_tokens_right( - labels, self.config.pad_token_id, self.config.decoder_start_token_id - ) - - outputs = self.model( - input_ids, - attention_mask=attention_mask, - decoder_input_ids=decoder_input_ids, - encoder_outputs=encoder_outputs, - decoder_attention_mask=decoder_attention_mask, - head_mask=head_mask, - decoder_head_mask=decoder_head_mask, - cross_attn_head_mask=cross_attn_head_mask, - past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - decoder_inputs_embeds=decoder_inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias - - masked_lm_loss = None - if labels is not None: - loss_fct = CrossEntropyLoss() - masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) - - if not return_dict: - output = (lm_logits,) + outputs[1:] - return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output - - return Seq2SeqLMOutput( - loss=masked_lm_loss, - logits=lm_logits, - past_key_values=outputs.past_key_values, - decoder_hidden_states=outputs.decoder_hidden_states, - decoder_attentions=outputs.decoder_attentions, - cross_attentions=outputs.cross_attentions, - encoder_last_hidden_state=outputs.encoder_last_hidden_state, - encoder_hidden_states=outputs.encoder_hidden_states, - encoder_attentions=outputs.encoder_attentions, - ) - - - -class BartDecoderPlus(BartDecoder): - def __init__(self,keyBart:BartForConditionalGeneration,adapter_hid_dim: int) -> None: - super().__init__(keyBart.get_decoder().config) - self.decoder = keyBart.model.decoder - self.adapters = nn.ModuleList([Adapter(self.decoder.config.d_model,adapter_hid_dim) for _ in range(len(self.decoder.layers))]) - self.config = self.decoder.config - self.dropout = self.decoder.dropout - self.layerdrop = self.decoder.layerdrop - self.padding_idx = self.decoder.padding_idx - self.max_target_positions = self.decoder.max_target_positions - self.embed_scale = self.decoder.embed_scale - self.embed_tokens = self.decoder.embed_tokens - self.embed_positions = self.decoder.embed_positions - self.layers = self.decoder.layers - self.layernorm_embedding = self.decoder.layernorm_embedding - self.gradient_checkpointing = self.decoder.gradient_checkpointing - - - def forward( - self, - input_ids: torch.LongTensor = None, - attention_mask: Optional[torch.Tensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - encoder_attention_mask: Optional[torch.LongTensor] = None, - head_mask: Optional[torch.Tensor] = None, - cross_attn_head_mask: Optional[torch.Tensor] = None, - past_key_values: Optional[List[torch.FloatTensor]] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if 
output_hidden_states is not None else self.config.output_hidden_states - ) - use_cache = use_cache if use_cache is not None else self.config.use_cache - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - # retrieve input_ids and inputs_embeds - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") - elif input_ids is not None: - input = input_ids - input_shape = input.shape - input_ids = input_ids.view(-1, input_shape[-1]) - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - input = inputs_embeds[:, :, -1] - else: - raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") - - # past_key_values_length - past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 - - if inputs_embeds is None: - inputs_embeds = self.decoder.embed_tokens(input) * self.decoder.embed_scale - - attention_mask = self.decoder._prepare_decoder_attention_mask( - attention_mask, input_shape, inputs_embeds, past_key_values_length - ) - - # expand encoder attention mask - if encoder_hidden_states is not None and encoder_attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) - - # embed positions - positions = self.decoder.embed_positions(input, past_key_values_length) - - hidden_states = inputs_embeds + positions - hidden_states = self.decoder.layernorm_embedding(hidden_states) - - hidden_states = nn.functional.dropout(hidden_states, p=self.decoder.dropout, training=self.decoder.training) - - # decoder layers - all_hidden_states = () if output_hidden_states else None - all_self_attns = () if output_attentions else None - all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None - next_decoder_cache = () if use_cache else None - - # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired - for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): - if attn_mask is not None: - if attn_mask.size()[0] != (len(self.decoder.layers)): - raise ValueError( - f"The `{mask_name}` should be specified for {len(self.decoder.layers)} layers, but it is for" - f" {head_mask.size()[0]}." - ) - - for idx, decoder_layer in enumerate(self.decoder.layers): - # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) - if output_hidden_states: - all_hidden_states += (hidden_states,) - dropout_probability = random.uniform(0, 1) - if self.decoder.training and (dropout_probability < self.decoder.layerdrop): - continue - - past_key_value = past_key_values[idx] if past_key_values is not None else None - - if self.decoder.gradient_checkpointing and self.decoder.training: - - if use_cache: - logger.warning( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
- ) - use_cache = False - - def create_custom_forward(module): - def custom_forward(*inputs): - # None for past_key_value - return module(*inputs, output_attentions, use_cache) - - return custom_forward - - layer_outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(decoder_layer), - hidden_states, - attention_mask, - encoder_hidden_states, - encoder_attention_mask, - head_mask[idx] if head_mask is not None else None, - cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, - None, - ) - else: - - layer_outputs = decoder_layer( - hidden_states, - attention_mask=attention_mask, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_attention_mask, - layer_head_mask=(head_mask[idx] if head_mask is not None else None), - cross_attn_layer_head_mask=( - cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None - ), - past_key_value=past_key_value, - output_attentions=output_attentions, - use_cache=use_cache, - ) - hidden_states = layer_outputs[0] - - ######################### new ################################# - hidden_states = self.adapters[idx](hidden_states) - ######################### new ################################# - - if use_cache: - next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) - - if output_attentions: - all_self_attns += (layer_outputs[1],) - - if encoder_hidden_states is not None: - all_cross_attentions += (layer_outputs[2],) - - # add hidden states from the last decoder layer - if output_hidden_states: - all_hidden_states += (hidden_states,) - - next_cache = next_decoder_cache if use_cache else None - if not return_dict: - return tuple( - v - for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] - if v is not None - ) - return BaseModelOutputWithPastAndCrossAttentions( - last_hidden_state=hidden_states, - past_key_values=next_cache, - hidden_states=all_hidden_states, - attentions=all_self_attns, - cross_attentions=all_cross_attentions, - ) - -class BartPlus(BartModel): - def __init__(self,keyBart: BartForConditionalGeneration, adapter_hid_dim: int ) -> None: - super().__init__(keyBart.model.config) - self.config = keyBart.model.config - - # self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) - self.shared = keyBart.model.shared - - #self.encoder = BartEncoder(config, self.shared) - self.encoder = keyBart.model.encoder - - #self.decoder = BartDecoder(config, self.shared) - #self.decoder = keyBart.model.decoder - self.decoder = BartDecoderPlus(keyBart,adapter_hid_dim=adapter_hid_dim) - - def forward( - self, - input_ids: torch.LongTensor = None, - attention_mask: Optional[torch.Tensor] = None, - decoder_input_ids: Optional[torch.LongTensor] = None, - decoder_attention_mask: Optional[torch.LongTensor] = None, - head_mask: Optional[torch.Tensor] = None, - decoder_head_mask: Optional[torch.Tensor] = None, - cross_attn_head_mask: Optional[torch.Tensor] = None, - encoder_outputs: Optional[List[torch.FloatTensor]] = None, - past_key_values: Optional[List[torch.FloatTensor]] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - decoder_inputs_embeds: Optional[torch.FloatTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, Seq2SeqModelOutput]: - - # different to other models, Bart automatically creates decoder_input_ids from - # input_ids if no decoder_input_ids are provided - if 
decoder_input_ids is None and decoder_inputs_embeds is None: - if input_ids is None: - raise ValueError( - "If no `decoder_input_ids` or `decoder_inputs_embeds` are " - "passed, `input_ids` cannot be `None`. Please pass either " - "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." - ) - - decoder_input_ids = shift_tokens_right( - input_ids, self.config.pad_token_id, self.config.decoder_start_token_id - ) - - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - use_cache = use_cache if use_cache is not None else self.config.use_cache - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if encoder_outputs is None: - encoder_outputs = self.encoder( - input_ids=input_ids, - attention_mask=attention_mask, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True - elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): - encoder_outputs = BaseModelOutput( - last_hidden_state=encoder_outputs[0], - hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, - attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, - ) - - # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) - decoder_outputs = self.decoder( - input_ids=decoder_input_ids, - attention_mask=decoder_attention_mask, - encoder_hidden_states=encoder_outputs[0], - encoder_attention_mask=attention_mask, - head_mask=decoder_head_mask, - cross_attn_head_mask=cross_attn_head_mask, - past_key_values=past_key_values, - inputs_embeds=decoder_inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - if not return_dict: - return decoder_outputs + encoder_outputs - - return Seq2SeqModelOutput( - last_hidden_state=decoder_outputs.last_hidden_state, - past_key_values=decoder_outputs.past_key_values, - decoder_hidden_states=decoder_outputs.hidden_states, - decoder_attentions=decoder_outputs.attentions, - cross_attentions=decoder_outputs.cross_attentions, - encoder_last_hidden_state=encoder_outputs.last_hidden_state, - encoder_hidden_states=encoder_outputs.hidden_states, - encoder_attentions=encoder_outputs.attentions, - ) - diff --git a/spaces/rorallitri/biomedical-language-models/logs/COD Black Ops II Crack VERIFIED ONLY SKIDROW.md b/spaces/rorallitri/biomedical-language-models/logs/COD Black Ops II Crack VERIFIED ONLY SKIDROW.md deleted file mode 100644 index 4b80c75cdd8ef63173581f7097401b00d8cc9066..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/COD Black Ops II Crack VERIFIED ONLY SKIDROW.md +++ /dev/null @@ -1,6 +0,0 @@ -

      COD Black Ops II CRACK ONLY SKIDROW


Download File https://tinurll.com/2uzoIy



      -
-
      -
      -
      -

      diff --git a/spaces/rorallitri/biomedical-language-models/logs/Crack Ac16 3006 Int64 To String Learn the Secrets of Graphisoft SE.md b/spaces/rorallitri/biomedical-language-models/logs/Crack Ac16 3006 Int64 To String Learn the Secrets of Graphisoft SE.md deleted file mode 100644 index e6278bcb35537c41c31a348a565abb0693d1c95c..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Crack Ac16 3006 Int64 To String Learn the Secrets of Graphisoft SE.md +++ /dev/null @@ -1,6 +0,0 @@ -

      downloadbukuteologiislamharunnasutionpdf73


      Download ►►►►► https://tinurll.com/2uznrH



      -
-
      -
      -
      -

      diff --git a/spaces/saadob12/Chart_Data_Summarization/README.md b/spaces/saadob12/Chart_Data_Summarization/README.md deleted file mode 100644 index b39d699ca148d8b988ea84dbbe665e5e713d9dcf..0000000000000000000000000000000000000000 --- a/spaces/saadob12/Chart_Data_Summarization/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Chart Summary -emoji: 🐢 -colorFrom: indigo -colorTo: red -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/scedlatioru/img-to-music/example/Native Instruments Traktor Pro 2 V2.0.3 B10893 (Full) [RH] Keygen.md b/spaces/scedlatioru/img-to-music/example/Native Instruments Traktor Pro 2 V2.0.3 B10893 (Full) [RH] Keygen.md deleted file mode 100644 index e28c60daa9ed76ad813d1f1c2a7bbce16a9b672d..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Native Instruments Traktor Pro 2 V2.0.3 B10893 (Full) [RH] Keygen.md +++ /dev/null @@ -1,25 +0,0 @@ - -

      How to Download and Install Native Instruments Traktor Pro 2 V2.0.3 B10893 (Full) [RH] Keygen

      -

If you are looking for professional DJ software that can handle any music genre and style, you might want to check out Native Instruments Traktor Pro 2. This software is designed to give you the ultimate control over your mixes, with advanced features such as four decks, remix decks, a loop recorder, effects, and more.

      -

      However, if you want to use the full version of Traktor Pro 2, you will need to purchase a license from the official website. This can be quite expensive for some users, especially if you are just starting out or want to try it out before buying. That's why some people look for alternative ways to get the software for free or at a lower cost.

      -

      Native Instruments Traktor Pro 2 V2.0.3 B10893 (Full) [RH] Keygen


      Download Zip ····· https://gohhs.com/2uEA2g



      -

One of these ways is to download and install a keygen, which is a program that can generate a serial number or activation code for a piece of software. By using a keygen, you can bypass the registration process and unlock the full features of Traktor Pro 2 without paying anything.

      -

      However, downloading and installing a keygen is not without risks. You might end up with a virus, malware, or spyware on your computer, or you might get into legal trouble for violating the software's terms of service. That's why we do not recommend or endorse using a keygen to get Traktor Pro 2.

      -

      But if you still want to take the risk and try it out, we will show you how to download and install Native Instruments Traktor Pro 2 V2.0.3 B10893 (Full) [RH] Keygen in this article. Please note that we are not responsible for any damage or consequences that may result from following these steps.

      -

      Step 1: Download Native Instruments Traktor Pro 2 V2.0.3 B10893 (Full) [RH] Keygen

      -

      The first step is to download the keygen from a reliable source. You can search for it on Google or use a torrent site like The Pirate Bay or Kickass Torrents. Make sure you read the comments and reviews of other users before downloading anything, and use an antivirus program to scan the file for any threats.

      -

      Once you have downloaded the keygen, extract it using a program like WinRAR or 7-Zip. You should see a folder containing several files, including the keygen.exe file.

      -

      Step 2: Run the Keygen

      -

      The next step is to run the keygen.exe file as an administrator. You might get a warning message from your antivirus program or Windows Defender, but ignore it and proceed anyway.

      -

      A window will pop up with several options and buttons. You will need to select your product from the drop-down menu (Traktor Pro 2), and then click on the Generate button. This will create a serial number or activation code for Traktor Pro 2.

      -

      -

      Copy the code and save it somewhere safe. You will need it later to activate the software.

      -

      Step 3: Download and Install Traktor Pro 2

      -

      The final step is to download and install Traktor Pro 2 from the official website. You can get the latest version (V2.0.3 B10893) from this link: https://www.native-instruments.com/en/products/traktor/dj-software/traktor-pro-3/download/

      -

      Follow the instructions on the website to download and install the software on your computer. When prompted, enter the serial number or activation code that you generated with the keygen in step 2.

      -

      If everything goes well, you should be able to run Traktor Pro 2 without any limitations or restrictions.

      -

      Conclusion

      -

      In this article, we showed you how to download and install Native Instruments Traktor Pro 2 V2.0.3 B10893 (Full) [RH] Keygen on your computer. However, we also warned you about the potential risks and consequences of using a keygen to get Traktor Pro 2.

      -

We hope you found this article helpful and informative, but we also advise you to consider buying a legitimate license from the official website if you want to support the developers and enjoy the software without any worries.
      -
      -
      \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/O Caminho Jedi Pdf 205.md b/spaces/scedlatioru/img-to-music/example/O Caminho Jedi Pdf 205.md deleted file mode 100644 index 638374394b9133f52086d3097c4dd18ef247f8db..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/O Caminho Jedi Pdf 205.md +++ /dev/null @@ -1,6 +0,0 @@ -

      o caminho jedi pdf 205


      Download File >>>>> https://gohhs.com/2uEAoS



- -Download file Free Book PDF Reading Free Pdf at Complete PDF Library. ... Repair Manual Software · Caminho Nordeste Angolano Dias Carvalho João ... Return Of The Jedi A Storybook · Adventure Tales America Illustrated History ... Ricoh Aficio 3025 3030 Ricoh Mp 2510 3010 Product Code B205 B209 D007 ...
      -
      -
      -

      diff --git a/spaces/scedlatioru/img-to-music/example/Raftaar Movie English Subtitles [WORK] Download.md b/spaces/scedlatioru/img-to-music/example/Raftaar Movie English Subtitles [WORK] Download.md deleted file mode 100644 index 03f073f8d5fce58ab21561f2e476a1448918333d..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Raftaar Movie English Subtitles [WORK] Download.md +++ /dev/null @@ -1,9 +0,0 @@ - -

The cast of the film includes Sam Claflin as Raftaar Singh, Priyanka Chopra as Sara Rana, Rupert Grint as Mark, Daisy Ridley as Emily, Toby Jones as Father, Riz Ahmed as Dastan, Adeel Akhtar as Bikram, Stephen Graham as Kirpal Singh and Neena Gupta as Rani.

      -

It helps that Divine (and fellow Mumbai rapper Naezy, a.k.a. Naved Shaikh) became recognized names following their participation in the Bollywood hip-hop film Gully Boy, which came out in February. The film's director, Zoya Akhtar, and lead actor, Ranveer Singh, are among the first to appear in Gully Life to speak about Divine, which places the rapper square in the middle of India's current pop culture.

      -

      Raftaar Movie English Subtitles Download


DOWNLOAD https://gohhs.com/2uEz3B



      -

After all, there are plenty of rappers, from Honey Singh to Badshah to Raftaar, famous for their work on Bollywood songs. Gully Life touches upon all the relevant names associated with Divine, including the first crew that he was part of, Mumbai's Finest. Also seen in the documentary are Mumbai-based rappers such as Enkore (who released his album Bombay Soul last year), Naezy (who collaborated with Divine on their breakthrough song "Mere Gully Mein") and Divine's own Gully Gang crew, including DJ-producer Joel D'Souza (JD), rapper Devil and more. Other rising producers include Sez on the Beat, Karan Kanchan, Phenom and Stunnah, who integrate elements or samples from traditional Indian music styles without forcing them.

      -

The film begins as Raftaar Singh, who does nothing productive to support his family, is given an ultimatum by his father: either join his father's friend's business to earn a living, or marry the very overweight daughter of another family friend. He chooses to join his father's friend in Goa, India. There, he lies to his father's friend Kirpal Singh that he knows how to speak English. He is given the responsibility of performing escort and guard duty for a beautiful girl, Sara Rana, who has come from Romania to find her mother in India. To communicate with Sara, Raftaar Singh hires a translator, Emily. While on the visit, Sara and Raftaar Singh run into a group of bad guys, and Sara fights off and knocks out all of them. Raftaar Singh and Emily are astonished that a soft, slim and smart girl can fight like a commando.

      -

      -
      -
      \ No newline at end of file diff --git a/spaces/scite/README/README.md b/spaces/scite/README/README.md deleted file mode 100644 index 8d519e6219d82b8dfb82d803074954bf047bb464..0000000000000000000000000000000000000000 --- a/spaces/scite/README/README.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: README -emoji: 🌖 -colorFrom: red -colorTo: pink -sdk: static -pinned: false ---- - -**The scite huggingface space!** - - -Check out [https://scite.ai]() for the worlds largest database of citation statements to read how any paper was cited. - - - -Check our huggingface organization for models and datasets relating to solving NLP problems in the scientific and biomedical domains. diff --git a/spaces/sczhou/ProPainter/RAFT/utils/frame_utils.py b/spaces/sczhou/ProPainter/RAFT/utils/frame_utils.py deleted file mode 100644 index 6c491135efaffc25bd61ec3ecde99d236f5deb12..0000000000000000000000000000000000000000 --- a/spaces/sczhou/ProPainter/RAFT/utils/frame_utils.py +++ /dev/null @@ -1,137 +0,0 @@ -import numpy as np -from PIL import Image -from os.path import * -import re - -import cv2 -cv2.setNumThreads(0) -cv2.ocl.setUseOpenCL(False) - -TAG_CHAR = np.array([202021.25], np.float32) - -def readFlow(fn): - """ Read .flo file in Middlebury format""" - # Code adapted from: - # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy - - # WARNING: this will work on little-endian architectures (eg Intel x86) only! - # print 'fn = %s'%(fn) - with open(fn, 'rb') as f: - magic = np.fromfile(f, np.float32, count=1) - if 202021.25 != magic: - print('Magic number incorrect. Invalid .flo file') - return None - else: - w = np.fromfile(f, np.int32, count=1) - h = np.fromfile(f, np.int32, count=1) - # print 'Reading %d x %d flo file\n' % (w, h) - data = np.fromfile(f, np.float32, count=2*int(w)*int(h)) - # Reshape data into 3D array (columns, rows, bands) - # The reshape here is for visualization, the original code is (w,h,2) - return np.resize(data, (int(h), int(w), 2)) - -def readPFM(file): - file = open(file, 'rb') - - color = None - width = None - height = None - scale = None - endian = None - - header = file.readline().rstrip() - if header == b'PF': - color = True - elif header == b'Pf': - color = False - else: - raise Exception('Not a PFM file.') - - dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline()) - if dim_match: - width, height = map(int, dim_match.groups()) - else: - raise Exception('Malformed PFM header.') - - scale = float(file.readline().rstrip()) - if scale < 0: # little-endian - endian = '<' - scale = -scale - else: - endian = '>' # big-endian - - data = np.fromfile(file, endian + 'f') - shape = (height, width, 3) if color else (height, width) - - data = np.reshape(data, shape) - data = np.flipud(data) - return data - -def writeFlow(filename,uv,v=None): - """ Write optical flow to file. - - If v is None, uv is assumed to contain both u and v channels, - stacked in depth. - Original code by Deqing Sun, adapted from Daniel Scharstein. 
- """ - nBands = 2 - - if v is None: - assert(uv.ndim == 3) - assert(uv.shape[2] == 2) - u = uv[:,:,0] - v = uv[:,:,1] - else: - u = uv - - assert(u.shape == v.shape) - height,width = u.shape - f = open(filename,'wb') - # write the header - f.write(TAG_CHAR) - np.array(width).astype(np.int32).tofile(f) - np.array(height).astype(np.int32).tofile(f) - # arrange into matrix form - tmp = np.zeros((height, width*nBands)) - tmp[:,np.arange(width)*2] = u - tmp[:,np.arange(width)*2 + 1] = v - tmp.astype(np.float32).tofile(f) - f.close() - - -def readFlowKITTI(filename): - flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH|cv2.IMREAD_COLOR) - flow = flow[:,:,::-1].astype(np.float32) - flow, valid = flow[:, :, :2], flow[:, :, 2] - flow = (flow - 2**15) / 64.0 - return flow, valid - -def readDispKITTI(filename): - disp = cv2.imread(filename, cv2.IMREAD_ANYDEPTH) / 256.0 - valid = disp > 0.0 - flow = np.stack([-disp, np.zeros_like(disp)], -1) - return flow, valid - - -def writeFlowKITTI(filename, uv): - uv = 64.0 * uv + 2**15 - valid = np.ones([uv.shape[0], uv.shape[1], 1]) - uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16) - cv2.imwrite(filename, uv[..., ::-1]) - - -def read_gen(file_name, pil=False): - ext = splitext(file_name)[-1] - if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg': - return Image.open(file_name) - elif ext == '.bin' or ext == '.raw': - return np.load(file_name) - elif ext == '.flo': - return readFlow(file_name).astype(np.float32) - elif ext == '.pfm': - flow = readPFM(file_name).astype(np.float32) - if len(flow.shape) == 2: - return flow - else: - return flow[:, :, :-1] - return [] \ No newline at end of file diff --git a/spaces/seduerr/communicaite/app.py b/spaces/seduerr/communicaite/app.py deleted file mode 100644 index 8261b583e78006894cbf8cb29c5d4224b319ba3d..0000000000000000000000000000000000000000 --- a/spaces/seduerr/communicaite/app.py +++ /dev/null @@ -1,39 +0,0 @@ -import gradio as gr -import re - -from services.hate_speech import classify_hatespeech -from services.cola import classify_correctness -from services.bad_words import identify_bad_words - -examples = [ - 'John is a son of a.', 'John a wonderful item', 'The dog is a bastard.', - 'The dog a cat.', 'It was Peter Thiel from PayPal.', - 'Dear Thomas,\n\nI understand that last Friday when you were a guest at our restaurant you experienced an unfortunate mishap that resulted in a beverage being spilled on your coat. Please accept my sincere apology.\n\nAs we all know accidents happen but it’s how the establishment responds that either rectifies the situation or makes it worse. Unfortunately the staff on duty at the time did not reflect our customer service policy. I have investigated the incident, talked to those involved, and scheduled remedial customer relations training for them. In addition, please send the dry cleaning bill for your coat directly to me at the address on the letterhead above and we will reimburse you for the cost.\n\nWe’d like to have you back as a customer so I’m enclosing a coupon for two free entrees for you and a guest that can be used at any of our three locations in Austin. Again, my apologies for the incident. I hope you give us the opportunity to make this right. We value your patronage.\n\nSincerely,\nBenson Bailey', - "Hi Professor,\n\nI have really had a rough week and I won't be able to submit my paper in time. First, my car broke down onn Monday, then my dog got sick on Tuesday and I needed to take the bus to get to the vent annd I lost another day. 
Then I had to cram all night for an exam that I wrote today. Now, I am sitting here, trying to write this paper and I'm just too exhausted to do anything. So, I wanted to kindly ask you if I could get an extention for two days?\n\nThanks a lot,\nPeter", -] - - -def check_ethical(text: str): - # simple heuristic based on offensive word list by cmu.edu - identified_bad_words = identify_bad_words(text) - if len(identified_bad_words) > 0: - return {'status': 'Input contains offensive words.', 'data': identified_bad_words} - - # based on DistilRoberta hosted on transformers - nice = float(classify_hatespeech(text)) - if nice < .8: - return {'status': 'Input contains hate speech.', 'data': nice} - - # based on DistilBert hosted on transformers - linguistic_incorrect = float(classify_correctness(text)) - if linguistic_incorrect < .8: - return {'status': 'Input is linguistically unacceptable.', 'data': text} - return {'status': "ethical", 'data': text} - -title = 'Communicaite - Ethical Communication' -description = '''Please insert any text to scan for subliminal hate speech, the use of bad words or inappropriate language. You can also try the different examples from below. -Thank you. ''' - -demo = gr.Interface(fn=check_ethical, inputs='text', outputs='text', - examples=examples, title=title, description=description) -demo.launch() \ No newline at end of file diff --git a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Changelog_CN.md b/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Changelog_CN.md deleted file mode 100644 index ad10739c45cd5f8ac6a4e60e9924e192759f7672..0000000000000000000000000000000000000000 --- a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Changelog_CN.md +++ /dev/null @@ -1,35 +0,0 @@ -### 20230409 -- Corrected the training parameters and raised average GPU utilization: A100 from a peak of 25% to about 90%, V100 from about 50% to about 90%, 2060S from about 60% to about 85%, P40 from about 25% to about 95%; training speed is significantly improved -- Corrected a parameter: the total batch_size is now the batch_size per card -- Corrected total_epoch: the maximum is unlocked from 100 to 1000, and the default is raised from 10 to 20 -- Fixed a bug where ckpt extraction misidentified whether a model carries pitch, causing abnormal inference -- Fixed distributed training saving a ckpt once per rank -- Feature extraction now filters out NaN features -- Fixed silent input producing random consonants or noise as output (models trained with the old version need their training sets rebuilt and retrained) - -### 20230416 update -- Added a local real-time voice-changing mini GUI; double-click go-realtime-gui.bat to launch it -- Training and inference both filter out frequency bands below 50 Hz -- Lowered the minimum pitch for pyworld pitch extraction in training and inference from the default 80 to 50, so male bass between 50 and 80 Hz no longer goes mute -- The WebUI now switches language according to the system locale (currently supports en_US, ja_JP, zh_CN, zh_HK, zh_SG, zh_TW; unsupported locales default to en_US) -- Fixed recognition of some GPUs (for example, V100-16G and P4 failing to be recognized) - -### 20230428 update -- Upgraded the faiss index settings for higher speed and quality -- Removed the total_npy dependency; sharing a model no longer requires a total_npy file -- Unlocked the 16-series restriction; 4 GB VRAM GPUs now get 4 GB inference settings -- Fixed a bug in UVR5 vocal/accompaniment separation for some audio formats -- The real-time voice-changing mini GUI now supports non-40k and no-pitch models - - -### Future plans: -Features: -- Add an option to extract the small model at every epoch save -- Add an option to additionally export an mp3 to a specified path during inference -- Support a multi-speaker training tab (up to 4 speakers) -- -Base models: -- Collect breathing wavs into the training set to fix the problem of breaths turning into electronic noise after voice conversion -- We are training a base model with a singing dataset added and will release it in the future -- Upgrade the discriminator -- Upgrade the self-supervised feature structure diff --git a/spaces/simonraj/ELOralCoachv2/README.md b/spaces/simonraj/ELOralCoachv2/README.md deleted file mode 100644 index 81b1dd2f206fefefbfe2cc9b943dad20dd444547..0000000000000000000000000000000000000000 --- a/spaces/simonraj/ELOralCoachv2/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: OralCoachStreamingEL -emoji: 📉 -colorFrom: yellow -colorTo: gray -sdk: gradio -sdk_version: 3.42.0 -app_file: app.py -pinned: false -duplicated_from: simonraj/ELOralCoachv1 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Sausage Man China Version and Enjoy
the Wacky and Competitive Shooting Game.md deleted file mode 100644 index b0ab9a5efa63a80fcab410efd233a35f8f2f2237..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Sausage Man China Version and Enjoy the Wacky and Competitive Shooting Game.md +++ /dev/null @@ -1,112 +0,0 @@ -
      -

      Sausage Man China Version Download: A Fun and Wacky Battle Royale Game

      -

      If you are looking for a new and exciting battle royale game to play on your mobile device or PC, then you might want to check out Sausage Man China version download. Sausage Man is a cartoon-styled, competitive shooting, battle royale game featuring sausages as protagonists. It is a game that you can get started with effortlessly and play anytime, anywhere. You will roleplay as funny and adorable sausages and fight in high-octane, imagination-filled battles.

      -

In this article, we will tell you everything you need to know about the Sausage Man China version download, including what Sausage Man is, how to download it, what its features are, some tips and tricks for playing it, how it compares with PUBG, and why you should try it. So without further ado, let's get started!

      -

      sausage man china version download


      DOWNLOAD ☆☆☆☆☆ https://ssurll.com/2uNRK9



      -

What is Sausage Man?

      -

      Sausage Man is a cartoon-styled, competitive shooting, battle royale game featuring sausages as protagonists. It is a game that you can get started with effortlessly and play anytime, anywhere. You will roleplay as funny and adorable sausages and fight in high-octane, imagination-filled battles.

      -

      Sausage Man is a game that offers exhilarating battles, item buffs with unique powers, fresh gameplay, and adorably crude appearances. You can use a variety of weapons and accessories to customize your character and make it stand out from the crowd. You can also ride vehicles, dragons, and UFOs to traverse the map and attack your enemies. You can team up with your friends or go solo in different modes and activities. You can also enjoy voice chat and hilarious interactions with other players.

      -

      How to download Sausage Man China version?

      -

      The official version of Sausage Man is only available on Android and iOS systems. However, if you want to play Sausage Man on your PC, you can use an Android emulator like BlueStacks or NoxPlayer. These emulators enable you to run Android apps and games on your PC for free. You can download them from their official websites and follow the instructions to install them on your computer.

      -

      Once you have installed an Android emulator on your PC, you can download the APK file of Sausage Man from various sources like APKCombo, Softonic, or APKPure. These sources provide safe and reliable APK files for various apps and games. You can download the APK file of Sausage Man from any of these sources and save it on your computer.

      -

      After downloading the APK file of Sausage Man, you can open it with your Android emulator and install it on your PC. Then, you can launch the game from your emulator and enjoy playing Sausage Man on a bigger screen with better graphics and controls.
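If you prefer to script that last step, the install can also be done from the command line. The sketch below is only an illustration, not an official procedure: it assumes your emulator exposes an adb endpoint, and both the address and the APK file name are placeholders you would replace with your own values.

import subprocess

# Placeholder values - check your emulator's settings for the real adb port.
EMULATOR_ADB = "127.0.0.1:5555"
APK_PATH = "sausage_man.apk"

# Connect to the emulator's adb endpoint, then side-load the APK.
# "-r" reinstalls the app if an older build is already present.
subprocess.run(["adb", "connect", EMULATOR_ADB], check=True)
subprocess.run(["adb", "-s", EMULATOR_ADB, "install", "-r", APK_PATH], check=True)

Either way, once the install finishes you can launch the game from the emulator as described above.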

      -

      What are the features of Sausage Man game?

      -

      Sausage Man is a game that has many features that make it fun and unique. Here are some of the features that you can expect from Sausage Man game:

      -
        -
      • A fast-paced and detailed combat system, including realistic bullet physics. You can shoot, dodge, hide, aim, and reload in real time. You can also use different weapons like pistols, rifles, shotguns, grenades, and more to suit your playstyle.
      • -
      • A variety of weapons and accessories to collect and customize your character. You can find weapons and accessories in different locations on the map or loot them from other players. You can also upgrade your weapons and accessories to improve their performance. You can also change the appearance of your character by wearing different costumes, hats, glasses, masks, backpacks, etc.
      • -
      • A helpful tutorial level to guide you through the game. If you are new to Sausage Man or battle royale games in general, you can play the tutorial level to learn the basics of the game. The tutorial level will teach you how to move, shoot, loot, use items, ride vehicles, etc.
      • -
      • Multiple modes and activities to enjoy and battle in. You can choose from different modes like classic mode, team mode, duo mode, solo mode, etc. You can also participate in various activities like daily missions, events, challenges, etc. to earn rewards and rank up.
      • -
      • Superweapons and strategic planning to win. You can find superweapons like rocket launchers, flamethrowers, miniguns, etc. on the map that can give you an advantage over your enemies. You can also use strategic planning to ambush your enemies or escape from danger.
      • -
      • Ride the dragon and UFO with skills and attacks. You can ride the dragon or UFO on the map to fly over obstacles and enemies. You can also use skills and attacks while riding them to deal damage or support your teammates.
      • -
      -

      What are some tips and tricks for playing Sausage Man?

      -

      If you want to improve your skills and win more matches in Sausage Man, here are some tips and tricks that you can follow:

      -

      sausage man apk download for android
      -sausage man game download for pc
      -sausage man chinese version apk
      -sausage man battle royale game download
      -sausage man free download ios
      -sausage man apk latest version
      -sausage man download link
      -sausage man game online play
      -sausage man mod apk unlimited money
      -sausage man game review
      -sausage man apk obb download
      -sausage man emulator for pc
      -sausage man chinese version ios
      -sausage man game size
      -sausage man hack apk download
      -sausage man game tips and tricks
      -sausage man apk pure
      -sausage man system requirements for pc
      -sausage man english version release date
      -sausage man game official website
      -sausage man apk uptodown
      -sausage man best settings for pc
      -sausage man chinese version name
      -sausage man game wiki
      -sausage man redeem code 2023
      -sausage man apk no vpn
      -sausage man gameplay video
      -sausage man vs free fire
      -sausage man chinese version update
      -sausage man game discord server
      -sausage man apk mirror
      -sausage man minimum requirements for android
      -sausage man chinese version pc
      -sausage man game characters
      -sausage man cheat codes 2023
      -sausage man apk rexdl
      -sausage man graphics settings for android
      -sausage man chinese version english patch
      -sausage man game memes
      -sausage man skins and costumes
      -sausage man apk apkpure
      -sausage man keyboard controls for pc
      -sausage man chinese version discord link
      -sausage man game rating and reviews
      -sausage man unlimited diamonds apk download
      -sausage man apk old version
      -sausage man sensitivity settings for android
      -sausage man chinese version gameplay

      -
        -
      • Make adjustments to the control configurations according to your preference. You can change the sensitivity, layout, size, opacity, etc. of the controls in the settings menu. You can also enable or disable features like auto-aiming, auto-shooting, auto-looting, etc.
      • -
      • Be efficient in picking stuff up in the game. You can tap on items on the ground or drag them into your inventory to save time and space. You can also use the quick pick up button to automatically pick up the items that you need.
      • -
      • Watch out for the bots and be alert while playing. You can encounter bots in the game that look like real players but have low intelligence and skills. You can easily kill them and loot their items, but be careful not to let them distract you from other enemies or the shrinking circle.
      • -
      • Seek higher ground and use cover wisely. You can gain an advantage over your enemies by finding higher ground and sniping them from a distance. You can also use cover like buildings, trees, rocks, etc. to hide from your enemies or ambush them.
      • -
      • Ride vehicles to the safe zone and avoid the shrinking circle. You can find vehicles like cars, motorcycles, boats, etc. on the map that can help you move faster and escape from danger. You can also use vehicles as weapons by running over your enemies or exploding them. However, be aware that vehicles make noise and attract attention, so use them wisely.
      • -
      • Use magic items and throwable items to gain an edge over your enemies. You can find magic items like invisibility cloaks, teleportation devices, shields, etc. on the map that can give you special abilities or buffs. You can also use throwable items like grenades, molotovs, smoke bombs, etc. to damage or confuse your enemies.
      • -
      -

      How does Sausage Man compare with PUBG?

      -

      Sausage Man and PUBG are both popular battle royale games that have many similarities and differences. Here are some of the main comparisons between them:

      -
        -
      • Sausage Man has minimalistic graphics and sausage-like characters, while PUBG has realistic graphics and human characters. Sausage Man has a more cartoonish and humorous style, while PUBG has a more serious and gritty style.
      • -
      • Sausage Man has more quirky and humorous elements, while PUBG has more dark and violent elements. Sausage Man has features like riding dragons and UFOs, using magic items, wearing funny costumes, etc. while PUBG has features like blood effects, gore effects, realistic weapons, etc.
      • -
      • Sausage Man has more unique vehicles, weapons, items, and costumes, while PUBG has more conventional ones. Sausage Man has vehicles like flying saucers, hot air balloons, etc., weapons like rocket launchers, flamethrowers, etc., items like invisibility cloaks, teleportation devices, etc., and costumes like animal suits, superhero outfits, etc., while PUBG has vehicles like jeeps, trucks, etc., weapons like AK-47s, M16s, etc., items like medkits, bandages, etc., and costumes like military uniforms, helmets, etc.
      • -
      -

      Conclusion: Why you should try Sausage Man China version download

      -

      Sausage Man is a fun and wacky battle royale game that offers a different experience from other games in the genre. It is a game that is easy to play and suitable for all ages, as it does not have blood or gore effects. It is also a game that is free to play and has a lot of customization options for your character.

      -

      If you are looking for a new and exciting battle royale game to play on your mobile device or PC, then you should try Sausage Man China version download. You will enjoy the exhilarating battles, the item buffs with unique powers, the fresh gameplay, and the adorably crude appearances of Sausage Man. You will also have fun with the voice chat and hilarious interactions with other players.

      -

      So what are you waiting for? Download Sausage Man China version today and join the sausage party!

      -

      FAQs

      -

      Here are some frequently asked questions about Sausage Man China version download:

      -
        -
      1. Is Sausage Man China version safe to download?
      2. -

        Yes, Sausage Man China version is safe to download as long as you download it from a reliable source like APKCombo, Softonic, or APKPure. These sources provide safe and verified APK files for various apps and games.

        -
      3. Is Sausage Man China version available in English?
      4. -

  Yes, Sausage Man China version is available in English. You can change the language settings in the game to English or any other language that you prefer.

        -
      5. How can I play Sausage Man with my friends?
      6. -

        You can play Sausage Man with your friends by inviting them to join your team or by joining their team. You can also use the voice chat feature to communicate with your teammates and coordinate your strategies.

        -
      7. What are the system requirements for Sausage Man?
      8. -

        The system requirements for Sausage Man are as follows:

        -
          -
        • For Android devices: Android 5.0 or higher, 2 GB of RAM or more, and 1.5 GB of free storage space or more
        • -
        • For iOS devices: iOS 9.0 or higher, iPhone 6s or newer, iPad Air 2 or newer, and 1.5 GB of free storage space or more
        • -
        • For PC: Windows 7 or higher, Intel Core i3 or higher, 4 GB of RAM or more, and 2 GB of free storage space or more
        • -
        -
      9. Where can I find more information about Sausage Man?
      10. -

        You can find more information about Sausage Man by visiting its official website, Facebook page, YouTube channel, or Discord server. You can also read reviews and guides from other players and websites.

        -

      -
      -
      \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Joystick Games for PC - Download the Latest and Greatest Games.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Joystick Games for PC - Download the Latest and Greatest Games.md deleted file mode 100644 index 0ff44bb929686758831caf7ce4fa1ecd3828625f..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Joystick Games for PC - Download the Latest and Greatest Games.md +++ /dev/null @@ -1,111 +0,0 @@ -
      -

      How to Download Free Games for PC with Joystick

      -

      If you are looking for a new way to enjoy gaming on your PC, you might want to try using a joystick. A joystick is a device that allows you to control your character or vehicle in a game by moving a stick in different directions. Joysticks can enhance your gaming experience by giving you more precision, immersion, and fun. In this article, we will show you how to download free games for PC with joystick support, how to choose a good joystick for PC gaming, how to connect it to your PC, and how to play free games for PC with joystick.

      -

      download free games for pc with joystick


      Download File 🗹 https://ssurll.com/2uNZVr



      -

      What is a Joystick and Why Use It for PC Gaming?

      -

      A joystick is a type of game controller that consists of a stick that can be moved in various directions and one or more buttons that can be pressed. Joysticks are often used for flight simulators, racing games, space shooters, platformers, and other genres that require precise movement and aiming. Joysticks can also be used for other types of games that support gamepad input.

      -

      Using a joystick for PC gaming can have several benefits, such as:

      -
        -
      • More accuracy and responsiveness: A joystick can give you more control over your character or vehicle than a keyboard or mouse. You can move the stick in any direction you want, adjust the speed and intensity of your movement, and perform complex maneuvers with ease.
      • -
      • More immersion and realism: A joystick can make you feel like you are actually piloting a plane, driving a car, or shooting a gun. You can feel the feedback from the stick as you move it, hear the sound effects from the buttons, and see the realistic graphics from the games.
      • -
      • More fun and variety: A joystick can make your gaming experience more enjoyable and exciting. You can try different types of games that you might not have played before, experiment with different settings and modes, and challenge yourself with different levels of difficulty.
      • -
      -

      How to Choose a Good Joystick for PC Gaming?

      -

      Before you download free games for PC with joystick support, you need to have a good joystick that works well with your PC. There are many types and models of joysticks available on the market, so how do you choose the best one for you? Here are some features to look for in a joystick:

      -

      download free pc games compatible with joystick
      -download free joystick games for windows 10
      -download free full version pc games with joystick support
      -download free pc games that use joystick
      -download free joystick games for laptop
      -download free pc games for usb joystick
      -download free joystick games for windows 7
      -download free pc games with controller support
      -download free joystick games for pc offline
      -download free pc games that work with joystick
      -download free joystick games for windows 8
      -download free pc games with gamepad support
      -download free joystick games for computer
      -download free pc games for wireless joystick
      -download free joystick games for windows xp
      -download free pc games with xbox controller support
      -download free joystick games for pc online
      -download free pc games for bluetooth joystick
      -download free joystick games for windows vista
      -download free pc games with ps4 controller support
      -download free joystick games for pc full version
      -download free pc games for android joystick
      -download free joystick games for windows 11
      -download free pc games with steam controller support
      -download free joystick games for mac
      -download free pc games for vr joystick
      -download free joystick racing games for pc
      -download free pc games with switch controller support
      -download free joystick action games for pc
      -download free pc games for dualshock 4 controller
      -download free joystick adventure games for pc
      -download free pc games with keyboard and mouse support
      -download free joystick shooting games for pc
      -download free pc games for logitech controller
      -download free joystick fighting games for pc
      -download free pc games with touch screen support
      -download free joystick simulation games for pc
      -download free pc games for thrustmaster controller
      -download free joystick sports games for pc
      -download free pc games with motion controller support
      -download free joystick arcade games for pc
      -download best free pc games with joystick support
      -download free joystick strategy games for pc
      -how to download free pc games with joystick support
      -download free joystick horror games for pc
      -where to download free pc games with joystick support
      -download free joystick puzzle games for pc
      -top 10 free pc games with joystick support to download

      -
        -
      • Compatibility: Make sure that the joystick is compatible with your PC and the games that you want to play. Check the system requirements and the game specifications before buying a joystick. Some joysticks may require additional software or drivers to work properly.
      • -
      • Quality: Choose a joystick that is durable, comfortable, and responsive. Look for a joystick that has a sturdy base, a smooth stick, and buttons that are easy to press and reach. Avoid joysticks that are cheap, flimsy, or have loose parts.
      • -
      • Functionality: Choose a joystick that has the features and functions that you need for your gaming style. Look for a joystick that has enough buttons, triggers, switches, and axes for your preferred games. Some joysticks may also have extra features such as vibration feedback, LED lights, or programmable buttons.
      • -
      • Price: Choose a joystick that fits your budget and offers good value for money. Compare different joysticks and their prices online or in stores. Read reviews and ratings from other users and experts to get an idea of the quality and performance of the joysticks.
      • -
      -

      How to Connect a Joystick to Your PC?

      -

      Once you have chosen a good joystick for PC gaming, you need to connect it to your PC and make sure that it works properly. Here are the steps to connect a joystick to your PC:

      -
        -
      1. Plug the joystick into an available USB port on your PC. If your joystick has a different type of connector, you may need an adapter or a converter to connect it to your PC.
      2. -
      3. Wait for your PC to recognize the joystick and install the necessary drivers. You may also need to download and install additional software or drivers from the manufacturer's website if your joystick requires them.
      4. -
      5. Open the Control Panel on your PC and go to Devices and Printers. You should see your joystick listed under the Devices section. Right-click on your joystick and select Game Controller Settings.
      6. -
7. In the Game Controller Settings window, select your joystick and click on Properties. You should see a test page where you can check the status and functionality of your joystick. You can move the stick, press the buttons, and see if they respond correctly on the screen. (A short script after this list shows the same check done from code.)
      8. -
      9. If everything works fine, click on OK and close the window. If you encounter any problems or errors, try troubleshooting them by following the instructions from the manufacturer or online sources.
      10. -
      -
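For readers who like to double-check things programmatically, here is a minimal Python version of the test from step 7. It uses the pygame library purely as one convenient choice, and it assumes pygame is installed (pip install pygame); any input library would do.

import pygame

# A quick programmatic version of the Game Controller Settings test.
pygame.init()
pygame.joystick.init()

if pygame.joystick.get_count() == 0:
    print("No joystick detected - check the USB connection and drivers.")
else:
    stick = pygame.joystick.Joystick(0)  # first detected joystick
    print(f"Detected: {stick.get_name()} "
          f"({stick.get_numaxes()} axes, {stick.get_numbuttons()} buttons)")
    # Watch events for about five seconds; move the stick and press buttons.
    end = pygame.time.get_ticks() + 5000
    while pygame.time.get_ticks() < end:
        for event in pygame.event.get():
            if event.type == pygame.JOYAXISMOTION:
                print("axis", event.axis, round(event.value, 2))
            elif event.type == pygame.JOYBUTTONDOWN:
                print("button", event.button, "pressed")
pygame.quit()

If the axes and buttons print as you move the stick, Windows and your games should see the joystick too.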

      How to Find and Download Free Games for PC with Joystick Support?

      -

      Now that you have connected your joystick to your PC, you are ready to find and download free games for PC with joystick support. There are many sources of free games for PC with joystick support online, but not all of them are safe or reliable. You need to be careful when downloading free games from unknown or untrusted websites, as they may contain viruses, malware, or other harmful content. Here are some of the best sources of free games for PC with joystick support that we recommend:
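Whatever source you use, it is worth verifying a download before you run it. If a site publishes a checksum for a file, you can compare it against your copy with a few lines of Python; this is a generic sketch, and the file name at the bottom is just a placeholder.

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, read in 1 MB chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(sha256_of("free_game_installer.exe"))  # placeholder file name

If the digest does not match what the site lists, delete the file rather than running it.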

      -

      itch.io

      -

      itch.io is a website that hosts indie games created by developers from all over the world. You can find thousands of free games for PC with joystick support on itch.io, ranging from casual to hardcore, from retro to modern, from 2D to 3D, and from any genre imaginable. You can browse by categories, tags, ratings, popularity, or recommendations. You can also search by keywords or filters such as platform, input device, genre, theme, or features. To download free games from itch.io, you just need to create a free account and click on the download button on the game page. Some games may require you to unzip or install them before playing.

      -

      Softonic

      -

      Softonic is a website that offers free software downloads for various platforms and devices. You can find hundreds of free games for PC with joystick support on Softonic, including classics such as Pac-Man, Tetris, Asteroids, Space Invaders, and Super Mario Bros, as well as newer titles such as Fortnite, Among Us, Minecraft, and GTA V. You can browse by categories, ratings, downloads, or editor's picks. You can also search by keywords or filters such as platform, genre, license, or language. To download free games from Softonic, you just need to click on the download button on the game page and follow the instructions. Some games may require you to run a setup file or a launcher before playing.

      -

      RetroArch

      -

RetroArch is a program that allows you to play retro games on your PC using various emulators. Emulators are programs that mimic the hardware and software of old consoles or arcade machines, such as Nintendo, Sega, Atari, or Neo Geo. You can find thousands of free games for PC with joystick support on RetroArch, including classics such as Sonic the Hedgehog, Street Fighter II, The Legend of Zelda, Metal Slug, and Final Fantasy. You can browse by platforms, genres, regions, or playlists. You can also search by keywords or filters such as title, developer, publisher, or year. To download free games from RetroArch, you need to download and install the RetroArch software on your PC first. Then you need to download and install the emulators (called cores) that you want to use from the online updater. Finally, you need to download the games (called ROMs) that you want to play from various websites or sources. Some games may require you to unzip or extract them before playing.
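Once it is set up, RetroArch can also be launched from a small script, which is handy when you have many cores and games. The sketch below is an illustration only: the "-L" flag tells RetroArch which libretro core to load, and all three paths are placeholders you would replace with your actual install locations.

import subprocess

# Placeholder paths - point these at your own RetroArch, core, and game files.
RETROARCH = r"C:\RetroArch-Win64\retroarch.exe"
CORE = r"C:\RetroArch-Win64\cores\snes9x_libretro.dll"
ROM = r"C:\roms\my_game.sfc"

# "-L" selects which libretro core RetroArch should load for this content.
subprocess.run([RETROARCH, "-L", CORE, ROM], check=True)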

      -

      How to Play Free Games for PC with Joystick?

      -

      After you have downloaded and installed your free games for PC with joystick support, you are ready to play them and have fun. Here are some tips and tricks to play free games for PC with joystick:

      -

      Configure your joystick settings

      -

      Before you start playing a game, you should configure your joystick settings to make sure that it works well with the game. You can do this by going to the game's options or settings menu and looking for the input or controller section. There you can adjust the sensitivity, deadzone, inversion, vibration, and mapping of your joystick. You can also calibrate your joystick if needed. You can also use third-party software such as JoyToKey or Xpadder to customize your joystick settings for any game.
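To make the deadzone setting concrete, here is a short, generic Python sketch (not tied to any particular game or tool) of how a raw axis reading is typically mapped to a usable value:

def apply_deadzone(value: float, deadzone: float = 0.15) -> float:
    """Zero out tiny stick movements, then rescale so the output
    still covers the full -1.0 to 1.0 range outside the deadzone."""
    if abs(value) < deadzone:
        return 0.0
    sign = 1.0 if value > 0 else -1.0
    return sign * (abs(value) - deadzone) / (1.0 - deadzone)

print(apply_deadzone(0.05))  # 0.0 - small wobble treated as stick drift
print(apply_deadzone(0.5))   # about 0.41 - real movement, rescaled

A larger deadzone hides drift on a worn stick; a smaller one makes fine aiming more responsive.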

      -

      Explore different genres and styles of games

      -

      One of the advantages of using a joystick for PC gaming is that you can play different types of games that you might not have tried before. You can explore different genres and styles of games that suit your preferences and skills. For example, you can try flight simulators if you like realistic and immersive games, racing games if you like fast-paced and competitive games, space shooters if you like action-packed and sci-fi games, platformers if you like colorful and challenging games, and so on. You can also try different modes and levels of difficulty within each game to test your abilities and improve your skills.

      -

      Have fun and challenge yourself

      -

      The most important thing when playing free games for PC with joystick is to have fun and enjoy yourself. You can play solo or with friends online or offline. You can also join online communities and forums where you can share your experiences and tips with other gamers who use joysticks. You can also challenge yourself by setting goals and achievements for yourself or competing with other players on leaderboards and rankings.

      -

      Conclusion

      -

      In conclusion, using a joystick for PC gaming can be a great way to enhance your gaming experience by giving you more control, immersion, and fun. You can download free games for PC with joystick support from various sources online such as itch.io, Softonic, or RetroArch. You can also choose a good joystick for PC gaming by looking for compatibility, quality, functionality, and price. You can also connect your joystick to your PC by plugging it into a USB port and configuring your joystick settings. You can also play free games for PC with joystick by exploring different genres and styles of games, having fun and challenging yourself. We hope that this article has helped you learn how to download free games for PC with joystick and enjoy them to the fullest. Happy gaming!

      -

      FAQs

      -

      Here are some frequently asked questions about downloading free games for PC with joystick:

      -
        -
      1. Q: What are some of the best joysticks for PC gaming?
      2. -
      3. A: Some of the best joysticks for PC gaming are the Logitech Extreme 3D Pro, the Thrustmaster T-Flight Hotas X, the Saitek X52 Pro, and the Hori Real Arcade Pro V Kai.
      4. -
      5. Q: What are some of the best free games for PC with joystick support?
      6. -
      7. A: Some of the best free games for PC with joystick support are War Thunder, FlightGear, Star Wars: The Old Republic, TrackMania Nations Forever, and SuperTuxKart.
      8. -
      9. Q: How can I play old console games on my PC with a joystick?
      10. -
      11. A: You can play old console games on your PC with a joystick by using emulators such as RetroArch, MAME, or Dolphin. You just need to download and install the emulators and the ROMs of the games that you want to play.
      12. -
      13. Q: How can I fix common problems or errors with my joystick?
      14. -
      15. A: You can fix common problems or errors with your joystick by updating your drivers, reinstalling your software, checking your USB ports, calibrating your joystick, or contacting customer support.
      16. -
      17. Q: How can I improve my skills and performance when playing free games for PC with joystick?
      18. -
      19. A: You can improve your skills and performance when playing free games for PC with joystick by practicing regularly, learning from other players, watching tutorials and guides, and adjusting your settings and preferences.
      20. -

      -
      -
      \ No newline at end of file diff --git a/spaces/siya02/Konakni-TTS/ttsv/src/glow_tts/attentions.py b/spaces/siya02/Konakni-TTS/ttsv/src/glow_tts/attentions.py deleted file mode 100644 index 62b8c83acbd3150b6d6686f21f3627781107c1ba..0000000000000000000000000000000000000000 --- a/spaces/siya02/Konakni-TTS/ttsv/src/glow_tts/attentions.py +++ /dev/null @@ -1,378 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=None, - block_length=None, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - self.block_length = block_length - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - window_size=window_size, - p_dropout=p_dropout, - block_length=block_length, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - for i in range(self.n_layers): - x = x * x_mask - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class CouplingBlock(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - sigmoid_scale=False, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - self.sigmoid_scale = sigmoid_scale - - start = torch.nn.Conv1d(in_channels // 2, hidden_channels, 1) - start = torch.nn.utils.weight_norm(start) - self.start = start - # Initializing last layer to 0 makes the affine coupling layers - # do nothing at first. It helps to stabilze training. 
- end = torch.nn.Conv1d(hidden_channels, in_channels, 1) - end.weight.data.zero_() - end.bias.data.zero_() - self.end = end - - self.wn = modules.WN( - in_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels, - p_dropout, - ) - - def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs): - b, c, t = x.size() - if x_mask is None: - x_mask = 1 - x_0, x_1 = x[:, : self.in_channels // 2], x[:, self.in_channels // 2 :] - - x = self.start(x_0) * x_mask - x = self.wn(x, x_mask, g) - out = self.end(x) - - z_0 = x_0 - m = out[:, : self.in_channels // 2, :] - logs = out[:, self.in_channels // 2 :, :] - if self.sigmoid_scale: - logs = torch.log(1e-6 + torch.sigmoid(logs + 2)) - - if reverse: - z_1 = (x_1 - m) * torch.exp(-logs) * x_mask - logdet = None - else: - z_1 = (m + torch.exp(logs) * x_1) * x_mask - logdet = torch.sum(logs * x_mask, [1, 2]) - - z = torch.cat([z_0, z_1], 1) - return z, logdet - - def store_inverse(self): - self.wn.remove_weight_norm() - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - window_size=None, - heads_share=True, - p_dropout=0.0, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.p_dropout = p_dropout - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels ** -0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - if proximal_init: - self.conv_k.weight.data.copy_(self.conv_q.weight.data) - self.conv_k.bias.data.copy_(self.conv_q.bias.data) - nn.init.xavier_uniform_(self.conv_v.weight) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." 
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings) - rel_logits = self._relative_position_to_absolute_position(rel_logits) - scores_local = rel_logits / math.sqrt(self.k_channels) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores * block_mask + -1e4 * (1 - block_mask) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. 
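# (Editorial note on the slice below: after the two pads, row i of the
# [length + 1, 2*length - 1] view holds the original row i shifted right by i,
# so relative offset r of query i lands in column r + i; keeping rows :length
# and columns length - 1 onward therefore selects exactly the absolute key
# positions 0..length-1 for every query.)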
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # pad along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - - self.conv_1 = nn.Conv1d( - in_channels, filter_channels, kernel_size, padding=kernel_size // 2 - ) - self.conv_2 = nn.Conv1d( - filter_channels, out_channels, kernel_size, padding=kernel_size // 2 - ) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(x * x_mask) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - return x * x_mask diff --git a/spaces/siya02/Konakni-TTS/ttsv/utils/inference/api.py b/spaces/siya02/Konakni-TTS/ttsv/utils/inference/api.py deleted file mode 100644 index d6bcabd194a4531801941d5e1d248dc134ce255f..0000000000000000000000000000000000000000 --- a/spaces/siya02/Konakni-TTS/ttsv/utils/inference/api.py +++ /dev/null @@ -1,66 +0,0 @@ -from starlette.responses import StreamingResponse -from tts import MelToWav, TextToMel -from advanced_tts import load_all_models, run_tts_paragraph -from typing import Optional -from pydantic import BaseModel -from fastapi import FastAPI, HTTPException -import uvicorn -import base64 -import argparse -import json -import time -from argparse import Namespace - -app = FastAPI() - - -class TextJson(BaseModel): - text: str - lang: Optional[str] = "hi" - noise_scale: Optional[float]=0.667 - length_scale: Optional[float]=1.0 - transliteration: Optional[int]=1 - number_conversion: Optional[int]=1 - split_sentences: Optional[int]=1 - - - - -@app.post("/TTS/") -async def tts(input: TextJson): - text = input.text - lang = input.lang - - args = Namespace(**input.dict()) - - args.wav = '../../results/api/'+str(int(time.time())) + '.wav' - - if text: - sr, audio = run_tts_paragraph(args) - else: - raise HTTPException(status_code=400, detail={"error": "No text"}) - - ## to return output as a file - audio = open(args.wav, mode='rb') - return StreamingResponse(audio, media_type="audio/wav") - - # with open(args.wav, "rb") as audio_file: - # encoded_bytes = base64.b64encode(audio_file.read()) - # encoded_string = encoded_bytes.decode() - # return {"encoding": "base64",
"data": encoded_string, "sr": sr} - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("-a", "--acoustic", required=True, type=str) - parser.add_argument("-v", "--vocoder", required=True, type=str) - parser.add_argument("-d", "--device", type=str, default="cpu") - parser.add_argument("-L", "--lang", type=str, required=True) - - args = parser.parse_args() - - load_all_models(args) - - uvicorn.run( - "api:app", host="0.0.0.0", port=6006, log_level="debug" - ) diff --git a/spaces/skf15963/summary/fengshen/models/deltalm/configuration_deltalm.py b/spaces/skf15963/summary/fengshen/models/deltalm/configuration_deltalm.py deleted file mode 100644 index 97abb0b3e2c796b6be41d6bf22ae95a1b4557ef7..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/models/deltalm/configuration_deltalm.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" deltalm model configuration""" - -import warnings -from transformers.configuration_utils import PretrainedConfig -from transformers.utils import logging -logger = logging.get_logger(__name__) - -BART_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "IDEA/Deltalm": "https://huggingface.co/Deltalm-362M-Zh-En/resolve/main/config.json", -} - - -class DeltalmConfig(PretrainedConfig): - - model_type = "Deltalm" - keys_to_ignore_at_inference = ["past_key_values"] - attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} - - def __init__( - self, - vocab_size=250001, - max_position_embeddings=1024, - encoder_layers=12, - encoder_ffn_dim=3072, - encoder_attention_heads=12, - decoder_layers=6, - decoder_ffn_dim=3072, - decoder_attention_heads=12, - encoder_layerdrop=0.0, - decoder_layerdrop=0.0, - activation_function="gelu", - d_model=1024, - dropout=0.1, - attention_dropout=0.0, - activation_dropout=0.0, - init_std=0.02, - classifier_dropout=0.0, - scale_embedding=False, - use_cache=True, - num_labels=3, - pad_token_id=1, - bos_token_id=0, - eos_token_id=2, - is_encoder_decoder=True, - decoder_start_token_id=0, - forced_eos_token_id=2, - label_smoothing=0.1, - length_penalty=1.0, - encoder_normalize_before=False, - **kwargs - ): - self.vocab_size = vocab_size - self.max_position_embeddings = max_position_embeddings - self.d_model = d_model - self.encoder_ffn_dim = encoder_ffn_dim - self.encoder_layers = encoder_layers - self.encoder_attention_heads = encoder_attention_heads - self.decoder_ffn_dim = decoder_ffn_dim - self.decoder_layers = decoder_layers - self.decoder_attention_heads = decoder_attention_heads - self.dropout = dropout - self.attention_dropout = attention_dropout - self.activation_dropout = activation_dropout - self.activation_function = activation_function - self.init_std = init_std - self.encoder_layerdrop = encoder_layerdrop - self.decoder_layerdrop = decoder_layerdrop - self.classifier_dropout = classifier_dropout - self.use_cache = use_cache - self.num_hidden_layers = encoder_layers - self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True - self.label_smoothing = label_smoothing - self.encoder_normalize_before = encoder_normalize_before - - super().__init__( - num_labels=num_labels, - pad_token_id=pad_token_id, - bos_token_id=bos_token_id, - eos_token_id=eos_token_id, - is_encoder_decoder=is_encoder_decoder, - decoder_start_token_id=decoder_start_token_id, - forced_eos_token_id=forced_eos_token_id, - length_penalty=length_penalty, - **kwargs, - ) - - # ensure backward compatibility for BART CNN models - 
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False): - self.forced_bos_token_id = self.bos_token_id - warnings.warn( - f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. " - "The config can simply be saved and uploaded again to be fixed." - ) - - @property - def num_attention_heads(self) -> int: - return self.encoder_attention_heads - - @property - def hidden_size(self) -> int: - return self.d_model diff --git a/spaces/sklearn-docs/bayesian-ridge-regression/README.md b/spaces/sklearn-docs/bayesian-ridge-regression/README.md deleted file mode 100644 index 643e1a5d09bbf3b2135d9539a24b26918f070659..0000000000000000000000000000000000000000 --- a/spaces/sklearn-docs/bayesian-ridge-regression/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Bayesian Ridge Regression -emoji: ⚡ -colorFrom: gray -colorTo: gray -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sky24h/Free-View_Expressive_Talking_Head_Video_Editing/face_detection/detection/sfd/__init__.py b/spaces/sky24h/Free-View_Expressive_Talking_Head_Video_Editing/face_detection/detection/sfd/__init__.py deleted file mode 100644 index 5a63ecd45658f22e66c171ada751fb33764d4559..0000000000000000000000000000000000000000 --- a/spaces/sky24h/Free-View_Expressive_Talking_Head_Video_Editing/face_detection/detection/sfd/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .sfd_detector import SFDDetector as FaceDetector \ No newline at end of file diff --git a/spaces/sophiaaez/BLIPvOFAde/README.md b/spaces/sophiaaez/BLIPvOFAde/README.md deleted file mode 100644 index 78eb3f08440455d08e3aa5182fdd21207ec5da29..0000000000000000000000000000000000000000 --- a/spaces/sophiaaez/BLIPvOFAde/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: BLIPvOFAde -emoji: 🥊 -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 2.8.14 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/data_scripts/download_wmt19_and_before.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/data_scripts/download_wmt19_and_before.py deleted file mode 100644 index 3465731eb3e55047c44d1b336a97e99cb3a89a53..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/data_scripts/download_wmt19_and_before.py +++ /dev/null @@ -1,899 +0,0 @@ -from typing import NamedTuple, List -from urllib.parse import urlparse -import os, sys -import subprocess -from subprocess import check_call, check_output -import glob -import wget -import re -import multiprocessing as mp -from functools import partial -import pathlib -from collections import OrderedDict - -WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) - -if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): - print('please specify your working directory root in OS environment variable WORKDIR_ROOT. 
Exiting...') - sys.exit(-1) - -# scripts and data locations -CWD = os.getcwd() -UTILS = f"{CWD}/utils" - -MOSES = f"{UTILS}/mosesdecoder" -SGM_TOOL = f'{MOSES}/scripts/ems/support/input-from-sgm.perl' - -TMX2CORPUS = f"{UTILS}/tmx2corpus" -TMX_TOOL = f'python {TMX2CORPUS}/tmx2corpus.py' - -to_data_path = f'{WORKDIR_ROOT}/wmt' -download_to = f'{to_data_path}/downloads' -manually_downloads = f'{to_data_path}/downloads' -extract_to = f'{to_data_path}/extracted' -#DESTDIR=${WORKDIR_ROOT}/ML50/raw/ -raw_data = f'{WORKDIR_ROOT}/ML50/raw' -#### - -class DLDataset(NamedTuple): - name: str - train_urls: List[str] - valid_urls: List[str] - test_urls: List[str] - train_files_patterns: List[str] = [] - valid_files_patterns: List[str] = [] - test_files_patterns: List[str] = [] - - - -def bar_custom(current, total, width=80): - print("Downloading: %d%% [%d / %d] Ks" % (current / total * 100, current / 1000, total / 1000), end='\r') - -def get_downloaded_file(dl_folder, url): - if isinstance(url, tuple): - url, f = url - else: - url_f = urlparse(url) - # f = os.path.split(url_f.path)[-1] - f = '_'.join(url_f.path.split('/')[1:]) - return url, f"{dl_folder}/{f}" - -def download_parts_and_combine(dl_folder, urls, filename): - parts = [] - for url_record in urls: - url, part_file = get_downloaded_file(dl_folder, url_record) - if os.path.exists(part_file): - print(f'{part_file} has already been downloaded so skip') - else: - part_file = wget.download(url, part_file, bar=bar_custom) - parts.append(part_file) - - def get_combine_cmd(parts): - #default as tar.gz.?? - return f'cat {" ".join(parts)} > {filename}' - - combine_cmd = get_combine_cmd(parts) - call(combine_cmd, debug=True) - return filename - -def download_a_url(dl_folder, url): - url, filename = get_downloaded_file(dl_folder, url) - if os.path.exists(filename): - print(f'{filename} has already been downloaded so skip') - return filename - - print(f'downloading {url} to {filename}') - if isinstance(url, list) or isinstance(url, tuple): - download_parts_and_combine(dl_folder, url, filename) - else: - wget.download(url, filename, bar=bar_custom) - print(f'downloaded: {filename}') - return filename - -def download_files(dl_folder, urls, completed_urls={}): - for url_record in urls: - url, _ = get_downloaded_file(dl_folder, url_record) - filename = download_a_url(dl_folder, url_record) - completed_urls[str(url)] = filename - return completed_urls - -def check_need_manual_downalod(dl_folder, to_manually_download_urls): - to_be_manually_dowloaded = [] - manually_completed_urls = {} - for url_record, instruction in to_manually_download_urls: - url, filename = get_downloaded_file(dl_folder, url_record) - if not os.path.exists(filename): - print(f'{url} needs to be downloaded manually, please download it manually following {instruction}; and copy it to {filename}') - to_be_manually_dowloaded.append((url, filename)) - else: - manually_completed_urls[url] = filename - # if len(to_be_manually_dowloaded) > 0: - # raise ValueError('Missing files that need to be downloaded manually; stop the process now.') - return to_be_manually_dowloaded - -def download_dataset(to_folder, dl_dataset, completed_urls={}): - download_files(to_folder, dl_dataset.train_urls, completed_urls) - download_files(to_folder, dl_dataset.valid_urls, completed_urls) - download_files(to_folder, dl_dataset.test_urls, completed_urls) - print('completed downloading') - return completed_urls - -def call(cmd, debug=False): - if debug: - print(cmd) - check_call(cmd, shell=True) - - -def 
get_extract_name(file_path): - path = os.path.split(file_path) - return path[-1] + '_extract' #.split('.')[0] - -def extract_file(downloaded_file, extract_folder, get_extract_name=get_extract_name, debug=False): - extract_name = get_extract_name(downloaded_file) - extract_to = f'{extract_folder}/{extract_name}' - os.makedirs(extract_to, exist_ok=True) - if os.path.exists(f'{extract_to}/DONE'): - print(f'{downloaded_file} has already been extracted to {extract_to} so skip') - return extract_to - def get_extract_cmd(filename): - if filename.endswith('.tgz') or filename.endswith('tar.gz'): - return f'tar xzfv {filename} -C {extract_to}' - elif filename.endswith('.gz.tar'): - return f'tar xfv {filename} -C {extract_to}; (cd {extract_to}; gzip -d *.gz; [ $? -eq 0 ] || gzip -d */*.gz)' - elif filename.endswith('.tar'): - return f'tar xfv {filename} -C {extract_to}' - elif filename.endswith('.gz'): - return f'cp {filename} {extract_to}; (cd {extract_to}; gzip -d *.gz)' - elif filename.endswith('.zip'): - return f'unzip {filename} -d {extract_to}' - extract_cmd = get_extract_cmd(downloaded_file) - print(f'extracting {downloaded_file}') - if isinstance(extract_cmd, list): - for c in extract_cmd: - call(c, debug=debug) - else: - call(extract_cmd, debug=debug) - call(f'echo DONE > {extract_to}/DONE') - return extract_to - - -def extract_all_files( - completed_urls, extract_folder, - get_extract_name=get_extract_name, - completed_extraction={}, - debug=False): - extracted_folders = OrderedDict() - for url, downloaded_file in set(completed_urls.items()): - if downloaded_file in completed_extraction: - print(f'{downloaded_file} is already extracted; so skip') - continue - folder = extract_file(downloaded_file, extract_folder, get_extract_name, debug) - extracted_folders[url] = folder - return extracted_folders - - -def my_glob(folder): - for p in [f'{folder}/*', f'{folder}/*/*', f'{folder}/*/*/*']: - for f in glob.glob(p): - yield f - - -def sgm2raw(sgm, debug): - to_file = sgm[0:len(sgm) - len('.sgm')] - if os.path.exists(to_file): - debug and print(f'{sgm} already converted to {to_file}; so skip') - return to_file - cmd = f'{SGM_TOOL} < {sgm} > {to_file}' - call(cmd, debug) - return to_file - -def tmx2raw(tmx, debug): - to_file = tmx[0:len(tmx) - len('.tmx')] - to_folder = os.path.join(*os.path.split(tmx)[:-1]) - if os.path.exists(f'{to_folder}/bitext.en'): - debug and print(f'{tmx} already extracted to {to_file}; so skip') - return to_file - cmd = f'(cd {to_folder}; {TMX_TOOL} {tmx})' - call(cmd, debug) - return to_file - -CZENG16_REGEX = re.compile(r'.*?data.plaintext-format/0[0-9]train$') -WMT19_WIKITITLES_REGEX = re.compile(r'.*?wikititles-v1.(\w\w)-en.tsv.gz') -TSV_REGEX = re.compile(r'.*?(\w\w)-(\w\w).tsv$') - - - -def cut_wikitles(wiki_file, debug): - # different languages have different file names: - if wiki_file.endswith('wiki/fi-en/titles.fi-en'): - to_file1 = f'{wiki_file}.fi' - to_file2 = f'{wiki_file}.en' - BACKSLASH = '\\' - cmd1 = f"cat {wiki_file} | sed 's/|||/{BACKSLASH}t/g' |cut -f1 |awk '{{$1=$1}};1' > {to_file1}" - cmd2 = f"cat {wiki_file} | sed 's/|||/{BACKSLASH}t/g' |cut -f2 |awk '{{$1=$1}};1' > {to_file2}" -# elif WMT19_WIKITITLES_REGEX.match(wiki_file): -# src = WMT19_WIKITITLES_REGEX.match(wiki_file).groups()[0] -# to_file1 = f'{wiki_file}.{src}' -# to_file2 = f'{wiki_file}.en' -# cmd1 = f"cat {wiki_file} | cut -f1 |awk '{{$1=$1}};1' > {to_file1}" -# cmd2 = f"cat {wiki_file} | cut -f2 |awk '{{$1=$1}};1' > {to_file2}" - else: - return None - if os.path.exists(to_file1) and 
os.path.exists(to_file2): - debug and print(f'{wiki_file} already processed to {to_file1} and {to_file2}; so skip') - return wiki_file - - call(cmd1, debug=debug) - call(cmd2, debug=debug) - return wiki_file - -def cut_tsv(file, debug): - m = TSV_REGEX.match(file) - if m is None: - raise ValueError(f'{file} is not matching tsv pattern') - src = m.groups()[0] - tgt = m.groups()[1] - - to_file1 = f'{file}.{src}' - to_file2 = f'{file}.{tgt}' - cmd1 = f"cat {file} | cut -f1 |awk '{{$1=$1}};1' > {to_file1}" - cmd2 = f"cat {file} | cut -f2 |awk '{{$1=$1}};1' > {to_file2}" - if os.path.exists(to_file1) and os.path.exists(to_file2): - debug and print(f'{file} already processed to {to_file1} and {to_file2}; so skip') - return file - - call(cmd1, debug=debug) - call(cmd2, debug=debug) - return file - - -def convert_file_if_needed(file, debug): - if file.endswith('.sgm'): - return sgm2raw(file, debug) - elif file.endswith('.tmx'): - return tmx2raw(file, debug) - elif file.endswith('wiki/fi-en/titles.fi-en'): - return cut_wikitles(file, debug) -# elif WMT19_WIKITITLES_REGEX.match(file): -# return cut_wikitles(file, debug) - elif file.endswith('.tsv'): - return cut_tsv(file, debug) - elif CZENG16_REGEX.match(file): - return convert2czeng17(file, debug) - else: - return file - - -def convert_files_if_needed(extracted_foldrs, my_glob=my_glob, debug=False): - return { - url: list(sorted(set(convert_file_if_needed(f, debug)) for f in sorted(set(my_glob(folder))))) - for url, folder in extracted_foldrs.items() - } - -def match_patt(file_path, file_pattern, src, tgt, lang): - return file_pattern.format(src=src, tgt=tgt, lang=lang) in file_path - -def match_patts(file_path, file_patterns, src, tgt, lang): - for file_pattern in file_patterns: - params = { k: v for k, v in [('src', src), ('tgt', tgt), ('lang', lang)] if k in file_pattern} - matching = file_pattern.format(**params) - - if isinstance(file_pattern, tuple): - pattern, directions = file_pattern - if f'{src}-{tgt}' in directions and matching in file_path: - return True - else: - if matching in file_path: - return True - return False - -def extracted_glob(extracted_folder, file_patterns, src, tgt, lang): - def get_matching_pattern(file_pattern): - params = { - k: v - for k, v in [('src', src), ('tgt', tgt), ('lang', lang)] - if '{' + k + '}' in file_pattern - } - file_pattern = re.sub(r'{src:(.*?)}', r'\1' if lang == src else '', file_pattern) - file_pattern = re.sub(r'{tgt:(.*?)}', r'\1' if lang == tgt else '', file_pattern) - file_pattern = file_pattern.format(**params) - return file_pattern - for file_pattern in file_patterns: - if isinstance(file_pattern, tuple): - file_pattern, lang_pairs = file_pattern - if f'{src}-{tgt}' not in lang_pairs: - continue -# print('working on pattern: ', file_pattern, lang_pairs ) - matching_pattern = get_matching_pattern(file_pattern) - if matching_pattern is None: - continue - glob_patterns = f'{extracted_folder}/{matching_pattern}' -# print('glob_patterns: ', glob_patterns) - for f in glob.glob(glob_patterns): - yield f - -# for debug usage -def all_extracted_files(split, src, tgt, extracted_folders, split_urls): - def get_url(url): - if isinstance(url, tuple): - url, downloaded_file = url - return url - return [ - f - for url in split_urls - for f in my_glob(extracted_folders[str(get_url(url))]) - ] - -def concat_files(split, src, tgt, extracted_folders, split_urls, path_patterns, to_folder, debug=False): -# if debug: -# print('extracted files to be filtered by patterns: ', -# 
'\n\t'.join(sorted(all_extracted_files(split, src, tgt, extracted_folders, split_urls)))) - for lang in [src, tgt]: - to_file = f'{to_folder}/{split}.{src}-{tgt}.{lang}' - s_src, s_tgt, s_lang = src.split('_')[0], tgt.split('_')[0], lang.split('_')[0] - files = [] - for url in split_urls: - if isinstance(url, tuple): - url, downloaded_file = url - if str(url) not in extracted_folders: - print(f'warning: {url} not in extracted files') - for extracted_file in set( - extracted_glob( - extracted_folders[str(url)], path_patterns, - s_src, s_tgt, s_lang)): - files.append(extracted_file) - if len(files) == 0: - print('warning: ', f'No files found for split {to_file}') - continue - files = sorted(set(files)) - print(f'concating {len(files)} files into {to_file}') - cmd = ['cat'] + [f'"{f}"' for f in files] + [f'>{to_file}'] - cmd = " ".join(cmd) - call(cmd, debug=debug) - -UTILS = os.path.join(pathlib.Path(__file__).parent, 'utils') -LID_MODEL = f'{download_to}/lid.176.bin' -LID_MULTI = f'{UTILS}/fasttext_multi_filter.py' - -def lid_filter(split, src, tgt, from_folder, to_folder, debug=False): - if not os.path.exists(LID_MODEL): - call(f'wget -nc https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin -O {LID_MODEL}') - from_prefix = f'{from_folder}/{split}.{src}-{tgt}' - to_prefix = f'{to_folder}/{split}.{src}-{tgt}' - if os.path.exists(f'{from_prefix}.{src}') and os.path.exists(f'{from_prefix}.{tgt}'): - s_src, s_tgt = src.split('_')[0], tgt.split('_')[0] - cmd = ( - f'python {LID_MULTI} --model {LID_MODEL} --inputs {from_prefix}.{src} {from_prefix}.{tgt} ' - f'--langs {s_src} {s_tgt} --outputs {to_prefix}.{src} {to_prefix}.{tgt}' - ) - print(f'filtering {from_prefix}') - call(cmd, debug=debug) - -def concat_into_splits(dl_dataset, src, tgt, extracted_folders, to_folder, debug): - to_folder_tmp = f"{to_folder}_tmp" - os.makedirs(to_folder_tmp, exist_ok=True) - concat_files('train', src, tgt, - extracted_folders, - split_urls=dl_dataset.train_urls, - path_patterns=dl_dataset.train_files_patterns, - to_folder=to_folder_tmp, debug=debug) - lid_filter('train', src, tgt, to_folder_tmp, to_folder, debug) - - concat_files('valid', src, tgt, - extracted_folders, - split_urls=dl_dataset.valid_urls, - path_patterns=dl_dataset.valid_files_patterns, - to_folder=to_folder, debug=debug) - concat_files('test', src, tgt, - extracted_folders, - split_urls=dl_dataset.test_urls, - path_patterns=dl_dataset.test_files_patterns, - to_folder=to_folder, debug=debug) - - -def download_multi(dl_folder, extract_folder, urls, num_processes=8, debug=False): - pool = mp.Pool(processes=num_processes) - download_f = partial(download_a_url, dl_folder) - downloaded_files = pool.imap_unordered(download_f, urls) - pool.close() - pool.join() - -BLEU_REGEX = re.compile("^BLEU\\S* = (\\S+) ") -def run_eval_bleu(cmd): - output = check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode("utf-8").strip() - print(output) - bleu = -1.0 - for line in output.strip().split('\n'): - m = BLEU_REGEX.search(line) - if m is not None: - bleu = m.groups()[0] - bleu = float(bleu) - break - return bleu - -def check_wmt_test_bleu(raw_folder, wmt_lang_pairs): - not_matchings = [] - for wmt, src_tgts in wmt_lang_pairs: - for src_tgt in src_tgts: - print(f'checking test bleus for: {src_tgt} at {wmt}') - src, tgt = src_tgt.split('-') - ssrc, stgt = src[:2], tgt[:2] - if os.path.exists(f'{raw_folder}/test.{tgt}-{src}.{src}'): - # reversed direction may have different test set - test_src = f'{raw_folder}/test.{tgt}-{src}.{src}' - else: - 
test_src = f'{raw_folder}/test.{src}-{tgt}.{src}' - cmd1 = f'cat {test_src} | sacrebleu -t "{wmt}" -l {stgt}-{ssrc}; [ $? -eq 0 ] || echo ""' - test_tgt = f'{raw_folder}/test.{src}-{tgt}.{tgt}' - cmd2 = f'cat {test_tgt} | sacrebleu -t "{wmt}" -l {ssrc}-{stgt}; [ $? -eq 0 ] || echo ""' - bleu1 = run_eval_bleu(cmd1) - if bleu1 != 100.0: - not_matchings.append(f'{wmt}:{src_tgt} source side not matching: {test_src}') - bleu2 = run_eval_bleu(cmd2) - if bleu2 != 100.0: - not_matchings.append(f'{wmt}:{src_tgt} target side not matching: {test_tgt}') - return not_matchings - -def download_and_extract( - to_folder, lang_pairs, dl_dataset, - to_manually_download_urls, - completed_urls={}, completed_extraction={}, - debug=False): - - dl_folder = f'{to_folder}/downloads' - extract_folder = f'{to_folder}/extracted' - raw_folder = f'{to_folder}/raw' - lid_filtered = f'{to_folder}/lid_filtered' - - os.makedirs(extract_folder, exist_ok=True) - os.makedirs(raw_folder, exist_ok=True) - os.makedirs(lid_filtered, exist_ok=True) - - - to_be_manually_dowloaded = check_need_manual_downalod(dl_folder, to_manually_download_urls) - - completed_urls = download_dataset( - dl_folder, dl_dataset, completed_urls) - if debug: - print('completed urls: ', completed_urls) - - - extracted_folders = extract_all_files( - completed_urls, - extract_folder=extract_folder, - completed_extraction=completed_extraction, - debug=debug) - if debug: - print('download files have been extracted to folders: ', extracted_folders) - - converted_files = convert_files_if_needed(extracted_folders, debug=False) - for src_tgt in lang_pairs: - print(f'working on {dl_dataset.name}: {src_tgt}') - src, tgt = src_tgt.split('-') - concat_into_splits(dl_dataset, - src=src, tgt=tgt, - extracted_folders=extracted_folders, - to_folder=raw_folder, debug=debug) - print('completed data into: ', raw_folder) - -def download_czang16(download_to, username=None): - wgets = [ - f'wget --user={username} --password=czeng -P {download_to} http://ufallab.ms.mff.cuni.cz/~bojar/czeng16-data/data-plaintext-format.{i}.tar' - for i in range(10)] - cmds = [] - for i, cmd in enumerate(wgets): - filename = f'{download_to}/data-plaintext-format.{i}.tar' - if os.path.exists(filename): - print(f'{filename} has already been downloaded; so skip') - continue - cmds.append(cmd) - if cmds and username is None: - raise ValueError('No czeng username is given; please register at http://ufal.mff.cuni.cz/czeng/czeng16 to obtain username to download') - for cmd in cmds: - call(cmd) - print('done with downloading czeng1.6') - -def download_czeng17_script(download_to, extract_folder, debug=False): - url = 'http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip' - filename = f'{download_to}/convert_czeng16_to_17.pl.zip' - extract_to = f'{extract_folder}/{get_extract_name(filename)}' - script_path = f'{extract_to}/convert_czeng16_to_17.pl' - - if not os.path.exists(script_path): - wget.download(url, filename, bar=bar_custom) - extract_to = extract_file(f'{download_to}/convert_czeng16_to_17.pl.zip', extract_folder, get_extract_name=get_extract_name, debug=debug) - return script_path - -czeng17_script_path = "" -def convert2czeng17(file, debug): - en_file = f'{file}.en' - cs_file = f'{file}.cs' - - if not os.path.exists(en_file) or not os.path.exists(cs_file): - cs_cmd = f'cat {file} | perl {czeng17_script_path} | cut -f3 > {cs_file}' - en_cmd = f'cat {file} | perl {czeng17_script_path} | cut -f4 > {en_file}' - call(cs_cmd, debug) - call(en_cmd, debug) - else: - print(f'already 
extracted: {en_file} and {cs_file}') - return file - -def extract_czeng17(extract_folder, debug=False): - url = 'http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip' - filename = f'{download_to}/convert_czeng16_to_17.pl.zip' - extract_to = f'{extract_folder}/{get_extract_name(filename)}' - script_path = f'{extract_to}/convert_czeng16_to_17.pl' - - if not os.path.exists(script_path): - wget.download(url, filename, bar=bar_custom) - extract_to = extract_file(f'{download_to}/convert_czeng16_to_17.pl.zip', extract_folder, get_extract_name=get_extract_name, debug=debug) - return script_path - -######### -# definitions of wmt data sources -# for es-en -# Punctuation in the official test sets will be encoded with ASCII characters (not complex Unicode characters) as much as possible. You may want to normalize your system's output before submission. You are able able to use a rawer version of the test sets that does not have this normalization. -# script to normalize punctuation: http://www.statmt.org/wmt11/normalize-punctuation.perl -wmt13_es_en = DLDataset( - name='wmt13_es-en', - train_urls=[ - 'http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz', - 'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz', - 'http://www.statmt.org/wmt13/training-parallel-un.tgz', - 'http://www.statmt.org/wmt13/training-parallel-nc-v8.tgz', - ], - valid_urls=[ - ('http://www.statmt.org/wmt13/dev.tgz', 'wmt13_dev.tgz') - ], - test_urls=[ - ('http://www.statmt.org/wmt13/test.tgz', 'wmt13_test.tgz') - ], - train_files_patterns=[ - ('*/europarl-v7.{src}-{tgt}.{lang}', ['es-en']), - ('*commoncrawl.{src}-{tgt}.{lang}', ['es-en']), - ('*/news-commentary-v8.{src}-{tgt}.{lang}', ['es-en']), - ('un/*undoc.2000.{src}-{tgt}.{lang}', ['es-en']), - ] , - valid_files_patterns=[ - ('dev/newstest2012.{lang}', ['es-en']) - ], - test_files_patterns=[ - ('test/newstest*.{lang}', ['es-en']) - ], -) - -wmt14_de_fr_en = DLDataset( - name='wmt14_de_fr_en', - train_urls=[ - 'http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz', - 'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz', - 'http://www.statmt.org/wmt13/training-parallel-un.tgz', - 'http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz', - ('http://www.statmt.org/wmt10/training-giga-fren.tar', 'training-giga-fren.gz.tar'), #it is actuall a gz.tar - ], - valid_urls=[ - ('http://www.statmt.org/wmt14/dev.tgz', 'wmt14_dev.tgz'), - ], - test_urls=[ - ('http://www.statmt.org/wmt14/test-full.tgz', 'wmt14_test_full.tgz'), # cleaned test sets - ], - train_files_patterns=[ - ('*/europarl-v7.{src}-{tgt}.{lang}', ['fr-en', 'de-en']), - ('*commoncrawl.{src}-{tgt}.{lang}', ['fr-en', 'de-en']), - ('*/*news-commentary-v9.{src}-{tgt}.{lang}', ['fr-en', 'de-en']), - ('un/undoc.2000.{src}-{tgt}.{lang}', ['fr-en']), - ('*giga-{src}{tgt}*{lang}', ['fr-en']) - ], - valid_files_patterns=[ - ('dev/newstest2013.{lang}', ['fr-en', 'de-en']) - ], - test_files_patterns=[ - ('test-full/newstest*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['en-de', 'de-en', 'fr-en', 'en-fr']), - ], -) - -# pip install git+https://github.com/amake/tmx2corpus.git -wmt16_ro_en = DLDataset( - name='wmt16_ro-en', - train_urls=[ - ('http://data.statmt.org/wmt16/translation-task/training-parallel-ep-v8.tgz', 'wmt16_training-parallel-ep-v8.tgz'), - ('http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-ro.tmx.gz', 'en-ro.tmx.gz'), - ], - valid_urls=[ - ('http://data.statmt.org/wmt16/translation-task/dev-romanian-updated.tgz', 'wmt16_dev.tgz') - ], - test_urls=[ - 
('http://data.statmt.org/wmt16/translation-task/test.tgz', 'wmt16_test.tgz') - ], - train_files_patterns=[ - ('*/*europarl-v8.{src}-{tgt}.{lang}', ['ro-en']), - ('bitext.{lang}', ['ro-en']) #setimes from tmux - ] , - valid_files_patterns=[ - ('dev/newsdev2016*{src}{tgt}*.{lang}', ['ro-en', 'ro-en']) - ], - test_files_patterns=[ - ('test/newstest*{src}{tgt}*.{lang}', ['ro-en', 'en-ro']) - ], -) - -cwmt_wmt_instruction = 'cwmt download instruction at: http://nlp.nju.edu.cn/cwmt-wmt' -wmt17_fi_lv_tr_zh_en_manual_downloads = [ - # fake urls to have unique keys for the data - ( ('http://nlp.nju.edu.cn/cwmt-wmt/CASIA2015.zip', 'CASIA2015.zip'), cwmt_wmt_instruction), - ( ('http://nlp.nju.edu.cn/cwmt-wmt/CASICT2011.zip', 'CASICT2011.zip'), cwmt_wmt_instruction), - ( ('http://nlp.nju.edu.cn/cwmt-wmt/CASICT2015.zip', 'CASICT2015.zip'), cwmt_wmt_instruction), - ( ('http://nlp.nju.edu.cn/cwmt-wmt/Datum2015.zip', 'Datum2015.zip'), cwmt_wmt_instruction), - ( ('http://nlp.nju.edu.cn/cwmt-wmt/Datum2017.zip', 'Datum2017.zip'), cwmt_wmt_instruction), - ( ('http://nlp.nju.edu.cn/cwmt-wmt/NEU2017.zip', 'NEU2017.zip'), cwmt_wmt_instruction), -] -wmt17_fi_lv_tr_zh_en = DLDataset( - name='wmt17_fi_lv_tr_zh_en', - train_urls=[ - ('http://data.statmt.org/wmt17/translation-task/training-parallel-ep-v8.tgz', 'wmt17_training-parallel-ep-v8.tgz'), - 'http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz', - 'http://www.statmt.org/wmt15/wiki-titles.tgz', - ('http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-tr.tmx.gz', 'en-tr.tmx.gz'), - ('http://data.statmt.org/wmt17/translation-task/rapid2016.tgz', 'wmt17_rapid2016.tgz'), - 'http://data.statmt.org/wmt17/translation-task/leta.v1.tgz', - 'http://data.statmt.org/wmt17/translation-task/dcep.lv-en.v1.tgz', - 'http://data.statmt.org/wmt17/translation-task/books.lv-en.v1.tgz', - (('https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-zh.tar.gz.00', - 'https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-zh.tar.gz.01',), 'UNv1.0.en-zh.tar.gz'), - #manually download files: - ('http://nlp.nju.edu.cn/cwmt-wmt/CASIA2015.zip', 'CASIA2015.zip'), - ('http://nlp.nju.edu.cn/cwmt-wmt/CASICT2011.zip', 'CASICT2011.zip'), - ('http://nlp.nju.edu.cn/cwmt-wmt/CASICT2015.zip', 'CASICT2015.zip'), - ('http://nlp.nju.edu.cn/cwmt-wmt/Datum2015.zip', 'Datum2015.zip'), - ('http://nlp.nju.edu.cn/cwmt-wmt/Datum2017.zip', 'Datum2017.zip'), - ('http://nlp.nju.edu.cn/cwmt-wmt/NEU2017.zip', 'NEU2017.zip'), - ], - valid_urls=[ - ('http://data.statmt.org/wmt17/translation-task/dev.tgz', 'wmt17_dev.tgz'), - ], - test_urls=[ - #NEW: Improved translations for zh test sets - ('http://data.statmt.org/wmt17/translation-task/test-update-1.tgz', 'wmt17_test_zh_en.tgz'), - ('http://data.statmt.org/wmt17/translation-task/test.tgz', 'wmt17_test_others.tgz') - ], - train_files_patterns=[ - ('casict*/cas*{src:ch}{tgt:en}.txt', ['zh-en', 'zh-en'] ), - ('casia*/cas*{src:ch}{tgt:en}.txt', ['zh-en', 'zh-en'] ), - ('dataum*/Book*{src:cn}{tgt:en}.txt', ['zh-en', 'zh-en']), - ('neu*/NEU*{src:cn}{tgt:en}.txt', ['zh-en', 'zh-en'] ), - ('*/*UNv1.0.en-zh.{src:zh}{tgt:en}', ['zh-en']), - ('training/*news-commentary-v12.{src}-{tgt}.{lang}', ['zh-en', ]), - - ('*/*europarl-v8.{src}-{tgt}.{lang}', ['fi-en', 'lv-en']), - ('wiki/fi-en/titles.{src}-{tgt}.{lang}', ['fi-en', ]), - ('rapid2016.{tgt}-{src}.{lang}', ['fi-en', 'lv-en']), - ('*/leta.{lang}', ['lv-en']), - ('*/dcep.{lang}', ['lv-en']), - ('*/farewell.{lang}', ['lv-en']), - ('bitext.{lang}', ['tr-en']), - ] , - 
valid_files_patterns=[ - ('dev/newsdev2017*{src}{tgt}-{src:src}{tgt:ref}.{lang}', - [ - 'fi-en', 'lv-en', 'tr-en', 'zh-en', - 'en-fi', 'en-lv', 'en-tr', 'en-zh' - ]), - ('dev/newstest2016*{src}{tgt}-{src:src}{tgt:ref}.{lang}', - [ - 'fi-en', 'tr-en', - 'en-fi', 'en-tr', - ]), - ], - test_files_patterns=[ - ('test/newstest2017-{src}{tgt}-{src:src}{tgt:ref}.{lang}', - [ - 'fi-en', 'lv-en', 'tr-en', - 'en-fi', 'en-lv', 'en-tr', - ]), - ('newstest2017-{src}{tgt}-{src:src}{tgt:ref}.{lang}', - [ - 'zh-en', - 'en-zh' - ]), - ], -) - -czeng_instruction = 'download instruction at: http://ufal.mff.cuni.cz/czeng/czeng16' -#alternative: use the prepared data but detokenize it? -wmt18_cs_et_en_manual_downloads = [ -#for cs, need to register and download; Register and download CzEng 1.6. -#Better results can be obtained by using a subset of sentences, released under a new version name CzEng 1.7. - # ((f'http://ufallab.ms.mff.cuni.cz/~bojar/czeng16-data/data-plaintext-format.{i}.tar', - # f'data-plaintext-format.{i}.tar'), czeng_instruction) - # for i in range(10) -] - -wmt18_cs_et_en = DLDataset( - name='wmt18_cs_et_en', - train_urls=[ - 'http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz', - 'http://data.statmt.org/wmt18/translation-task/training-parallel-ep-v8.tgz', - 'https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-cs.zipporah0-dedup-clean.tgz', - 'https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-et.zipporah0-dedup-clean.tgz', - 'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz', - 'http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz', - ('http://data.statmt.org/wmt18/translation-task/rapid2016.tgz', 'wmt18_rapid2016.tgz'), - # (tuple( - # (f'http://ufallab.ms.mff.cuni.cz/~bojar/czeng16-data/data-plaintext-format.{i}.tar', - # f'data-plaintext-format.{i}.tar') - # for i in range(10) - # ), - # 'czeng16_data_plaintext.gz.tar'), - ], - valid_urls=[ - ('http://data.statmt.org/wmt18/translation-task/dev.tgz', 'wmt18_dev.tgz'), - ], - test_urls=[ - ('http://data.statmt.org/wmt18/translation-task/test.tgz', 'wmt18_test.tgz'), - ], - train_files_patterns=[ - # ('*/*europarl-v7.{src}-{tgt}.{lang}', ['cs-en']), - ('*/*europarl-v8.{src}-{tgt}.{lang}', ['et-en']), - # ('*paracrawl-release1.{tgt}-{src}.zipporah0-dedup-clean.{lang}', ['cs-en', 'et-en']), - ('*paracrawl-release1.{tgt}-{src}.zipporah0-dedup-clean.{lang}', ['et-en']), - # ('*commoncrawl.{src}-{tgt}.{lang}', ['cs-en']), - # ('*/news-commentary-v13.{src}-{tgt}.{lang}', ['cs-en']), - # ('data.plaintext-format/*train.{lang}', ['cs-en']), - ('rapid2016.{tgt}-{src}.{lang}', ['et-en']), - ] , - valid_files_patterns=[ - ('dev/newsdev2018*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['et-en']), - # ('dev/newstest2017*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['cs-en']) - ], - test_files_patterns=[ - ('test/newstest2018-{src}{tgt}-{src:src}{tgt:ref}.{lang}', - # ['cs-en', 'et-en']), - ['et-en']), - ] -) - -ru_en_yandex_instruction = 'Yandex Corpus download instruction at: https://translate.yandex.ru/corpus?lang=en' -wmt19_ru_gu_kk_lt_manual_downloads = [ - (('https://translate.yandex.ru/corpus?lang=en', 'wmt19_1mcorpus.zip'), ru_en_yandex_instruction) -] -wmt19_ru_gu_kk_lt = DLDataset( - name='wmt19_ru_gu_kk_lt', - train_urls=[ - 'http://www.statmt.org/europarl/v9/training/europarl-v9.lt-en.tsv.gz', - 'https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-lt.bicleaner07.tmx.gz', - 
'https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz', - 'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz', - 'http://data.statmt.org/news-commentary/v14/training/news-commentary-v14-wmt19.en-kk.tsv.gz', - 'http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.en-ru.tsv.gz', - 'http://data.statmt.org/wikititles/v1/wikititles-v1.kk-en.tsv.gz', - 'http://data.statmt.org/wikititles/v1/wikititles-v1.ru-en.tsv.gz', - 'http://data.statmt.org/wikititles/v1/wikititles-v1.kk-en.tsv.gz', - 'http://data.statmt.org/wikititles/v1/wikititles-v1.lt-en.tsv.gz', - 'http://data.statmt.org/wikititles/v1/wikititles-v1.gu-en.tsv.gz', - (('https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.00', - 'https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.01', - 'https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.02',), - 'wmt19_UNv1.0.en-ru.tar.gz'), - 'https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-lt.tmx.zip', - ('https://translate.yandex.ru/corpus?lang=en', 'wmt19_1mcorpus.zip'), - ], - valid_urls=[ - ('http://data.statmt.org/wmt19/translation-task/dev.tgz', 'wmt19_dev.tgz'), - ], - test_urls=[ - ('http://data.statmt.org/wmt19/translation-task/test.tgz', 'wmt19_test.tgz'), - ], - train_files_patterns=[ - ('*europarl-v9.{src}-{tgt}.tsv.{lang}', ['lt-en']), - #paracrawl - ('*paracrawl-release1.{tgt}-{src}.zipporah0-dedup-clean.{lang}', ['ru-en']), - ('bitext.{lang}', ['lt-en',]), - ('*commoncrawl.{src}-{tgt}.{lang}', ['ru-en',]), - ('*news-commentary-v14-wmt19.{tgt}-{src}.tsv.{lang}', ['kk-en', ]), - ('*news-commentary-v14.{tgt}-{src}.tsv.{lang}', ['ru-en']), - #yandex - ('corpus.{tgt}_{src}.1m.{lang}', ['ru-en']), - ('wikititles_v1_wikititles-v1.{src}-{tgt}.tsv.{lang}', ['ru-en', 'kk-en', 'lt-en', 'gu-en']), - ('*/UNv1.0.{tgt}-{src}.{lang}', ['ru-en']), - #rapid - ('bitext.{lang}', ['lt-en']) - ], - valid_files_patterns=[ - ('dev/newsdev2019*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['gu-en', 'kk-en', 'lt-en']), - ('dev/newstest2018*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['ru-en']), - ], - test_files_patterns=[ - ('sgm/newstest2019-{src}{tgt}-{src:src}{tgt:ref}.{lang}', - ['ru-en', 'gu-en', 'kk-en', 'lt-en', 'en-ru', 'en-gu', 'en-kk', 'en-lt']), - ] -) - - -######### - -if __name__ == "__main__": - # speed up the downloads with multiple processing - dl_folder = f'{to_data_path}/downloads' - extract_folder = f'{to_data_path}/extracted' - - urls = [ - url - for dataset in [wmt13_es_en, wmt14_de_fr_en, wmt16_ro_en, wmt18_cs_et_en, wmt19_ru_gu_kk_lt] - for urls in [dataset.train_urls, dataset.valid_urls, dataset.test_urls] - for url in urls - ] - urls = set(urls) - download_multi(dl_folder, extract_folder, urls, num_processes=8, debug=True) - - # check manually downlaods - to_manually_download_urls = ( - wmt17_fi_lv_tr_zh_en_manual_downloads + wmt18_cs_et_en_manual_downloads + wmt19_ru_gu_kk_lt_manual_downloads - ) - to_be_manually_dowloaded = check_need_manual_downalod(dl_folder, to_manually_download_urls) - if len(to_be_manually_dowloaded) > 0: - print('Missing files that need to be downloaded manually; stop the process now.') - exit(-1) - - completed_urls = {} - completed_extraction = {} - def work_on_wmt(directions, wmt_data): - download_and_extract( - to_data_path, - directions, - wmt_data, - to_manually_download_urls=to_manually_download_urls, - completed_urls=completed_urls, completed_extraction=completed_extraction, debug=True) - - 
work_on_wmt( - ['es_XX-en_XX'], - wmt13_es_en,) - work_on_wmt( - [ - 'fr_XX-en_XX', 'en_XX-fr_XX', - # 'en_XX-de_DE', 'de_DE-en_XX', - ], - wmt14_de_fr_en,) - work_on_wmt( - ['ro_RO-en_XX', 'en_XX-ro_XX'], - wmt16_ro_en,) - work_on_wmt( - [ - # 'zh_CN-en_XX', - 'lv_LV-en_XX', 'fi_FI-en_XX', 'tr_TR-en_XX', - #in case the reversed directions have different train/valid/test data - # 'en_XX-zh_CN', - 'en_XX-lv_LV', 'en_XX-fi_FI', 'en_XX-tr_TR', - ], - wmt17_fi_lv_tr_zh_en, ) - # czeng17_script_path = download_czeng17_script(download_to, extract_to, debug=False) - # cz_username = None - work_on_wmt( - [ - # 'cs_CZ-en_XX', - 'et_EE-en_XX'], - wmt18_cs_et_en,) - work_on_wmt( - [ - # 'ru_RU-en_XX', 'en_XX-ru_RU', - 'gu_IN-en_XX', 'kk_KZ-en_XX', 'lt_LT-en_XX', - #in case the reversed directions have different train/valid/test data - 'en_XX-gu_IN', 'en_XX-kk_KZ', 'en_XX-lt_LT' - ], - wmt19_ru_gu_kk_lt,) - - not_matching = check_wmt_test_bleu( - f'{to_data_path}/raw', - [ - ('wmt13', ['es_XX-en_XX']), - ('wmt14/full', ['fr_XX-en_XX',]), - ('wmt16', ['ro_RO-en_XX',]), - # ('wmt17/improved', ['zh_CN-en_XX']), - ('wmt17', [ 'lv_LV-en_XX', 'fi_FI-en_XX', 'tr_TR-en_XX']), - ('wmt18', ['cs_CZ-en_XX', 'et_EE-en_XX']), - ('wmt19', ['gu_IN-en_XX', 'kk_KZ-en_XX', 'lt_LT-en_XX']), - #'ru_RU-en_XX', - ] - ) - if len(not_matching) > 0: - print('the following datasets do not have matching test datasets:\n\t', '\n\t'.join(not_matching)) - diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_to_text/docs/simulst_mustc_example.md b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_to_text/docs/simulst_mustc_example.md deleted file mode 100644 index f3b5a413a27bbe2700da3f418460aa0a7c41abdd..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_to_text/docs/simulst_mustc_example.md +++ /dev/null @@ -1,190 +0,0 @@ -# Simultaneous Speech Translation (SimulST) on MuST-C - -This is a tutorial on training and evaluating a transformer *wait-k* simultaneous model on the MuST-C English-German dataset, from [SimulMT to SimulST: Adapting Simultaneous Text Translation to End-to-End Simultaneous Speech Translation](https://www.aclweb.org/anthology/2020.aacl-main.58.pdf). - -[MuST-C](https://www.aclweb.org/anthology/N19-1202) is a multilingual speech-to-text translation corpus with translations of English TED talks into 8 languages. - -## Data Preparation -This section introduces the data preparation for training and evaluation. -If you only want to evaluate the model, please jump to [Inference & Evaluation](#inference--evaluation). - -[Download](https://ict.fbk.eu/must-c) and unpack MuST-C data to a path -`${MUSTC_ROOT}/en-${TARGET_LANG_ID}`, then preprocess it with -```bash -# Additional Python packages for S2T data processing/model training -pip install pandas torchaudio sentencepiece - -# Generate TSV manifests, features, vocabulary, -# global cepstral mean and variance (CMVN) statistics, -# and configuration for each language -cd fairseq - -python examples/speech_to_text/prep_mustc_data.py \ - --data-root ${MUSTC_ROOT} --task asr \ - --vocab-type unigram --vocab-size 10000 \ - --cmvn-type global - -python examples/speech_to_text/prep_mustc_data.py \ - --data-root ${MUSTC_ROOT} --task st \ - --vocab-type unigram --vocab-size 10000 \ - --cmvn-type global -``` -
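Since the rest of this tutorial revolves around *wait-k* policies, here is a minimal editorial sketch of the read/write schedule such a policy produces. This is an illustration only, not fairseq's or SimulEval's agent API; the function name and the chunk-based framing are assumptions.

```python
# Illustrative wait-k schedule: read k source chunks first, then alternate
# one WRITE per additional READ until the target is finished.
def wait_k_actions(num_src_chunks: int, num_tgt_tokens: int, k: int):
    """Yield "READ"/"WRITE" actions for a wait-k schedule."""
    read, written = 0, 0
    while written < num_tgt_tokens:
        if read < min(written + k, num_src_chunks):
            read += 1
            yield "READ"
        else:
            written += 1
            yield "WRITE"

# Wait-3 over 6 source chunks and 5 target tokens:
print(list(wait_k_actions(num_src_chunks=6, num_tgt_tokens=5, k=3)))
# ['READ', 'READ', 'READ', 'WRITE', 'READ', 'WRITE', 'READ', 'WRITE',
#  'READ', 'WRITE', 'WRITE']
```

With a fixed pre-decision ratio of 7, each READ above would correspond to a chunk of 7 encoder states rather than a single frame.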
## ASR Pretraining -We need a pretrained offline ASR model. Assuming the save directory of the ASR model is `${ASR_SAVE_DIR}`. -The following command (and the subsequent training commands in this tutorial) assumes training on 1 GPU (you can also train on 8 GPUs and remove the `--update-freq 8` option). -``` -fairseq-train ${MUSTC_ROOT}/en-de \ - --config-yaml config_asr.yaml --train-subset train_asr --valid-subset dev_asr \ - --save-dir ${ASR_SAVE_DIR} --num-workers 4 --max-tokens 40000 --max-update 100000 \ - --task speech_to_text --criterion label_smoothed_cross_entropy --report-accuracy \ - --arch convtransformer_espnet --optimizer adam --lr 0.0005 --lr-scheduler inverse_sqrt \ - --warmup-updates 10000 --clip-norm 10.0 --seed 1 --update-freq 8 -``` -A pretrained ASR checkpoint can be downloaded [here](https://dl.fbaipublicfiles.com/simultaneous_translation/must_c_v1_en_de_pretrained_asr) - -## Simultaneous Speech Translation Training - -### Wait-K with fixed pre-decision module -Fixed pre-decision means that the model operates its simultaneous policy on the boundaries of fixed-size chunks. -Here is an example with a fixed pre-decision ratio of 7 (a simultaneous decision is made every 7 encoder states) and -a wait-3 policy. Assuming the save directory is `${ST_SAVE_DIR}`: -```bash - fairseq-train ${MUSTC_ROOT}/en-de \ - --config-yaml config_st.yaml --train-subset train_st --valid-subset dev_st \ - --save-dir ${ST_SAVE_DIR} --num-workers 8 \ - --optimizer adam --lr 0.0001 --lr-scheduler inverse_sqrt --clip-norm 10.0 \ - --criterion label_smoothed_cross_entropy \ - --warmup-updates 4000 --max-update 100000 --max-tokens 40000 --seed 2 \ - --load-pretrained-encoder-from ${ASR_SAVE_DIR}/checkpoint_best.pt \ - --task speech_to_text \ - --arch convtransformer_simul_trans_espnet \ - --simul-type waitk_fixed_pre_decision \ - --waitk-lagging 3 \ - --fixed-pre-decision-ratio 7 \ - --update-freq 8 - -``` -### Monotonic multihead attention with fixed pre-decision module -``` - fairseq-train ${MUSTC_ROOT}/en-de \ - --config-yaml config_st.yaml --train-subset train_st --valid-subset dev_st \ - --save-dir ${ST_SAVE_DIR} --num-workers 8 \ - --optimizer adam --lr 0.0001 --lr-scheduler inverse_sqrt --clip-norm 10.0 \ - --warmup-updates 4000 --max-update 100000 --max-tokens 40000 --seed 2 \ - --load-pretrained-encoder-from ${ASR_SAVE_DIR}/${CHECKPOINT_FILENAME} \ - --task speech_to_text \ - --criterion latency_augmented_label_smoothed_cross_entropy \ - --latency-weight-avg 0.1 \ - --arch convtransformer_simul_trans_espnet \ - --simul-type infinite_lookback_fixed_pre_decision \ - --fixed-pre-decision-ratio 7 \ - --update-freq 8 -``` -## Inference & Evaluation -[SimulEval](https://github.com/facebookresearch/SimulEval) is used for evaluation. -The following commands install SimulEval and run the evaluation. - -``` -git clone https://github.com/facebookresearch/SimulEval.git -cd SimulEval -pip install -e . - -simuleval \ - --agent ${FAIRSEQ}/examples/speech_to_text/simultaneous_translation/agents/fairseq_simul_st_agent.py \ - --source ${SRC_LIST_OF_AUDIO} \ - --target ${TGT_FILE} \ - --data-bin ${MUSTC_ROOT}/en-de \ - --config config_st.yaml \ - --model-path ${ST_SAVE_DIR}/${CHECKPOINT_FILENAME} \ - --output ${OUTPUT} \ - --scores -``` - -The source file `${SRC_LIST_OF_AUDIO}` is a list of paths of audio files. Assuming your audio files are stored at `/home/user/data`, -it should look like this - -```bash -/home/user/data/audio-1.wav -/home/user/data/audio-2.wav -``` - -Each line of the target file `${TGT_FILE}` is the translation for the corresponding audio file. -```bash -Translation_1 -Translation_2 -``` -The evaluation runs on the original MuST-C segmentation. 
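Before launching SimulEval, it is worth checking that the audio list and the reference file are aligned line by line. A small editorial sketch follows; the file names `wav_list.txt` and `target.txt` are placeholders, not names produced by the tutorial.

```python
# Sanity-check that each audio path has exactly one reference translation
# and that every listed audio file actually exists on disk.
import os

with open("wav_list.txt") as f:
    wavs = [line.strip() for line in f if line.strip()]
with open("target.txt") as f:
    refs = [line.strip() for line in f if line.strip()]

assert len(wavs) == len(refs), f"{len(wavs)} audio files vs {len(refs)} references"
missing = [w for w in wavs if not os.path.isfile(w)]
assert not missing, f"missing audio files: {missing[:3]}"
print(f"OK: {len(wavs)} aligned pairs")
```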
-The following command will generate the wav list and text file for an evaluation set `${SPLIT}` (chosen from `dev`, `tst-COMMON` and `tst-HE`) in MuST-C under `${EVAL_DATA}`. -```bash -python ${FAIRSEQ}/examples/speech_to_text/seg_mustc_data.py \ - --data-root ${MUSTC_ROOT} --lang de \ - --split ${SPLIT} --task st \ - --output ${EVAL_DATA} -``` - -The `--data-bin` and `--config` should be the same as in the previous section if you prepared the data from scratch. -If you only need evaluation, a prepared data directory can be found [here](https://dl.fbaipublicfiles.com/simultaneous_translation/must_c_v1.0_en_de_databin.tgz). It contains -- `spm_unigram10000_st.model`: a sentencepiece model binary. -- `spm_unigram10000_st.txt`: the dictionary file generated by the sentencepiece model. -- `gcmvn.npz`: the binary for global cepstral mean and variance. -- `config_st.yaml`: the config yaml file. It looks like this. -You will need to set the absolute paths for `sentencepiece_model` and `stats_npz_path` if the data directory is downloaded. -```yaml -bpe_tokenizer: - bpe: sentencepiece - sentencepiece_model: ABS_PATH_TO_SENTENCEPIECE_MODEL -global_cmvn: - stats_npz_path: ABS_PATH_TO_GCMVN_FILE -input_channels: 1 -input_feat_per_channel: 80 -sampling_alpha: 1.0 -specaugment: - freq_mask_F: 27 - freq_mask_N: 1 - time_mask_N: 1 - time_mask_T: 100 - time_mask_p: 1.0 - time_wrap_W: 0 -transforms: - '*': - - global_cmvn - _train: - - global_cmvn - - specaugment -vocab_filename: spm_unigram10000_st.txt -``` - -Notice that once a `--data-bin` is set, the `--config` is the base name of the config yaml, not the full path. - -Set `--model-path` to the model checkpoint. -A pretrained checkpoint can be downloaded from [here](https://dl.fbaipublicfiles.com/simultaneous_translation/convtransformer_wait5_pre7), which is a wait-5 model with a pre-decision of 280 ms. - -The result of this model on `tst-COMMON` is: -```bash -{ - "Quality": { - "BLEU": 13.94974229366959 - }, - "Latency": { - "AL": 1751.8031870037803, - "AL_CA": 2338.5911762796536, - "AP": 0.7931395378788959, - "AP_CA": 0.9405103863210942, - "DAL": 1987.7811616943081, - "DAL_CA": 2425.2751560926167 - } -} -``` - -If the `--output ${OUTPUT}` option is used, the detailed log and scores will be stored under the `${OUTPUT}` directory. - - -Quality is measured by detokenized BLEU, so make sure that the predicted words sent to the server are detokenized. - -The latency metrics are -* Average Proportion -* Average Lagging -* Differentiable Average Lagging - -Again, they are also evaluated on detokenized text. diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/encoders/characters.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/encoders/characters.py deleted file mode 100644 index 494ea219392716dc75d2c1e19d71cd55b9b2f4ba..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/encoders/characters.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- - -from fairseq.data.encoders import register_bpe - - -SPACE = chr(32) -SPACE_ESCAPE = chr(9601) - - -@register_bpe("characters") -class Characters(object): - def __init__(self, *unused): - pass - - @staticmethod - def add_args(parser): - pass - - @staticmethod - def encode(x: str) -> str: - escaped = x.replace(SPACE, SPACE_ESCAPE) - return SPACE.join(list(escaped)) - - @staticmethod - def decode(x: str) -> str: - return x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE) diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/trainer.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/trainer.py deleted file mode 100644 index e46ccfe0b8d3a224586fb16c69168321f60ce30e..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/trainer.py +++ /dev/null @@ -1,1509 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -""" -Train a network across multiple GPUs. -""" - -import contextlib -import logging -import sys -import time -from argparse import Namespace -from itertools import chain -from typing import Any, Dict, List - -import torch -from fairseq import checkpoint_utils, models, optim, utils -from fairseq.dataclass.configs import FairseqConfig -from fairseq.dataclass.utils import convert_namespace_to_omegaconf -from fairseq.distributed import utils as distributed_utils -from fairseq.file_io import PathManager -from fairseq.logging import meters, metrics -from fairseq.models.ema import build_ema -from fairseq.nan_detector import NanDetector -from fairseq.optim import lr_scheduler -from omegaconf import OmegaConf - -logger = logging.getLogger(__name__) - - -class Trainer(object): - """Main class for data parallel training. - - This class supports synchronous distributed data parallel training, - where multiple workers each have a full model replica and gradients - are accumulated across workers before each update. We use - :class:`~torch.nn.parallel.DistributedDataParallel` to handle - communication of the gradients across workers. - """ - - def __init__(self, cfg: FairseqConfig, task, model, criterion, quantizer=None): - - if isinstance(cfg, Namespace): - logger.warning( - "argparse.Namespace configuration is deprecated! 
Automatically converting to OmegaConf" - ) - cfg = convert_namespace_to_omegaconf(cfg) - - self.cfg = cfg - self.task = task - - # catalog shared parameters - shared_params = _catalog_shared_params(model) - self.tpu = cfg.common.tpu - self.cuda = torch.cuda.is_available() and not cfg.common.cpu and not self.tpu - if self.cuda: - self.device = torch.device("cuda") - elif self.tpu: - self.device = utils.get_tpu_device() - else: - self.device = torch.device("cpu") - - if self.is_fsdp: - import fairscale - if self.cfg.common.bf16: - raise ValueError( - "FullyShardedDataParallel is not compatible with --bf16 or " - "--memory-efficient-bf16" - ) - if self.cfg.distributed_training.zero_sharding != "none": - raise ValueError( - "FullyShardedDataParallel is not compatible with --zero-sharding " - "option (it's already built in)" - ) - if max(self.cfg.optimization.update_freq) > 1 and fairscale.__version__ < "0.4.0": - raise RuntimeError( - "Please update to fairscale 0.4.0 or newer when combining " - "--update-freq with FullyShardedDataParallel" - ) - else: - if ( - hasattr(self.cfg.distributed_training, "cpu_offload") - and self.cfg.distributed_training.cpu_offload - ): - raise ValueError("--cpu-offload requires --ddp-backend=fully_sharded") - - # copy model and criterion to current device/dtype - self._criterion = criterion - self._model = model - if not self.is_fsdp: - if cfg.common.fp16: - assert not cfg.common.amp, "Cannot use fp16 and AMP together" - self._criterion = self._criterion.half() - self._model = self._model.half() - elif cfg.common.bf16: - self._criterion = self._criterion.to(dtype=torch.bfloat16) - self._model = self._model.to(dtype=torch.bfloat16) - elif cfg.common.amp: - self._amp_retries = 0 - if ( - not cfg.distributed_training.pipeline_model_parallel - # the DistributedFairseqModel wrapper will handle moving to device, - # so only handle cases which don't use the wrapper - and not self.use_distributed_wrapper - ): - self._criterion = self._criterion.to(device=self.device) - self._model = self._model.to(device=self.device) - self.pipeline_model_parallel = cfg.distributed_training.pipeline_model_parallel - self.last_device = None - if self.cuda and self.pipeline_model_parallel: - self.last_device = torch.device( - cfg.distributed_training.pipeline_devices[-1] - ) - - # check that shared parameters are preserved after device transfer - for shared_param in shared_params: - ref = _get_module_by_path(self._model, shared_param[0]) - for path in shared_param[1:]: - logger.info( - "detected shared parameter: {} <- {}".format(shared_param[0], path) - ) - _set_module_by_path(self._model, path, ref) - - self._dummy_batch = None # indicates we don't have a dummy batch at first - self._lr_scheduler = None - self._num_updates = 0 - self._num_xla_compiles = 0 # for TPUs - self._optim_history = None - self._optimizer = None - self._warn_once = set() - self._wrapped_criterion = None - self._wrapped_model = None - self._ema = None - - # TODO(myleott): support tpu - if self.cuda and self.data_parallel_world_size > 1: - self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size) - else: - self._grad_norm_buf = None - - self.quantizer = quantizer - if self.quantizer is not None: - self.quantizer.set_trainer(self) - - # get detailed cuda environment - if self.cuda: - self.cuda_env = utils.CudaEnvironment() - if self.data_parallel_world_size > 1: - self.cuda_env_arr = distributed_utils.all_gather_list( - self.cuda_env, group=distributed_utils.get_global_group() - ) - else: - 
self.cuda_env_arr = [self.cuda_env] - if self.data_parallel_rank == 0: - utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr) - else: - self.cuda_env = None - self.cuda_env_arr = None - - metrics.log_start_time("wall", priority=790, round=0) - - self._start_time = time.time() - self._previous_training_time = 0 - self._cumulative_training_time = None - - def reinitialize(self): - """Reinitialize the Trainer, typically after model params change.""" - self._lr_scheduler = None - self._optimizer = None - self._wrapped_criterion = None - self._wrapped_model = None - - @property - def data_parallel_world_size(self): - if self.cfg.distributed_training.distributed_world_size == 1: - return 1 - return distributed_utils.get_data_parallel_world_size() - - @property - def data_parallel_process_group(self): - return distributed_utils.get_data_parallel_group() - - @property - def data_parallel_rank(self): - if self.cfg.distributed_training.distributed_world_size == 1: - return 0 - return distributed_utils.get_data_parallel_rank() - - @property - def is_data_parallel_master(self): - # NOTE: this returns true for all model parallel replicas with data - # parallel rank 0 - return self.data_parallel_rank == 0 - - @property - def use_distributed_wrapper(self) -> bool: - return ( - self.data_parallel_world_size > 1 and not self.cfg.optimization.use_bmuf - ) or ( - self.is_fsdp and self.cfg.distributed_training.cpu_offload - ) - - @property - def should_save_checkpoint_on_current_rank(self) -> bool: - """Indicates whether to save checkpoints on the current DDP rank.""" - if ( - self.is_fsdp and self.cfg.distributed_training.use_sharded_state - ) or getattr(self.cfg.model, "base_layers", 0) > 0: - return True - else: - return self.is_data_parallel_master - - @property - def always_call_state_dict_during_save_checkpoint(self) -> bool: - if self.is_fsdp and not self.cfg.distributed_training.use_sharded_state: - # FSDP calls communication collective when consolidating checkpoints - return True - else: - return False - - @property - def checkpoint_suffix(self) -> str: - """Suffix to add to the checkpoint file name.""" - if self.is_fsdp and self.cfg.distributed_training.use_sharded_state: - return self.cfg.checkpoint.checkpoint_suffix + "-shard{0}".format( - self.data_parallel_rank - ) - else: - return self.cfg.checkpoint.checkpoint_suffix or "" - - @property - def criterion(self): - if self._wrapped_criterion is None: - if utils.has_parameters(self._criterion) and self.use_distributed_wrapper: - self._wrapped_criterion = models.DistributedFairseqModel( - self.cfg.distributed_training, - self._criterion, - process_group=self.data_parallel_process_group, - device=self.device, - ) - else: - self._wrapped_criterion = self._criterion - return self._wrapped_criterion - - @property - def model(self): - if self._wrapped_model is None: - if self.use_distributed_wrapper: - self._wrapped_model = models.DistributedFairseqModel( - self.cfg.distributed_training, - self._model, - process_group=self.data_parallel_process_group, - device=self.device, - ) - else: - self._wrapped_model = self._model - return self._wrapped_model - - @property - def ema(self): - if self._ema is None: - self._build_ema() - return self._ema - - def _build_ema(self): - if self.cfg.ema.store_ema: - self._ema = build_ema(self._model, self.cfg.ema, self.device) - logger.info( - "Exponential Moving Average Shadow Model is initialized." 
- ) - - @property - def optimizer(self): - if self._optimizer is None: - self._build_optimizer() - return self._optimizer - - @property - def lr_scheduler(self): - if self._lr_scheduler is None: - self._build_optimizer() # this will initialize self._lr_scheduler - return self._lr_scheduler - - def _build_optimizer(self): - params = list( - filter( - lambda p: p.requires_grad, - chain(self.model.parameters(), self.criterion.parameters()), - ) - ) - - if self.is_fsdp and self.cfg.common.fp16: - # FullyShardedDataParallel always uses MemoryEfficientFP16 wrapper, - # mostly for the grad scaling. But if we don't have the - # --memory-efficient-fp16 flag set, then we're effectively doing - # regular --fp16 and can allow the use of optimizers that would - # otherwise be unsupported by MemoryEfficientFP16Optimizer. - allow_unsupported = not self.cfg.common.memory_efficient_fp16 - self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer( - self.cfg, params, allow_unsupported=allow_unsupported - ) - elif self.cfg.common.fp16 or self.cfg.common.bf16 or self.cfg.common.amp: - if self.cuda and torch.cuda.get_device_capability(0)[0] < 7: - logger.info( - "NOTE: your device does NOT support faster training with --fp16 or --amp, " - "please switch to FP32 which is likely to be faster" - ) - if ( - self.cfg.common.memory_efficient_fp16 - or self.cfg.common.memory_efficient_bf16 - ): - self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer( - self.cfg, params - ) - elif self.cfg.common.amp: - self._optimizer = optim.AMPOptimizer.build_optimizer(self.cfg, params) - else: - self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params) - else: - if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7: - logger.info("NOTE: your device may support faster training with --fp16 or --amp") - self._optimizer = optim.build_optimizer(self.cfg.optimizer, params) - - if self.is_fsdp: - assert ( - not self.cfg.optimization.use_bmuf - ), "--ddp-backend=fully_sharded is not compatible with BMUF" - assert self._optimizer.supports_flat_params, ( - "--ddp-backend=fully_sharded is only compatible with pointwise " - "optimizers (e.g., Adam, AdamW, Adadelta, Adamax, SGD, etc.). " - "However, the sharding will result in slightly different results when " - "using non-pointwise optimizers (e.g., Adagrad, Adafactor, LAMB)" - ) - - if self.cfg.optimization.use_bmuf: - self._optimizer = optim.FairseqBMUF( - self.cfg.bmuf, - self._optimizer, - ) - - if self.cfg.distributed_training.zero_sharding == "os": - if ( - self.cfg.common.fp16 - and not self.cfg.common.memory_efficient_fp16 - and not self.cfg.common.memory_efficient_bf16 - ) and not self.cfg.common.fp16_no_flatten_grads: - raise ValueError( - "ZeRO is incompatible with fp16 and flattened grads. " - "Please use --fp16-no-flatten-grads" - ) - else: - optim.shard_(self._optimizer, self.data_parallel_process_group) - - # We should initialize the learning rate scheduler immediately after - # building the optimizer, so that the initial learning rate is set.
- self._lr_scheduler = lr_scheduler.build_lr_scheduler( - self.cfg.lr_scheduler, - self.optimizer, - ) - self._lr_scheduler.step_update(0) - - @property - def is_fsdp(self): - return self.cfg.distributed_training.ddp_backend == "fully_sharded" - - def consolidate_optimizer(self): - """For OSS, we need to consolidate the state dict.""" - if self.cfg.checkpoint.no_save_optimizer_state: - return - self._gathered_optim_state = None - if hasattr(self.optimizer.optimizer, "consolidate_state_dict"): - self.optimizer.optimizer.consolidate_state_dict() - elif self.is_fsdp and not self.model.use_sharded_state: - st = self.model.gather_full_optim_state_dict( - self.optimizer - ) # only returns on rank 0 - self._gathered_optim_state = st - - def state_dict(self): - state_dict = { - "args": None, # legacy - "cfg": ( - OmegaConf.to_container(self.cfg, resolve=True, enum_to_str=True) - if OmegaConf.is_config(self.cfg) - else self.cfg - ), - "model": self.model.state_dict(), - "criterion": ( - self.criterion.state_dict() - if utils.has_parameters(self.criterion) - else None - ), - "optimizer_history": (self._optim_history or []) - + [ - { - "criterion_name": self.get_criterion().__class__.__name__, - "optimizer_name": self.optimizer.__class__.__name__, - "lr_scheduler_state": self.lr_scheduler.state_dict(), - "num_updates": self.get_num_updates(), - } - ], - "task_state": self.task.state_dict() if self.task is not None else {}, - "extra_state": { - "metrics": metrics.state_dict(), - "previous_training_time": self.cumulative_training_time(), - }, - } - if self.cfg.ema.store_ema: - # Save EMA model state as extra state - state_dict["extra_state"]["ema"] = self.ema.get_model().state_dict() - if self.cfg.ema.ema_fp32: - # Save EMA params in fp32 - state_dict["extra_state"]["ema_fp32_params"] = self.ema.fp32_params - if not self.cfg.checkpoint.no_save_optimizer_state: - if self._gathered_optim_state is not None: - state_dict["last_optimizer_state"] = self._gathered_optim_state - self._gathered_optim_state = None - else: - state_dict["last_optimizer_state"] = self.optimizer.state_dict() - if self.is_fsdp: - # save meta data for recombining checkpoint upon loading - state_dict["fsdp_metadata"] = self.model.local_metadata_dict() - return state_dict - - def save_checkpoint(self, filename, extra_state): - """Save all training state in a checkpoint file.""" - logger.info(f"Saving checkpoint to {filename}") - # call state_dict on all ranks in case it needs internal communication - state_dict = utils.move_to_cpu(self.state_dict()) - state_dict["extra_state"].update(extra_state) - if self.should_save_checkpoint_on_current_rank: - checkpoint_utils.torch_persistent_save( - state_dict, - filename, - async_write=self.cfg.checkpoint.write_checkpoints_asynchronously, - ) - logger.info(f"Finished saving checkpoint to {filename}") - - def load_checkpoint( - self, - filename, - reset_optimizer=False, - reset_lr_scheduler=False, - optimizer_overrides=None, - reset_meters=False, - ): - """ - Load all training state from a checkpoint file. - rank = 0 will load the checkpoint, and then broadcast it to all - other ranks. 
- """ - extra_state, self._optim_history, last_optim_state = None, [], None - - logger.info(f"Preparing to load checkpoint {filename}") - is_distributed = self.data_parallel_world_size > 1 - bexists = PathManager.isfile(filename) - if bexists: - load_on_all_ranks = ( - self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks - # TPUs don't support broadcast yet, so load checkpoints - # on every worker for now - or self.tpu - # FSDP requires loading checkpoint shards on all ranks - or (self.is_fsdp and self.cfg.distributed_training.use_sharded_state) - or getattr(self.cfg.model, "base_layers", 0) > 0 - ) - - if load_on_all_ranks or self.data_parallel_rank == 0: - state = checkpoint_utils.load_checkpoint_to_cpu( - filename, load_on_all_ranks=load_on_all_ranks - ) - last_optim_state = state.get("last_optimizer_state", None) - - # If doing zero_sharding, do not broadcast global optimizer - # state. Later we will broadcast sharded states to each rank - # to avoid memory from exploding. - if ( - not load_on_all_ranks - and self.cfg.distributed_training.zero_sharding == "os" - and "last_optimizer_state" in state - and is_distributed - ): - state["last_optimizer_state"] = "SHARDED" - else: - last_optim_state = None - state = None - - if is_distributed and not load_on_all_ranks: - state = distributed_utils.broadcast_object( - state, - src_rank=0, - group=self.data_parallel_process_group, - dist_device=self.device, - ) - if self.data_parallel_rank > 0: - last_optim_state = state.get("last_optimizer_state", None) - - # load model parameters - try: - self.model.load_state_dict( - state["model"], strict=True, model_cfg=self.cfg.model - ) - # save memory for later steps - del state["model"] - if utils.has_parameters(self.get_criterion()): - self.get_criterion().load_state_dict( - state["criterion"], strict=True - ) - del state["criterion"] - - except Exception: - raise Exception( - "Cannot load model parameters from checkpoint {}; " - "please ensure that the architectures match.".format(filename) - ) - extra_state = state["extra_state"] - self._optim_history = state["optimizer_history"] - - if last_optim_state is not None and not reset_optimizer: - # rebuild optimizer after loading model, since params may have changed - self._build_optimizer() - - # only reload optimizer and lr_scheduler if they match - last_optim = self._optim_history[-1] - assert ( - last_optim["criterion_name"] == self.get_criterion().__class__.__name__ - ), f"Criterion does not match; please reset the optimizer (--reset-optimizer). {last_optim['criterion_name']} vs {self.get_criterion().__class__.__name__}" - assert ( - last_optim["optimizer_name"] == self.optimizer.__class__.__name__ - ), f"Optimizer does not match; please reset the optimizer (--reset-optimizer). 
{last_optim['optimizer_name']} vs {self.optimizer.__class__.__name__}" - - if not reset_lr_scheduler: - self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"]) - - if self.is_fsdp and not self.model.use_sharded_state: - # if use_sharded_state, the last_optim_state is already sharded, skip this - last_optim_state = self.model.get_shard_from_optim_state_dict( - last_optim_state - ) - elif not load_on_all_ranks and is_distributed: - last_optim_state = self.optimizer.broadcast_global_state_dict( - last_optim_state - ) - - self.optimizer.load_state_dict(last_optim_state, optimizer_overrides) - - self.set_num_updates(last_optim["num_updates"]) - - if extra_state is not None: - itr_state = extra_state["train_iterator"] - epoch = itr_state["epoch"] - - if "previous_training_time" in extra_state: - self._previous_training_time = extra_state["previous_training_time"] - self._start_time = time.time() - - self.lr_step(epoch) - - if ( - itr_state.get("version", 1) >= 2 - and itr_state["iterations_in_epoch"] == 0 - ): - # reset meters at start of epoch - reset_meters = True - - if "metrics" in extra_state and not reset_meters: - metrics.load_state_dict(extra_state["metrics"]) - - # reset TimeMeters, since their start times don't make sense anymore - for meter in metrics.get_meters("default"): - if isinstance(meter, meters.TimeMeter): - meter.reset() - - if self.cfg.ema.store_ema: - if "ema" not in extra_state: - logger.warn( - "EMA not found in checkpoint. But store_ema is True. " - "EMA is re-initialized from checkpoint." - ) - self.ema.restore(state["model"], build_fp32_params=self.cfg.ema.ema_fp32) - else: - logger.info( - "Loading EMA from checkpoint" - ) - self.ema.restore(extra_state["ema"], build_fp32_params=False) - - if self.cfg.ema.ema_fp32: - if "ema_fp32_params" in extra_state: - logger.info( - "Loading EMA fp32 params from checkpoint" - ) - self.ema.build_fp32_params(extra_state["ema_fp32_params"]) - else: - logger.info( - "Building EMA fp32 params from EMA model in checkpoint" - ) - self.ema.build_fp32_params() - - logger.info( - "Loaded checkpoint {} (epoch {} @ {} updates)".format( - filename, epoch, self.get_num_updates() - ) - ) - - else: - logger.info("No existing checkpoint found {}".format(filename)) - - return extra_state - - def get_train_iterator( - self, - epoch, - combine=True, - load_dataset=True, - data_selector=None, - shard_batch_itr=True, - disable_iterator_cache=False, - ): - """Return an EpochBatchIterator over the training set for a given epoch.""" - if load_dataset: - logger.info("loading train data for epoch {}".format(epoch)) - self.task.load_dataset( - self.cfg.dataset.train_subset, - epoch=epoch, - combine=combine, - data_selector=data_selector, - tpu=self.tpu, - ) - batch_iterator = self.task.get_batch_iterator( - dataset=self.task.dataset(self.cfg.dataset.train_subset), - max_tokens=self.cfg.dataset.max_tokens, - max_sentences=self.cfg.dataset.batch_size, - max_positions=utils.resolve_max_positions( - self.task.max_positions(), - self.model.max_positions(), - self.cfg.dataset.max_tokens, - ), - ignore_invalid_inputs=True, - required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, - seed=self.cfg.common.seed, - num_shards=self.data_parallel_world_size if shard_batch_itr else 1, - shard_id=self.data_parallel_rank if shard_batch_itr else 0, - num_workers=self.cfg.dataset.num_workers, - epoch=epoch, - data_buffer_size=self.cfg.dataset.data_buffer_size, - disable_iterator_cache=disable_iterator_cache, - ) - 
self.reset_dummy_batch(batch_iterator.first_batch) - return batch_iterator - - def get_valid_iterator( - self, - subset, - disable_iterator_cache=False, - ): - """Return an EpochBatchIterator over given validation subset for a given epoch.""" - batch_iterator = self.task.get_batch_iterator( - dataset=self.task.dataset(subset), - max_tokens=self.cfg.dataset.max_tokens_valid, - max_sentences=self.cfg.dataset.batch_size_valid, - max_positions=utils.resolve_max_positions( - self.task.max_positions(), - self.model.max_positions(), - ), - ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test, - required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, - seed=self.cfg.common.seed, - num_shards=self.data_parallel_world_size, - shard_id=self.data_parallel_rank, - num_workers=self.cfg.dataset.num_workers, - # always pass a fixed "epoch" to keep validation data consistent - # across training epochs - epoch=1, - data_buffer_size=self.cfg.dataset.data_buffer_size, - disable_iterator_cache=disable_iterator_cache, - ) - self.reset_dummy_batch(batch_iterator.first_batch) - return batch_iterator - - def begin_epoch(self, epoch): - """Called at the beginning of each epoch.""" - logger.info("begin training epoch {}".format(epoch)) - - self.lr_step_begin_epoch(epoch) - - if self.quantizer is not None: - self.quantizer.begin_epoch(epoch) - - # task specific setup per epoch - self.task.begin_epoch(epoch, self.get_model()) - - if self.tpu: - import torch_xla.core.xla_model as xm - - xm.rendezvous("begin_epoch") # wait for all workers - xm.mark_step() - - def begin_valid_epoch(self, epoch): - """Called at the beginning of each validation epoch.""" - - # task specific setup per validation epoch - self.task.begin_valid_epoch(epoch, self.get_model()) - - def reset_dummy_batch(self, batch): - self._dummy_batch = batch - - @metrics.aggregate("train") - def train_step(self, samples, raise_oom=False): - """Do forward, backward and parameter update.""" - self._set_seed() - self.model.train() - self.criterion.train() - self.zero_grad() - - metrics.log_start_time("train_wall", priority=800, round=0) - - # If EMA is enabled through store_ema=True - # and task.uses_ema is True, pass the EMA model as a keyword - # argument to the task. - extra_kwargs = {} - if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False): - extra_kwargs["ema_model"] = self.ema.get_model() - - # forward and backward pass - logging_outputs, sample_size, ooms = [], 0, 0 - for i, sample in enumerate(samples): # delayed update loop - sample, is_dummy_batch = self._prepare_sample(sample) - - def maybe_no_sync(): - """ - Whenever *samples* contains more than one mini-batch, we - want to accumulate gradients locally and only call - all-reduce in the last backwards pass. - """ - if ( - self.data_parallel_world_size > 1 - and hasattr(self.model, "no_sync") - and i < len(samples) - 1 - # The no_sync context manager results in increased memory - # usage with FSDP, since full-size gradients will be - # accumulated on each GPU. It's typically a better tradeoff - # to do the extra communication with FSDP. 
- and not self.is_fsdp - ): - return self.model.no_sync() - else: - return contextlib.ExitStack() # dummy contextmanager - - try: - with maybe_no_sync(): - # forward and backward - loss, sample_size_i, logging_output = self.task.train_step( - sample=sample, - model=self.model, - criterion=self.criterion, - optimizer=self.optimizer, - update_num=self.get_num_updates(), - ignore_grad=is_dummy_batch, - **extra_kwargs, - ) - del loss - - logging_outputs.append(logging_output) - sample_size += sample_size_i - - # emptying the CUDA cache after the first step can - # reduce the chance of OOM - if self.cuda and self.get_num_updates() == 0: - torch.cuda.empty_cache() - except RuntimeError as e: - if "out of memory" in str(e): - self._log_oom(e) - if raise_oom: - raise e - logger.warning( - "attempting to recover from OOM in forward/backward pass" - ) - ooms += 1 - self.zero_grad() - if self.cuda: - torch.cuda.empty_cache() - if self.cfg.distributed_training.distributed_world_size == 1: - return None - else: - raise e - - if self.tpu and i < len(samples) - 1: - # tpu-comment: every XLA operation before marking step is - # appended to the IR graph, and processing too many batches - # before marking step can lead to OOM errors. - # To handle gradient accumulation use case, we explicitly - # mark step here for every forward pass without a backward pass - self._xla_markstep_and_send_to_cpu() - - if is_dummy_batch: - if torch.is_tensor(sample_size): - sample_size.zero_() - else: - sample_size *= 0.0 - - if torch.is_tensor(sample_size): - sample_size = sample_size.float() - else: - sample_size = float(sample_size) - - # gather logging outputs from all replicas - if self._sync_stats(): - train_time = self._local_cumulative_training_time() - logging_outputs, ( - sample_size, - ooms, - total_train_time, - ) = self._aggregate_logging_outputs( - logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch - ) - self._cumulative_training_time = ( - total_train_time / self.data_parallel_world_size - ) - - overflow = False - try: - with torch.autograd.profiler.record_function("reduce-grads"): - # reduce gradients across workers - self.optimizer.all_reduce_grads(self.model) - if utils.has_parameters(self.criterion): - self.optimizer.all_reduce_grads(self.criterion) - - with torch.autograd.profiler.record_function("multiply-grads"): - # multiply gradients by (data_parallel_size / sample_size) since - # DDP normalizes by the number of data parallel workers for - # improved fp16 precision. - # Thus we get (sum_of_gradients / sample_size) at the end. - # In case of fp16, this step also undoes loss scaling. - # (Debugging note: Some optimizers perform this scaling on the - # fly, so inspecting model.parameters() or optimizer.params may - # still show the original, unscaled gradients.) - numer = ( - self.data_parallel_world_size - if not self.cfg.optimization.use_bmuf or self._sync_stats() - else 1 - ) - self.optimizer.multiply_grads(numer / (sample_size or 1.0)) - # Note: (sample_size or 1.0) handles the case of a zero gradient, in a - # way that avoids CPU/device transfers in case sample_size is a GPU or - # TPU object. The assumption is that the gradient itself is also 0. 
- - with torch.autograd.profiler.record_function("clip-grads"): - # clip grads - grad_norm = self.clip_grad_norm(self.cfg.optimization.clip_norm) - - # check that grad norms are consistent across workers - # on tpu check tensor is slow - if not self.tpu: - if ( - not self.cfg.optimization.use_bmuf - and self.cfg.distributed_training.ddp_backend != "slow_mo" - ): - self._check_grad_norms(grad_norm) - if not torch.isfinite(grad_norm).all(): - # in case of AMP, if gradients are Nan/Inf then - # optimizer step is still required - if self.cfg.common.amp: - overflow = True - else: - # check local gradnorm single GPU case, trigger NanDetector - raise FloatingPointError("gradients are Nan/Inf") - - with torch.autograd.profiler.record_function("optimizer"): - # take an optimization step - self.task.optimizer_step( - self.optimizer, model=self.model, update_num=self.get_num_updates() - ) - if self.cfg.common.amp and overflow: - if self._amp_retries == self.cfg.common.amp_batch_retries: - logger.info("AMP: skipping this batch.") - self._amp_retries = 0 - else: - self._amp_retries += 1 - return self.train_step(samples, raise_oom) # recursion to feed in same batch - - except FloatingPointError: - # re-run the forward and backward pass with hooks attached to print - # out where it fails - self.zero_grad() - with NanDetector(self.get_model()): - for _, sample in enumerate(samples): - sample, _ = self._prepare_sample(sample) - self.task.train_step( - sample, - self.model, - self.criterion, - self.optimizer, - self.get_num_updates(), - ignore_grad=False, - **extra_kwargs, - ) - raise - except OverflowError as e: - overflow = True - logger.info( - f"NOTE: gradient overflow detected, ignoring gradient, {str(e)}" - ) - grad_norm = torch.tensor(0.0).cuda() - self.zero_grad() - except RuntimeError as e: - if "out of memory" in str(e): - self._log_oom(e) - logger.error("OOM during optimization, irrecoverable") - raise e - - # Some distributed wrappers (e.g., SlowMo) need access to the optimizer - # after the step - if hasattr(self.model, "perform_additional_optimizer_actions"): - if hasattr(self.optimizer, "fp32_params"): - self.model.perform_additional_optimizer_actions( - self.optimizer.optimizer, self.optimizer.fp32_params - ) - else: - self.model.perform_additional_optimizer_actions( - self.optimizer.optimizer - ) - - logging_output = None - if not overflow or self.cfg.distributed_training.ddp_backend == "slow_mo": - self.set_num_updates(self.get_num_updates() + 1) - - if self.cfg.ema.store_ema: - # Step EMA forward with new model. 
- self.ema.step( - self.get_model(), - self.get_num_updates(), - ) - metrics.log_scalar( - "ema_decay", - self.ema.get_decay(), - priority=10000, - round=5, - weight=0, - ) - - if self.tpu: - import torch_xla.core.xla_model as xm - - # mark step on TPUs - self._xla_markstep_and_send_to_cpu() - - # only log stats every log_interval steps - # this causes wps to be misreported when log_interval > 1 - logging_output = {} - if self.get_num_updates() % self.cfg.common.log_interval == 0: - # log memory usage - mem_info = xm.get_memory_info(self.device) - gb_free = mem_info["kb_free"] / 1024 / 1024 - gb_total = mem_info["kb_total"] / 1024 / 1024 - metrics.log_scalar( - "gb_free", gb_free, priority=1500, round=1, weight=0 - ) - metrics.log_scalar( - "gb_total", gb_total, priority=1600, round=1, weight=0 - ) - logging_outputs = self._xla_markstep_and_send_to_cpu( - logging_outputs - ) - logging_output = self._reduce_and_log_stats( - logging_outputs, sample_size, grad_norm - ) - - # log whenever there's an XLA compilation, since these - # slow down training and may indicate opportunities for - # optimization - self._check_xla_compilation() - else: - if self.cuda and self.cuda_env is not None: - # log minimum free memory over the iteration - gb_used = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024 - torch.cuda.reset_peak_memory_stats() - gb_free = self.cuda_env.total_memory_in_GB - gb_used - metrics.log_scalar( - "gb_free", gb_free, priority=1500, round=1, weight=0 - ) - - # log stats - logging_output = self._reduce_and_log_stats( - logging_outputs, sample_size, grad_norm - ) - - # clear CUDA cache to reduce memory fragmentation - if ( - self.cuda - and self.cfg.common.empty_cache_freq > 0 - and ( - (self.get_num_updates() + self.cfg.common.empty_cache_freq - 1) - % self.cfg.common.empty_cache_freq - ) - == 0 - ): - torch.cuda.empty_cache() - - if self.cfg.common.fp16 or self.cfg.common.amp: - metrics.log_scalar( - "loss_scale", - ( - self.optimizer.scaler.loss_scale - if self.cfg.common.fp16 - else self.optimizer.scaler.get_scale() - ), - priority=700, - round=4, - weight=0, - ) - - metrics.log_stop_time("train_wall") - return logging_output - - @metrics.aggregate("valid") - def valid_step(self, sample, raise_oom=False): - """Do forward pass in evaluation mode.""" - if self.tpu: - import torch_xla.core.xla_model as xm - - xm.rendezvous("valid_step") # wait for all workers - - # If EMA is enabled through store_ema=True - # and task.uses_ema is True, pass the EMA model as a keyword - # argument to the task. 
- extra_kwargs = {} - if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False): - extra_kwargs["ema_model"] = self.ema.get_model() - - with torch.no_grad(): - self.model.eval() - self.criterion.eval() - - sample, is_dummy_batch = self._prepare_sample(sample) - - try: - _loss, sample_size, logging_output = self.task.valid_step( - sample, self.model, self.criterion, **extra_kwargs - ) - except RuntimeError as e: - if "out of memory" in str(e): - self._log_oom(e) - if not raise_oom: - logger.warning( - "ran out of memory in validation step, retrying batch" - ) - for p in self.model.parameters(): - if p.grad is not None: - p.grad = None # free some memory - if self.cuda: - torch.cuda.empty_cache() - return self.valid_step(sample, raise_oom=True) - raise e - - logging_outputs = [logging_output] - if is_dummy_batch: - if torch.is_tensor(sample_size): - sample_size.zero_() - else: - sample_size *= 0.0 - - # gather logging outputs from all replicas - if self.data_parallel_world_size > 1: - logging_outputs, (sample_size,) = self._aggregate_logging_outputs( - logging_outputs, - sample_size, - ignore=is_dummy_batch, - ) - - # log validation stats - if self.tpu: - logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs) - logging_output = self._reduce_and_log_stats(logging_outputs, sample_size) - - return logging_output - - def zero_grad(self): - self.optimizer.zero_grad() - - def lr_step_begin_epoch(self, epoch): - """Adjust the learning rate at the beginning of the epoch.""" - self.lr_scheduler.step_begin_epoch(epoch) - # prefer updating the LR based on the number of steps - return self.lr_step_update() - - def lr_step(self, epoch, val_loss=None): - """Adjust the learning rate at the end of the epoch.""" - self.lr_scheduler.step(epoch, val_loss) - # prefer updating the LR based on the number of steps - return self.lr_step_update() - - def lr_step_update(self): - """Update the learning rate after each update.""" - new_lr = self.lr_scheduler.step_update(self.get_num_updates()) - if isinstance(new_lr, dict): - for k, v in new_lr.items(): - metrics.log_scalar(f"lr_{k}", v, weight=0, priority=300) - new_lr = new_lr.get("default", next(iter(new_lr.values()))) - else: - metrics.log_scalar("lr", new_lr, weight=0, priority=300) - return new_lr - - def get_lr(self): - """Get the current learning rate.""" - return self.optimizer.get_lr() - - def get_model(self): - """Get the (non-wrapped) model instance.""" - return self._model - - def get_criterion(self): - """Get the (non-wrapped) criterion instance.""" - return self._criterion - - def get_meter(self, name): - """[deprecated] Get a specific meter by name.""" - from fairseq import meters - - if "get_meter" not in self._warn_once: - self._warn_once.add("get_meter") - utils.deprecation_warning( - "Trainer.get_meter is deprecated. Please use fairseq.metrics instead." 
- ) - - train_meters = metrics.get_meters("train") - if train_meters is None: - train_meters = {} - - if name == "train_loss" and "loss" in train_meters: - return train_meters["loss"] - elif name == "train_nll_loss": - # support for legacy train.py, which assumed this meter is - # always initialized - m = train_meters.get("nll_loss", None) - return m or meters.AverageMeter() - elif name == "wall": - # support for legacy train.py, which assumed this meter is - # always initialized - m = metrics.get_meter("default", "wall") - return m or meters.TimeMeter() - elif name == "wps": - m = metrics.get_meter("train", "wps") - return m or meters.TimeMeter() - elif name in {"valid_loss", "valid_nll_loss"}: - # support for legacy train.py, which assumed these meters - # are always initialized - k = name[len("valid_") :] - m = metrics.get_meter("valid", k) - return m or meters.AverageMeter() - elif name == "oom": - return meters.AverageMeter() - elif name in train_meters: - return train_meters[name] - return None - - def get_num_updates(self): - """Get the number of parameters updates.""" - return self._num_updates - - def set_num_updates(self, num_updates): - """Set the number of parameters updates.""" - self._num_updates = num_updates - self.lr_step_update() - if self.quantizer: - self.quantizer.step_update(self._num_updates) - metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200) - - def clip_grad_norm(self, clip_norm): - def agg_norm_fn(total_norm): - total_norm = total_norm.cuda().float() ** 2 - total_norm = distributed_utils.all_reduce( - total_norm, group=self.data_parallel_process_group - ) - return total_norm ** 0.5 - - should_agg_norm = ( - self.is_fsdp - and ( - self.data_parallel_process_group is not None - or torch.distributed.is_initialized() - ) - ) - return self.optimizer.clip_grad_norm( - clip_norm, aggregate_norm_fn=agg_norm_fn if should_agg_norm else None - ) - - def cumulative_training_time(self): - if self._cumulative_training_time is None: - # single GPU - return self._local_cumulative_training_time() - else: - return self._cumulative_training_time - - def _local_cumulative_training_time(self): - """Aggregate training time in seconds.""" - return time.time() - self._start_time + self._previous_training_time - - def _fp_convert_sample(self, sample): - def apply_half(t): - if t.dtype is torch.float32: - return t.to(dtype=torch.half) - return t - - def apply_bfloat16(t): - if t.dtype is torch.float32: - return t.to(dtype=torch.bfloat16) - return t - - if self.cfg.common.fp16: - sample = utils.apply_to_sample(apply_half, sample) - - if self.cfg.common.bf16: - sample = utils.apply_to_sample(apply_bfloat16, sample) - - return sample - - def _prepare_sample(self, sample, is_dummy=False): - if sample == "DUMMY": - raise Exception( - "Trying to use an uninitialized 'dummy' batch. This usually indicates " - "that the total number of batches is smaller than the number of " - "participating GPUs. Try reducing the batch size or using fewer GPUs." - ) - - if sample is None or len(sample) == 0: - assert ( - self._dummy_batch is not None and len(self._dummy_batch) > 0 - ), "Invalid dummy batch: {}".format(self._dummy_batch) - sample, _ = self._prepare_sample(self._dummy_batch, is_dummy=True) - return sample, True - - # Given that PCIe/NVLink bandwidth is significantly smaller than DRAM bandwidth - # it makes sense to do the format conversion on the CPU and then transfer - # a smaller buffer to the device. This also saves GPU memory capacity. 
- - if self.cfg.common.on_cpu_convert_precision: - sample = self._fp_convert_sample(sample) - - if self.cuda: - if self.pipeline_model_parallel: - if 'target' in sample: - sample['target'] = utils.move_to_cuda(sample['target'], device=self.last_device) - else: - sample = utils.move_to_cuda(sample) - elif self.tpu and is_dummy: - # the dummy batch may not be on the appropriate device - sample = utils.move_to_cuda(sample, device=self.device) - - if not self.cfg.common.on_cpu_convert_precision: - sample = self._fp_convert_sample(sample) - - if self._dummy_batch == "DUMMY": - self._dummy_batch = sample - - return sample, False - - def _set_seed(self): - # Set seed based on args.seed and the update number so that we get - # reproducible results when resuming from checkpoints - seed = self.cfg.common.seed + self.get_num_updates() - utils.set_torch_seed(seed) - - def _sync_stats(self): - # Return True if it's using multiple GPUs and DDP or multiple GPUs with - # BMUF and it's a bmuf sync with warmup iterations completed before. - if self.data_parallel_world_size == 1: - return False - elif self.cfg.optimization.use_bmuf: - return ( - self.get_num_updates() + 1 - ) % self.cfg.bmuf.global_sync_iter == 0 and ( - self.get_num_updates() + 1 - ) > self.cfg.bmuf.warmup_iterations - else: - return True - - def _log_oom(self, exc): - msg = "OOM: Ran out of memory with exception: {}".format(exc) - logger.warning(msg) - if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"): - for device_idx in range(torch.cuda.device_count()): - logger.warning(torch.cuda.memory_summary(device=device_idx)) - sys.stderr.flush() - - def _aggregate_logging_outputs( - self, - logging_outputs: List[Dict[str, Any]], - *extra_stats_to_sum, - ignore=False, - ): - if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()): - return self._fast_stat_sync_sum( - logging_outputs, *extra_stats_to_sum, ignore=ignore - ) - else: - return self._all_gather_list_sync( - logging_outputs, *extra_stats_to_sum, ignore=ignore - ) - - def _all_gather_list_sync( - self, - logging_outputs: List[Dict[str, Any]], - *extra_stats_to_sum, - ignore=False, - ): - """ - Sync logging outputs across workers. all_gather_list_sync is - suitable when logging outputs are complex types. - """ - if self.tpu: - raise NotImplementedError - if ignore: - logging_outputs = [] - results = list( - zip( - *distributed_utils.all_gather_list( - [logging_outputs] + list(extra_stats_to_sum), - max_size=getattr(self.cfg.common, "all_gather_list_size", 16384), - group=self.data_parallel_process_group, - ) - ) - ) - logging_outputs, extra_stats_to_sum = results[0], results[1:] - logging_outputs = list(chain.from_iterable(logging_outputs)) - extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum] - return logging_outputs, extra_stats_to_sum - - def _fast_stat_sync_sum( - self, - logging_outputs: List[Dict[str, Any]], - *extra_stats_to_sum, - ignore=False, - ): - """ - Sync logging outputs across workers. fast_stat_sync_sum is - faster than all_gather_list_sync, but is only suitable when - logging outputs are scalars and can be summed. Note that - *logging_outputs* cannot contain any nested dicts/lists. 
- """ - data = {} - for i, stat in enumerate(extra_stats_to_sum): - data["extra_stats_" + str(i)] = stat - if len(logging_outputs) > 0: - log_keys = list(logging_outputs[0].keys()) - for k in log_keys: - if not ignore: - v = sum(log[k] for log in logging_outputs if k in log) - else: - v = logging_outputs[0][k] - v = torch.zeros_like(v) if torch.is_tensor(v) else 0 - data["logging_outputs_" + k] = v - else: - log_keys = None - - data = distributed_utils.all_reduce_dict( - data, device=self.device, group=self.data_parallel_process_group - ) - - extra_stats_to_sum = [ - data["extra_stats_" + str(i)] for i in range(len(extra_stats_to_sum)) - ] - if log_keys is not None: - logging_outputs = [{k: data["logging_outputs_" + k] for k in log_keys}] - else: - logging_outputs = [] - return logging_outputs, extra_stats_to_sum - - def _check_grad_norms(self, grad_norm): - """Check that grad norms are consistent across workers.""" - if self._grad_norm_buf is not None: - self._grad_norm_buf.zero_() - self._grad_norm_buf[self.data_parallel_rank] = grad_norm - distributed_utils.all_reduce( - self._grad_norm_buf, group=self.data_parallel_process_group - ) - - def is_consistent(tensor): - max_abs_diff = torch.max(torch.abs(tensor - tensor[0])) - return ( - (torch.isfinite(tensor).all() - and (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all()) - or - (self.cfg.common.amp and not torch.isfinite(tensor).all()) - # in case of amp non-finite grads are fine - ) - - if not is_consistent(self._grad_norm_buf): - pretty_detail = "\n".join( - "rank {:3d} = {:.8f}".format(r, n) - for r, n in enumerate(self._grad_norm_buf.tolist()) - ) - error_detail = "grad_norm across the workers:\n{}\n".format( - pretty_detail - ) - # use FloatingPointError to trigger NanDetector - raise FloatingPointError( - "Fatal error: gradients are inconsistent between workers. " - "Try --ddp-backend=legacy_ddp. " - "Or are you mixing up different generation of GPUs in training?" 
- + "\n" - + "-" * 80 - + "\n{}\n".format(error_detail) - + "-" * 80 - ) - - def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None): - if grad_norm is not None and ( - not torch.is_tensor(grad_norm) or torch.isfinite(grad_norm) - ): - metrics.log_speed("ups", 1.0, priority=100, round=2) - metrics.log_scalar("gnorm", grad_norm, priority=400, round=3) - if self.cfg.optimization.clip_norm > 0: - metrics.log_scalar( - "clip", - torch.where( - grad_norm > self.cfg.optimization.clip_norm, - grad_norm.new_tensor(100), - grad_norm.new_tensor(0), - ), - priority=500, - round=1, - ) - - with metrics.aggregate() as agg: - if logging_outputs is not None: - self.task.reduce_metrics(logging_outputs, self.get_criterion()) - del logging_outputs - - # extra warning for criterions that don't properly log a loss value - if "loss" not in agg: - if "loss" not in self._warn_once: - self._warn_once.add("loss") - logger.warning( - "Criterion.reduce_metrics did not log a 'loss' value, " - "which may break some functionality" - ) - metrics.log_scalar("loss", -1) - - # support legacy interface - if self.tpu: - logging_output = {} - else: - logging_output = agg.get_smoothed_values() - logging_output["sample_size"] = sample_size - for key_to_delete in ["ppl", "wps", "wpb", "bsz"]: - if key_to_delete in logging_output: - del logging_output[key_to_delete] - return logging_output - - def _check_xla_compilation(self): - import torch_xla.debug.metrics as met - - compile_stats = met.metric_data("CompileTime") - if compile_stats is None: - return - num_xla_compiles = compile_stats[0] - if num_xla_compiles > self._num_xla_compiles: - logger.warning( - "XLA compilation detected on device #{}; too many of these can lead " - "to slow training, but we expect a few in the beginning".format( - self.cfg.distributed_training.distributed_rank - ) - ) - self._num_xla_compiles = num_xla_compiles - - def _xla_markstep_and_send_to_cpu(self, data=None): - import torch_xla.core.xla_model as xm - - xm.mark_step() - if data is not None: - from fairseq.utils import xla_device_to_cpu - - return xla_device_to_cpu(data) - - -def _catalog_shared_params(module, memo=None, prefix=""): - if memo is None: - first_call = True - memo = {} - else: - first_call = False - for name, param in module._parameters.items(): - param_prefix = prefix + ("." if prefix else "") + name - if param not in memo: - memo[param] = [] - memo[param].append(param_prefix) - for name, m in module._modules.items(): - if m is None: - continue - submodule_prefix = prefix + ("." if prefix else "") + name - _catalog_shared_params(m, memo, submodule_prefix) - if first_call: - return [x for x in memo.values() if len(x) > 1] - - -def _get_module_by_path(module, path): - path = path.split(".") - for name in path: - module = getattr(module, name) - return module - - -def _set_module_by_path(module, path, value): - path = path.split(".") - for name in path[:-1]: - module = getattr(module, name) - setattr(module, path[-1], value) diff --git a/spaces/stomexserde/gpt4-ui/Examples/Apex Launcher Classic V3.4.2 Pro Crack Fixed APK [Latest] __EXCLUSIVE__.md b/spaces/stomexserde/gpt4-ui/Examples/Apex Launcher Classic V3.4.2 Pro Crack Fixed APK [Latest] __EXCLUSIVE__.md deleted file mode 100644 index 8abdf8d810fa86f1784d8bd9efaaf6428ff93c22..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Apex Launcher Classic V3.4.2 Pro Crack Fixed APK [Latest] __EXCLUSIVE__.md +++ /dev/null @@ -1,39 +0,0 @@ -
      -

      How to Download and Install Apex Launcher Classic V3.4.2 Pro Crack Fixed APK [Latest]

      -

      Apex Launcher Classic is a popular and customizable launcher app for Android devices. It helps you create a personalized, fast, and stylish home screen experience with various features and options. Apex Launcher Classic V3.4.2 Pro is the latest version of the app that unlocks more premium features and removes ads. However, this version is not available on the Google Play Store and requires a cracked APK file to install.

      -

      In this article, we will show you how to download and install Apex Launcher Classic V3.4.2 Pro Crack Fixed APK [Latest] on your Android device easily and safely. Follow the steps below to enjoy the full benefits of this amazing launcher app.

      -




      -

      Step 1: Download the APK file

      -

      The first step is to download the APK file of Apex Launcher Classic V3.4.2 Pro Crack Fixed from a reliable source. You can use the link below to download the file from SoundCloud[^4^], where it was uploaded by Tranindestke.

      -

      Download Apex Launcher Classic V3.4.2 Pro Crack Fixed APK [Latest]

      -

      Alternatively, you can also search for the file name on other websites or use a torrent client to download it.

      -

      Step 2: Enable unknown sources

      -

      The next step is to enable unknown sources on your Android device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown sources and toggle it on.

      -

      Note: This may vary depending on your device model and Android version.

      -

      Step 3: Install the APK file

      -

      The final step is to install the APK file of Apex Launcher Classic V3.4.2 Pro Crack Fixed on your device. To do this, locate the downloaded file in your file manager or downloads folder and tap on it. You may see a warning message asking you to confirm the installation. Tap on Install and wait for the process to finish.

      -

      -

      Step 4: Enjoy Apex Launcher Classic V3.4.2 Pro Crack Fixed

      -

      Congratulations! You have successfully installed Apex Launcher Classic V3.4.2 Pro Crack Fixed APK [Latest] on your Android device. Now you can launch the app from your home screen or app drawer and enjoy its features and options.

      -

      Some of the features of Apex Launcher Classic V3.4.2 Pro are:

      -
        -
      • Customizable home screen and app drawer grid size
      • -
      • Scrollable dock with up to 10 icons per page and up to 5 pages
      • -
      • Infinite & elastic scrolling (home screen, drawer and dock)
      • -
      • Fancy transition effects (tablet, cube, etc.)
      • -
      • Hide elements as you want (status bar, dock, etc.)
      • -
      • Custom icons and labels for shortcuts and folders
      • -
      • Choose different folder preview styles and background
      • -
      • Drawer apps sorting (title, install date, mostly used)
      • -
      • Hide apps from the drawer
      • -
      • Lock your desktop to prevent accidental changes
      • -
      • Convenient home screen gestures (pinch, swipe up/down, double tap)
      • -
      • Advanced theme engine (icon packs, skins, etc.)
      • -
      • Backup/restore settings and data
      • -
      • Optimized for both phones and tablets
      • -
      • Lots of other customization options!
      • -
      - -

      We hope this article was helpful for you. If you have any questions or suggestions, please share them with us in the comments below.

      -
      -
\ No newline at end of file diff --git a/spaces/studiobrn/SplitTrack/tests/common_utils/temp_utils.py b/spaces/studiobrn/SplitTrack/tests/common_utils/temp_utils.py deleted file mode 100644 index d1e0367e979c8b9fea65472c373916d956ad5aaa..0000000000000000000000000000000000000000 --- a/spaces/studiobrn/SplitTrack/tests/common_utils/temp_utils.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import os -import tempfile - - -class TempDirMixin: - """Mixin to provide easy access to temp dir. - """ - - temp_dir_ = None - - @classmethod - def get_base_temp_dir(cls): - # If AUDIOCRAFT_TEST_DIR is set, use it instead of temporary directory. - # this is handy for debugging. - key = "AUDIOCRAFT_TEST_DIR" - if key in os.environ: - return os.environ[key] - if cls.temp_dir_ is None: - cls.temp_dir_ = tempfile.TemporaryDirectory() - return cls.temp_dir_.name - - @classmethod - def tearDownClass(cls): - if cls.temp_dir_ is not None: - try: - cls.temp_dir_.cleanup() - cls.temp_dir_ = None - except PermissionError: - # On Windows there is a known issue with `shutil.rmtree`, - # which fails intermittently. - # https://github.com/python/cpython/issues/74168 - # Following the above thread, we ignore it. - pass - super().tearDownClass() - - @property - def id(self): - return self.__class__.__name__ - - def get_temp_path(self, *paths): - temp_dir = os.path.join(self.get_base_temp_dir(), self.id) - path = os.path.join(temp_dir, *paths) - os.makedirs(os.path.dirname(path), exist_ok=True) - return path - - def get_temp_dir(self, *paths): - temp_dir = os.path.join(self.get_base_temp_dir(), self.id) - path = os.path.join(temp_dir, *paths) - os.makedirs(path, exist_ok=True) - return path diff --git a/spaces/supertori/files/stable-diffusion-webui/html/footer.html b/spaces/supertori/files/stable-diffusion-webui/html/footer.html deleted file mode 100644 index f26e32e9304aedb5a55b0b46a913396f16375f7a..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/html/footer.html +++ /dev/null @@ -1,13 +0,0 @@ -
      - API -  •  - Github -  •  - Gradio -  •  - Reload UI -
      -
      -
      -{versions} -
      diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Assamese Font Ramdhenu.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Assamese Font Ramdhenu.md deleted file mode 100644 index 240486fd06b119dc88eaa00768af8b2e760103f5..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Assamese Font Ramdhenu.md +++ /dev/null @@ -1,87 +0,0 @@ -
      -

      Assamese Font Ramdhenu: A Guide for Typing and Writing in Assamese

      -

Assamese is a language spoken by about 15 million people in the northeastern state of Assam in India. It belongs to the Indo-Aryan branch of the Indo-European language family and has its own script, which is derived from the Brahmi script. The Assamese script has 41 consonants and 11 vowels and is written from left to right.

      -




      -

Typing and writing in Assamese can be challenging for many people, especially those who are not familiar with the script or the keyboard layout. Fortunately, there are tools and resources that can help you type and write in Assamese with ease and accuracy. One of them is Assamese Font Ramdhenu, a software package that provides a variety of fonts, keyboard layouts, and spell checkers for Assamese.

      -

      What is Assamese Font Ramdhenu?

      -

Assamese Font Ramdhenu is a software package developed by Utpal Baruah, an Assamese linguist and software engineer. It is designed to help users type and write in Assamese using different fonts, keyboard layouts, and spell checkers. It also supports Unicode, which means that it can display Assamese characters on any device or platform.
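To make the Unicode point concrete: Assamese shares the Bengali script block of Unicode (U+0980 to U+09FF) and adds two letters of its own, ৰ (ra) and ৱ (wa). The short Python check below is purely illustrative and is not part of Ramdhenu itself:

```python
import unicodedata

# Assamese uses the Bengali Unicode block (U+0980-U+09FF); the last two
# characters below are the Assamese-specific letters ra and wa.
for ch in ("\u0985", "\u0995", "\u09F0", "\u09F1"):  # অ ক ৰ ৱ
    print(f"U+{ord(ch):04X}  {ch}  {unicodedata.name(ch)}")
```

Because these are standard codepoints (registered under `BENGALI LETTER ...` names), any Unicode-aware device or platform can render them, which is what makes Ramdhenu's Unicode support portable.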

      -

      Assamese Font Ramdhenu has several features that make it a useful tool for Assamese typing and writing. Some of them are:

      -
• It offers a variety of fonts for Assamese, such as Geetanjali, Likhan, MuktiNarrow, Rupali, SolaimanLipi, and UniBangla. These fonts are compatible with Unicode 4.1.0 and can be used for purposes such as printing, publishing, and web design.
• It provides a spell checker for Assamese that can detect and correct spelling errors in Assamese texts. The spell checker is based on the Geetanjali font and is the first and only professional automatic spell checker for the Assamese language.
• It allows users to type in Assamese using different keyboard layouts, such as phonetic, Inscript, and typewriter. The phonetic layout is especially convenient for those who are not familiar with the Assamese script or keyboard: it lets users type with English letters that sound similar to the corresponding Assamese letters (a simplified sketch of this idea appears after this list).
• It enables users to convert texts from one font to another or from one keyboard layout to another, which is useful for editing or reformatting texts.
• It supports Unicode conversion, which means that it can convert texts from non-Unicode fonts to Unicode fonts or vice versa, so Assamese texts can be displayed or shared on any device or platform.
      -
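To give a feel for how a phonetic layout works, here is a minimal sketch in Python. The key mapping is hypothetical (Ramdhenu's actual key assignments may differ), but the greedy longest-match lookup it uses is the standard approach for phonetic input methods:

```python
# Hypothetical phonetic key mapping (illustration only; not Ramdhenu's
# actual layout). Multi-letter keys like "kh" map to single letters.
PHONETIC_MAP = {
    "k": "\u0995",   # ক (ka)
    "kh": "\u0996",  # খ (kha)
    "g": "\u0997",   # গ (ga)
    "r": "\u09F0",   # ৰ (Assamese ra)
    "w": "\u09F1",   # ৱ (Assamese wa)
    "a": "\u09BE",   # া (aa vowel sign)
    "i": "\u09BF",   # ি (i vowel sign)
}

def transliterate(text: str) -> str:
    """Greedily match the longest key first, so "kh" wins over "k"."""
    out, i = [], 0
    while i < len(text):
        for size in (2, 1):  # try two-letter keys, then one-letter keys
            chunk = text[i:i + size]
            if chunk in PHONETIC_MAP:
                out.append(PHONETIC_MAP[chunk])
                i += size
                break
        else:
            out.append(text[i])  # leave unmapped characters unchanged
            i += 1
    return "".join(out)

print(transliterate("khagar"))  # খাগাৰ
```

A real input method additionally handles conjunct consonants and vowel-sign placement, which is the hard part that tools like Ramdhenu take care of.

-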

      How to install Assamese Font Ramdhenu?

      -

Installing Assamese Font Ramdhenu is simple. Just follow these steps:

      -
1. Download the software from this link: https://drive.google.com/file/d/0BwC-XiOJIYo1QVJtTWQ3Z2dUb1k/view
2. Extract the zip file and run the setup.exe file.
3. Follow the instructions on the screen and complete the installation process.
4. Restart your computer and start using Assamese Font Ramdhenu.
      -

      How to use Assamese Font Ramdhenu?

      -

Using Assamese Font Ramdhenu is also simple. Just follow these steps:

      -

      -
1. Open any text editor or word processor on your computer.
2. Select the font and keyboard layout that you want to use from the menu bar of Assamese Font Ramdhenu.
3. Type or paste your text in Assamese using the selected font and keyboard layout.
4. To check or correct your spelling, click on the spell checker icon on the menu bar of Assamese Font Ramdhenu.
5. To convert your text from one font to another or from one keyboard layout to another, click on the converter icon on the menu bar.
6. To convert your text from a non-Unicode font to a Unicode font or vice versa, click on the Unicode converter icon on the menu bar (a sketch of what such a conversion involves appears after these steps).
7. Save or print your text as you wish.
      -
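For readers curious what the Unicode conversion step does under the hood, here is a hedged sketch: legacy (non-Unicode) Assamese fonts typically place Assamese glyphs at ASCII code points, so conversion amounts to a table lookup per character or sequence. The glyph table below is made up for illustration; real converters such as Ramdhenu's ship large, font-specific tables and also reorder vowel signs.

# Toy legacy-font-to-Unicode converter. The glyph table is fabricated
# for illustration; each legacy font needs its own table in practice.
LEGACY_TO_UNICODE = {
    "A": "\u0985",  # pretend the legacy font draws অ for ASCII 'A'
    "k": "\u0995",  # pretend the legacy font draws ক for ASCII 'k'
    "K": "\u0996",  # pretend the legacy font draws খ for ASCII 'K'
}

def legacy_to_unicode(text):
    return "".join(LEGACY_TO_UNICODE.get(ch, ch) for ch in text)

print(legacy_to_unicode("kA"))  # -> ক and অ as proper Unicode code points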

      Conclusion

      -

Assamese Font Ramdhenu is a piece of software that can help you type and write in Assamese with ease and accuracy. It offers a variety of fonts, keyboard layouts, and spell checkers for Assamese. It also supports Unicode conversion, which can help you display or share Assamese texts on any device or platform. If you are looking for a tool for typing and writing in Assamese, give Assamese Font Ramdhenu a try.

      -

      Why use Assamese Font Ramdhenu?

      -

      There are many reasons why you should use Assamese Font Ramdhenu for typing and writing in Assamese. Some of them are:

      -
        -
• It is reliable and user-friendly software that helps you type and write in Assamese without hassle or errors.
• -
• It is comprehensive and versatile software that can meet your various needs and preferences for Assamese fonts, keyboard layouts, and spell checkers.
• -
• It is compatible and flexible software that works with any device or platform that supports Unicode.
• -
• It is free and accessible software that can be downloaded and installed easily from the internet.
• -
• It is popular and trusted software that has been used by many Assamese users, including DTP operators, journalists, writers, students, and teachers.
      • -
      -

      What are the benefits of Assamese Font Ramdhenu?

      -

      Using Assamese Font Ramdhenu can bring you many benefits for typing and writing in Assamese. Some of them are:

      -
        -
      • It can improve your typing speed and accuracy by providing you with a phonetic keyboard layout that matches the sounds of the Assamese letters.
      • -
      • It can enhance your writing quality and style by providing you with a spell checker that can detect and correct spelling errors in Assamese texts.
      • -
      • It can increase your writing diversity and creativity by providing you with a variety of fonts that can suit different purposes, such as printing, publishing, web designing, etc.
      • -
      • It can expand your writing reach and impact by providing you with a Unicode converter that can display or share texts in Assamese on any device or platform.
      • -
      • It can save your time and money by providing you with a free and easy-to-use software that can be downloaded and installed quickly from the internet.
      • -
      -

      How to learn more about Assamese Font Ramdhenu?

      -

      If you want to learn more about Assamese Font Ramdhenu, you can visit the following sources:

      -
        -
      • The official website of Assamese Font Ramdhenu: https://assamyellowpage.assaminterview.com/ayp/download-ramdhenu-plus-assamese-typing-software-spell-checker
      • -
      • The YouTube video tutorial of Assamese Font Ramdhenu: https://www.youtube.com/watch?v=Wh8dU55EXqM
      • -
      • The South Asian Language Resource Center website on Bengali and Assamese Fonts: https://salrc.uchicago.edu/resources/fonts/available/bengali/
      • -
      • The branah website on Assamese Keyboard: https://www.branah.com/assamese
      • -
      -

      You can also contact the developer of Assamese Font Ramdhenu, Utpal Baruah, at utpalbaruah@gmail.com for any queries or feedback.

      -

      What are the challenges of Assamese Font Ramdhenu?

      -

Assamese Font Ramdhenu can solve many problems involved in typing and writing in Assamese. However, it also has some challenges that users should be aware of. Some of them are:

      -
        -
      • It requires a compatible operating system and device to run properly. It may not work well on some older or newer versions of Windows or Mac OS, or on some mobile devices or tablets.
      • -
      • It may not support some rare or special characters or symbols in Assamese script. It may also have some errors or bugs in some fonts or keyboard layouts.
      • -
      • It may not be compatible with some other software or applications that use Assamese fonts or keyboard layouts. It may cause some conflicts or issues when running simultaneously with them.
      • -
      • It may not be updated regularly or frequently by the developer. It may not have the latest features or improvements that users may expect or need.
      • -
• It may not have enough documentation or support for users who have questions or problems with the software, such as a user manual, an FAQ section, a forum, or a customer-service channel.
      • -
      -

      How to overcome the challenges of Assamese Font Ramdhenu?

      -

      Despite the challenges of Assamese Font Ramdhenu, users can still enjoy its benefits and advantages for typing and writing in Assamese. Here are some tips and suggestions on how to overcome the challenges of Assamese Font Ramdhenu:

      -
        -
      • Check the compatibility and requirements of the software before downloading and installing it. Make sure your operating system and device can support the software and its features.
      • -
• Use the Unicode fonts and keyboard layouts whenever possible. They are more universal and standard than the non-Unicode ones, and they let you display and share Assamese texts on any device or platform.
      • -
      • Avoid using other software or applications that use Assamese fonts or keyboard layouts when using Assamese Font Ramdhenu. Close them or disable them if they cause any conflicts or issues with the software.
      • -
      • Check for updates and upgrades of the software regularly. Download and install them if they are available and necessary. They can improve the performance and functionality of the software.
      • -
      • Look for documentation and support for the software online. Visit the official website, the YouTube video tutorial, the South Asian Language Resource Center website, or the branah website for more information and guidance on how to use the software.
      • -
      -

      Conclusion

      -

Assamese Font Ramdhenu is a piece of software that can help you type and write in Assamese with ease and accuracy. It offers a variety of fonts, keyboard layouts, and spell checkers for Assamese, and it supports Unicode conversion, which can help you display or share Assamese texts on any device or platform. However, it also has some challenges that users should be aware of, such as compatibility, support, and update issues. By following the tips and suggestions above, you can overcome these challenges and enjoy the benefits of Assamese Font Ramdhenu.


      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Dpwh Blue Book Volume 2 Pdf Free Download _BEST_.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Dpwh Blue Book Volume 2 Pdf Free Download _BEST_.md deleted file mode 100644 index aa90f315c07c3a3094d0e033878575fa4861dca1..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Dpwh Blue Book Volume 2 Pdf Free Download _BEST_.md +++ /dev/null @@ -1,6 +0,0 @@ -

      dpwh blue book volume 2 pdf free download


      Download Zip ✪✪✪ https://cinurl.com/2uEYTq



      -
-How much is the updated Blue Book Volume 2 at the DPWH Supply Office? Isn't it available online as a free PDF download? 1fdad05405
      -
      -
      -

      diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Image-Line Deckadance DVS V2.09 WiN MAC OSX-UNION NEW.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Image-Line Deckadance DVS V2.09 WiN MAC OSX-UNION NEW.md deleted file mode 100644 index 4cab7de6720a6227147e3694e1135fa50f465b71..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Image-Line Deckadance DVS V2.09 WiN MAC OSX-UNION NEW.md +++ /dev/null @@ -1,27 +0,0 @@ -
      -

      Deckadance 2: A Powerful DJ Software for Windows and Mac

      -

      If you are looking for a DJ software that can handle remixing, sampling, effects, and VST plugins, you might want to check out Deckadance 2. This software is developed by Image-Line, the same company behind FL Studio, and it offers a lot of features and flexibility for both beginners and professionals.

      -

      Deckadance 2 is available in two versions: Deckadance 2 ($79 USD) and Deckadance 2 DVS ($149 USD). The DVS version includes support for digital vinyl systems, which allow you to control the software with turntables and timecode vinyls. Both versions support up to 4 decks, smart knobs, GrossBeat effects, isolator effects, sampler, smart panels, VST host and client, MIDI controllers, iTunes library, playlists, and automated mixing options.

      -

      Image-Line Deckadance DVS V2.09 WiN MAC OSX-UNION


      Download Filehttps://cinurl.com/2uEXTI



      -

      One of the main advantages of Deckadance 2 is that it can work as a stand-alone application or as a VST plugin in your favorite host such as FL Studio, Cubase, or Live. This means you can integrate Deckadance 2 with your existing music production workflow and use it as a powerful tool for live performances.

      -

      Another feature that sets Deckadance 2 apart from other DJ software is the Effector VST plugin that comes with it. This plugin has 12 performance-oriented effects that you can apply to your tracks or to the master output. These effects include distortion, lo-fi, flanger, phaser, filter, delay, reverb, stereo panning, binaural effect, gating, granulizer, vocal formant, and ring modulation. You can also automate these effects with envelopes and MIDI controllers.

      -

If you want to try out Deckadance 2 for yourself, you can download the demo version from the Image-Line website. You can also browse the online manual to see how it works, and watch some tutorials on YouTube. Deckadance 2 is compatible with Windows XP/Vista/7/8/10 and Mac OS X 10.5 or later.

      -

      Deckadance 2 is a versatile and powerful DJ software that can help you create amazing mixes and remixes with ease. Whether you are a beginner or a professional DJ, you will find something to love about Deckadance 2. So don't hesitate and get your copy today!

      - -

      How to Use Deckadance 2

      -

If you are new to Deckadance 2, you might be wondering how to use it and what its main features are. Here is a brief overview of how to get started with Deckadance 2 and what you can do with it.

      -

      -

      Setting Up Deckadance 2

      -

      The first thing you need to do is to install Deckadance 2 on your computer. You can download the installer from the Image-Line website and follow the instructions. Once you have installed Deckadance 2, you can launch it from your desktop or from your host application if you are using it as a VST plugin.

      -

      The next step is to configure your audio and MIDI settings. You can access the settings menu by clicking on the gear icon on the top right corner of the Deckadance 2 interface. Here you can select your audio device, sample rate, buffer size, latency, and output channels. You can also select your MIDI device, MIDI channel, and MIDI mapping. Deckadance 2 supports a wide range of pre-mapped MIDI controllers from various brands, but you can also create your own custom mapping if you prefer.

      -

      Once you have set up your audio and MIDI settings, you can load some tracks into Deckadance 2 and start mixing. You can drag and drop tracks from your computer or from your iTunes library into the decks. You can also use the browser panel on the left side of the interface to browse your folders and playlists. You can sort your tracks by name, artist, genre, BPM, key, or rating. You can also search for tracks by typing keywords in the search box.

      -

      Mixing with Deckadance 2

      -

      When you have loaded some tracks into Deckadance 2, you can start mixing them using the mixer panel on the bottom of the interface. Here you can adjust the volume, gain, EQ, and crossfader of each deck. You can also use the cue buttons to preview the tracks in your headphones before playing them on the master output. You can also use the sync buttons to automatically match the tempo and phase of the tracks.

      -

One of the most important features of Deckadance 2 is the smart knob panel at the top of the interface. Here you can assign a single knob to control multiple parameters of effects and mixer functions, and create your own envelopes to modulate these parameters over time. For example, you can assign a smart knob to the filter cutoff and resonance of an isolator effect and create an envelope that sweeps both parameters from low to high and back. You can then use this smart knob to create dynamic transitions between tracks.
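The idea of one knob fanning out to several parameter ranges is easy to sketch in code. The class below is a conceptual illustration only; the parameter names and ranges are invented and are not Deckadance's internal API.

# Conceptual "smart knob": one 0..1 control drives several parameters,
# each scaled into its own range. Names and ranges are illustrative.
class SmartKnob:
    def __init__(self):
        self.targets = []  # list of (name, low, high)

    def assign(self, name, low, high):
        self.targets.append((name, low, high))

    def turn(self, position):
        position = max(0.0, min(1.0, position))  # clamp to 0..1
        return {name: low + (high - low) * position
                for name, low, high in self.targets}

knob = SmartKnob()
knob.assign("filter_cutoff_hz", 200.0, 12000.0)
knob.assign("filter_resonance", 0.1, 0.9)
print(knob.turn(0.5))  # halfway sweep: cutoff 6100.0 Hz, resonance 0.5

An envelope is then just a function of time whose output is fed to turn(), which is how a single sweep can animate every assigned parameter at once.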

      -

      Another feature that makes Deckadance 2 stand out from other DJ software is the GrossBeat panel on the right side of the interface. Here you can apply glitch, stutter, and scratch effects to your tracks using 8 user-defined slots. You can also edit these effects using an integrated editor that allows you to draw curves and shapes on a grid. For example, you can create a GrossBeat effect that reverses a part of your track and repeats it with different pitches. You can then trigger this effect with a button or a MIDI controller.
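To give a feel for what such a slot does to the audio, here is a toy NumPy sketch of a reverse-and-repitch effect: it reverses a slice of the signal, then repeats it at a few playback rates using naive index-scaling resampling. This is a conceptual illustration, not GrossBeat's actual algorithm.

# Toy reverse-and-repitch glitch: reverse a slice, then repeat it at
# several playback rates (naive resampling). Illustrative only.
import numpy as np

def reverse_stutter(audio, start, length, rates=(1.0, 1.5, 0.75)):
    segment = audio[start:start + length][::-1]  # reversed slice
    pieces = [audio[:start]]
    for rate in rates:
        idx = (np.arange(int(len(segment) / rate)) * rate).astype(int)
        pieces.append(segment[idx])  # same slice, new pitch/speed
    pieces.append(audio[start + length:])
    return np.concatenate(pieces)

sr = 44100
tone = np.sin(2 * np.pi * 440 * np.arange(sr) / sr)  # 1 s test tone
glitched = reverse_stutter(tone, start=sr // 4, length=sr // 10)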

      -

      Remixing with Deckadance 2

      -

      If you want to take your mixing skills to the next level, you can use Deckadance 2 as a remixing tool. Deckadance 2 allows you to use up to 3 insert effects per track and apply them independently to high, mid, or low frequencies. You can choose from 10 different effects such as delay, reverb, flanger, phaser, distortion, lo-fi, filter, ring modulator, vocal formant, and granulizer. You can also automate these effects with envelopes and MIDI controllers.

      -

      You can also use Deckadance 2 as a sampler and create your own loops and samples from your tracks or external sources. You can use the sampler panel on the left side of the interface to record samples with different modes such as one shot, trigger, retrigger, or loop. You can also adjust the pitch, volume, pan, filter, and envelope of each sample. You can then trigger these samples with buttons or MIDI controllers.

      -

Another way to remix your tracks with Deckadance 2 is to use it as a VST host or client. This means you can load other VST plugins into Deckadance 2 to process your decks, or load Deckadance 2 itself as a plugin inside another host.

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Nhl13RELOADEDrarpasswordrar.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Nhl13RELOADEDrarpasswordrar.md deleted file mode 100644 index 426e203e275243f48ba20058e52d330e7cc7e677..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Nhl13RELOADEDrarpasswordrar.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Nhl13RELOADEDrarpasswordrar


      Download >>>>> https://cinurl.com/2uEYaS



- -nappdysmifourth/nhl13reloadedrarpasswordrar. By nappdysmifourth. A Docker Hub container page for Nhl13RELOADEDrarpasswordrar; the page lists no tags. 8a78ff9644
      -
      -
      -

      diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Xforce Keygen For Revit 2009 ((NEW)).md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Xforce Keygen For Revit 2009 ((NEW)).md deleted file mode 100644 index 0c67cf50cf487f2a77e9e69e5ed7077779ce0966..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Xforce Keygen For Revit 2009 ((NEW)).md +++ /dev/null @@ -1,6 +0,0 @@ -

      Xforce Keygen For Revit 2009


      DOWNLOAD ———>>> https://cinurl.com/2uEYJi



- - 4fefd39f24
      -
      -
      -

      diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/__init__.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/__init__.py deleted file mode 100644 index 210a2989138380559f23045b568d0fbbeb918c03..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# flake8: noqa -from .arraymisc import * -from .fileio import * -from .image import * -from .utils import * -from .version import * -from .video import * -from .visualization import * - -# The following modules are not imported to this level, so mmcv may be used -# without PyTorch. -# - runner -# - parallel -# - op diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/ops/iou3d.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/ops/iou3d.py deleted file mode 100644 index 6fc71979190323f44c09f8b7e1761cf49cd2d76b..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/ops/iou3d.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', [ - 'iou3d_boxes_iou_bev_forward', 'iou3d_nms_forward', - 'iou3d_nms_normal_forward' -]) - - -def boxes_iou_bev(boxes_a, boxes_b): - """Calculate boxes IoU in the Bird's Eye View. - - Args: - boxes_a (torch.Tensor): Input boxes a with shape (M, 5). - boxes_b (torch.Tensor): Input boxes b with shape (N, 5). - - Returns: - ans_iou (torch.Tensor): IoU result with shape (M, N). - """ - ans_iou = boxes_a.new_zeros( - torch.Size((boxes_a.shape[0], boxes_b.shape[0]))) - - ext_module.iou3d_boxes_iou_bev_forward(boxes_a.contiguous(), - boxes_b.contiguous(), ans_iou) - - return ans_iou - - -def nms_bev(boxes, scores, thresh, pre_max_size=None, post_max_size=None): - """NMS function GPU implementation (for BEV boxes). The overlap of two - boxes for IoU calculation is defined as the exact overlapping area of the - two boxes. In this function, one can also set ``pre_max_size`` and - ``post_max_size``. - - Args: - boxes (torch.Tensor): Input boxes with the shape of [N, 5] - ([x1, y1, x2, y2, ry]). - scores (torch.Tensor): Scores of boxes with the shape of [N]. - thresh (float): Overlap threshold of NMS. - pre_max_size (int, optional): Max size of boxes before NMS. - Default: None. - post_max_size (int, optional): Max size of boxes after NMS. - Default: None. - - Returns: - torch.Tensor: Indexes after NMS. - """ - assert boxes.size(1) == 5, 'Input boxes shape should be [N, 5]' - order = scores.sort(0, descending=True)[1] - - if pre_max_size is not None: - order = order[:pre_max_size] - boxes = boxes[order].contiguous() - - keep = torch.zeros(boxes.size(0), dtype=torch.long) - num_out = ext_module.iou3d_nms_forward(boxes, keep, thresh) - keep = order[keep[:num_out].cuda(boxes.device)].contiguous() - if post_max_size is not None: - keep = keep[:post_max_size] - return keep - - -def nms_normal_bev(boxes, scores, thresh): - """Normal NMS function GPU implementation (for BEV boxes). The overlap of - two boxes for IoU calculation is defined as the exact overlapping area of - the two boxes WITH their yaw angle set to 0. - - Args: - boxes (torch.Tensor): Input boxes with shape (N, 5). - scores (torch.Tensor): Scores of predicted boxes with shape (N). - thresh (float): Overlap threshold of NMS. 
- - Returns: - torch.Tensor: Remaining indices with scores in descending order. - """ - assert boxes.shape[1] == 5, 'Input boxes shape should be [N, 5]' - order = scores.sort(0, descending=True)[1] - - boxes = boxes[order].contiguous() - - keep = torch.zeros(boxes.size(0), dtype=torch.long) - num_out = ext_module.iou3d_nms_normal_forward(boxes, keep, thresh) - return order[keep[:num_out].cuda(boxes.device)].contiguous() diff --git a/spaces/syedusama5556/Real-ESRGAN-Demo/app.py b/spaces/syedusama5556/Real-ESRGAN-Demo/app.py deleted file mode 100644 index 7cbd0a8bfd6981535c0d9bf90d1a837b2f3b0bbb..0000000000000000000000000000000000000000 --- a/spaces/syedusama5556/Real-ESRGAN-Demo/app.py +++ /dev/null @@ -1,237 +0,0 @@ -import gradio as gr -import cv2 -import numpy -import os -import random -from basicsr.archs.rrdbnet_arch import RRDBNet -from basicsr.utils.download_util import load_file_from_url - -from realesrgan import RealESRGANer -from realesrgan.archs.srvgg_arch import SRVGGNetCompact -from fastapi import FastAPI - -# base path -CUSTOM_PATH = "/gradio" - -app = FastAPI() - -last_file = None -img_mode = "RGBA" - -@app.get("/") -def read_main(): - return {"message": "This is your main app"} - - -def realesrgan(img, model_name, denoise_strength, face_enhance, outscale): - """Real-ESRGAN function to restore (and upscale) images. - """ - if not img: - return - - # Define model parameters - if model_name == 'RealESRGAN_x4plus': # x4 RRDBNet model - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) - netscale = 4 - file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth'] - elif model_name == 'RealESRNet_x4plus': # x4 RRDBNet model - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) - netscale = 4 - file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth'] - elif model_name == 'RealESRGAN_x4plus_anime_6B': # x4 RRDBNet model with 6 blocks - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4) - netscale = 4 - file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth'] - elif model_name == 'RealESRGAN_x2plus': # x2 RRDBNet model - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2) - netscale = 2 - file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth'] - elif model_name == 'realesr-general-x4v3': # x4 VGG-style model (S size) - model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu') - netscale = 4 - file_url = [ - 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth', - 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth' - ] - - # Determine model paths - model_path = os.path.join('weights', model_name + '.pth') - if not os.path.isfile(model_path): - ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) - for url in file_url: - # model_path will be updated - model_path = load_file_from_url( - url=url, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None) - - # Use dni to control the denoise strength - dni_weight = None - if model_name == 'realesr-general-x4v3' and denoise_strength != 1: - wdn_model_path = model_path.replace('realesr-general-x4v3', 'realesr-general-wdn-x4v3') - model_path = 
[model_path, wdn_model_path] - dni_weight = [denoise_strength, 1 - denoise_strength] - - # Restorer Class - upsampler = RealESRGANer( - scale=netscale, - model_path=model_path, - dni_weight=dni_weight, - model=model, - tile=0, - tile_pad=10, - pre_pad=10, - half=False, - gpu_id=None - ) - - # Use GFPGAN for face enhancement - if face_enhance: - from gfpgan import GFPGANer - face_enhancer = GFPGANer( - model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth', - upscale=outscale, - arch='clean', - channel_multiplier=2, - bg_upsampler=upsampler) - - # Convert the input PIL image to cv2 image, so that it can be processed by realesrgan - cv_img = numpy.array(img) - img = cv2.cvtColor(cv_img, cv2.COLOR_RGBA2BGRA) - - # Apply restoration - try: - if face_enhance: - _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True) - else: - output, _ = upsampler.enhance(img, outscale=outscale) - except RuntimeError as error: - print('Error', error) - print('If you encounter CUDA out of memory, try to set --tile with a smaller number.') - else: - # Save restored image and return it to the output Image component - if img_mode == 'RGBA': # RGBA images should be saved in png format - extension = 'png' - else: - extension = 'jpg' - - out_filename = f"output_{rnd_string(8)}.{extension}" - cv2.imwrite(out_filename, output) - global last_file - last_file = out_filename - return out_filename - - -def rnd_string(x): - """Returns a string of 'x' random characters - """ - characters = "abcdefghijklmnopqrstuvwxyz_0123456789" - result = "".join((random.choice(characters)) for i in range(x)) - return result - - -def reset(): - """Resets the Image components of the Gradio interface and deletes - the last processed image - """ - global last_file - if last_file: - print(f"Deleting {last_file} ...") - os.remove(last_file) - last_file = None - return gr.update(value=None), gr.update(value=None) - - -def has_transparency(img): - """This function works by first checking to see if a "transparency" property is defined - in the image's info -- if so, we return "True". Then, if the image is using indexed colors - (such as in GIFs), it gets the index of the transparent color in the palette - (img.info.get("transparency", -1)) and checks if it's used anywhere in the canvas - (img.getcolors()). If the image is in RGBA mode, then presumably it has transparency in - it, but it double-checks by getting the minimum and maximum values of every color channel - (img.getextrema()), and checks if the alpha channel's smallest value falls below 255. 
- https://stackoverflow.com/questions/43864101/python-pil-check-if-image-is-transparent - """ - if img.info.get("transparency", None) is not None: - return True - if img.mode == "P": - transparent = img.info.get("transparency", -1) - for _, index in img.getcolors(): - if index == transparent: - return True - elif img.mode == "RGBA": - extrema = img.getextrema() - if extrema[3][0] < 255: - return True - return False - - -def image_properties(img): - """Returns the dimensions (width and height) and color mode of the input image and - also sets the global img_mode variable to be used by the realesrgan function - """ - global img_mode - if img: - if has_transparency(img): - img_mode = "RGBA" - else: - img_mode = "RGB" - properties = f"Width: {img.size[0]}, Height: {img.size[1]} | Color Mode: {img_mode}" - return properties - - -def main(): - # Gradio Interface - with gr.Blocks(title="Real-ESRGAN Gradio Demo", theme="dark") as demo: - - # gr.Markdown( - # """#
      Real-ESRGAN Demo for Image Restoration and Upscaling
      - #
      - - # This Gradio Demo was built as my Final Project for **CS50's Introduction to Programming with Python**. - # Please visit the [Real-ESRGAN GitHub page](https://github.com/xinntao/Real-ESRGAN) for detailed information about the project. - # """ - # ) - - with gr.Accordion("Options/Parameters"): - with gr.Row(): - model_name = gr.Dropdown(label="Real-ESRGAN inference model to be used", - choices=["RealESRGAN_x4plus", "RealESRNet_x4plus", "RealESRGAN_x4plus_anime_6B", - "RealESRGAN_x2plus", "realesr-general-x4v3"], - value="realesr-general-x4v3", show_label=True) - denoise_strength = gr.Slider(label="Denoise Strength (Used only with the realesr-general-x4v3 model)", - minimum=0, maximum=1, step=0.1, value=0.5) - outscale = gr.Slider(label="Image Upscaling Factor", - minimum=1, maximum=10, step=1, value=2, show_label=True) - face_enhance = gr.Checkbox(label="Face Enhancement using GFPGAN (Doesn't work for anime images)", - value=False, show_label=True) - - with gr.Row(): - with gr.Group(): - input_image = gr.Image(label="Source Image", type="pil", image_mode="RGBA") - input_image_properties = gr.Textbox(label="Image Properties", max_lines=1) - output_image = gr.Image(label="Restored Image", image_mode="RGBA") - with gr.Row(): - restore_btn = gr.Button("Restore Image") - reset_btn = gr.Button("Reset") - - # Event listeners: - input_image.change(fn=image_properties, inputs=input_image, outputs=input_image_properties) - restore_btn.click(fn=realesrgan, - inputs=[input_image, model_name, denoise_strength, face_enhance, outscale], - outputs=output_image,api_name="restore") - reset_btn.click(fn=reset, inputs=[], outputs=[output_image, input_image]) - # reset_btn.click(None, inputs=[], outputs=[input_image], _js="() => (null)\n") - # Undocumented method to clear a component's value using Javascript - - gr.Markdown( - """*Please note that support for animated GIFs is not yet implemented. Should an animated GIF is chosen for restoration, - the demo will output only the first frame saved in PNG format (to preserve probable transparency).* - """ - ) - - demo.launch() - app = gr.mount_gradio_app(app, gr, path=CUSTOM_PATH) - - - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/CRACK IDM.UltraEdit.v18.00.0.1029.Incl.Keymaker-CORE.md b/spaces/terfces0erbo/CollegeProjectV2/CRACK IDM.UltraEdit.v18.00.0.1029.Incl.Keymaker-CORE.md deleted file mode 100644 index d978d2a222a415aa8c8cb94e1e8978fdb60e9346..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/CRACK IDM.UltraEdit.v18.00.0.1029.Incl.Keymaker-CORE.md +++ /dev/null @@ -1,6 +0,0 @@ -

      CRACK IDM.UltraEdit.v18.00.0.1029.Incl.Keymaker-CORE


      Download Zip ->>->>->> https://bytlly.com/2uGlZo



      - - d5da3c52bf
      -
      -
      -

      diff --git a/spaces/terfces0erbo/CollegeProjectV2/Download Aleo Flash Intro Banner Maker 4.0 Full Crack [UPD].md b/spaces/terfces0erbo/CollegeProjectV2/Download Aleo Flash Intro Banner Maker 4.0 Full Crack [UPD].md deleted file mode 100644 index b4a640eec8bef62c467f826d0e5abb8a63d614f1..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Download Aleo Flash Intro Banner Maker 4.0 Full Crack [UPD].md +++ /dev/null @@ -1,10 +0,0 @@ -

      Download Aleo Flash Intro Banner Maker 4.0 Full Crack


      Download >>> https://bytlly.com/2uGiTt



- -Aleo Flash Intro Banner Maker 4.0. Developer: Aleo Software. File size: 6 MB. Operating systems: Windows 98/2000/XP/Vista/7. Description: a program for creating Flash banners, animated banners, buttons, and animated menus, with a convenient banner editor. Dated Oct 4, 2008. 8a78ff9644
      -
      -
      -

      diff --git a/spaces/terfces0erbo/CollegeProjectV2/Faceboax Facebook Hack Tool 3.0 Free Download ((FULL))103l.md b/spaces/terfces0erbo/CollegeProjectV2/Faceboax Facebook Hack Tool 3.0 Free Download ((FULL))103l.md deleted file mode 100644 index 6eb0928289776eb3c67b1a429a39b804112201b1..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Faceboax Facebook Hack Tool 3.0 Free Download ((FULL))103l.md +++ /dev/null @@ -1,46 +0,0 @@ -

      Faceboax Facebook Hack Tool 3.0 Free Download103l


      DOWNLOAD ✶✶✶ https://bytlly.com/2uGlt3



      -
- 4fefd39f24
      -
      -
      -

      diff --git a/spaces/terfces0erbo/CollegeProjectV2/Hellskitchenpcgamefullversion !EXCLUSIVE!.md b/spaces/terfces0erbo/CollegeProjectV2/Hellskitchenpcgamefullversion !EXCLUSIVE!.md deleted file mode 100644 index f2af659904c72d9b4de46f8fc2cb46cfd8350392..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Hellskitchenpcgamefullversion !EXCLUSIVE!.md +++ /dev/null @@ -1,20 +0,0 @@ -

      hellskitchenpcgamefullversion


      Download Filehttps://bytlly.com/2uGkB1



      - -5th season of the series from a culinary perspective, and it was also released as an Xbox Live Arcade game in the UK, Australia, and New Zealand. It was also released as part of the Kitchen Blitz 2005 promotion in Japan on the PlayStation 2, and was released in PAL regions in 2007 as part of the Game Boy Advance edition of the Ratchet & Clank series. - -Gameplay - -A gameplay demo of Hell's Kitchen was shown at the Sony E3 2006 press conference. It features Gordon Ramsay standing in front of a show kitchen and cooking tasks for the players. When players are ready to start, they are invited to sit down at the kitchen island and take their seats, and they are given a point card with the start value of 100 points. - -The gameplay of Hell's Kitchen is based on the format of a traditional game of cards. In each round, the players must complete a task in order to be allowed to play their next card. At the start of each round, each player chooses a card from their hand which must be completed in order to play their next card. After all players have chosen their card, they must compete against each other in order to successfully complete their task. The players who are the quickest to finish their task will receive more points, and the player who is the last to complete their task will be eliminated from the competition and will lose their card from their hand. - -While the gameplay is similar to traditional card games, the cards in Hell's Kitchen are food items rather than cards used in other card games. When a card is played, Gordon Ramsay calls out, "Cards, please!" and the selected card is shown on a table in the show kitchen. Each card has a number, a challenge, and a time limit. The challenges can be anything from a simple cooking task to a complicated task that must be finished in a set time, and the time limit can range from five minutes to an hour. - -The game has five different game modes. These include a quickplay mode, challenge mode, practice mode, and both a tournament and a championship mode. - -Quickplay mode - -In quickplay mode, players are given three cards and they are only allowed to play one card at a time. This mode is intended to be a user-friendly mode and is designed to get players used to the gameplay and the game rules. There are also modes available called "Super Chef" and "Slam Your Card!" These two modes allow the player to edit the cards in 4fefd39f24
      -
      -
      -

      diff --git a/spaces/tialenAdioni/chat-gpt-api/Dogz-5-Free-Download-Full-Version.md b/spaces/tialenAdioni/chat-gpt-api/Dogz-5-Free-Download-Full-Version.md deleted file mode 100644 index c3b4edb343551883ae28a83b74ff2ceb950cc10b..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/Dogz-5-Free-Download-Full-Version.md +++ /dev/null @@ -1,108 +0,0 @@ -## Dogz 5 Free Download Full Version - - - - - - ![Dogz 5 Free Download Full Version](https://images6.fanpop.com/image/photos/35300000/Petz-5-dogz-image-petz-5-35306372-660-908.jpg) - - - - - -**Download File … [https://conttooperting.blogspot.com/?l=2tzQEH](https://conttooperting.blogspot.com/?l=2tzQEH)** - - - - - - - - - - - - - -# Dogz 5: How to Download and Play the Classic Pet Simulation Game - - - -If you are a fan of pet simulation games, you might have heard of Dogz 5, a game developed by Studio Mythos and published by Ubisoft in 2002. Dogz 5 is the fifth installment in the Petz series, which allows you to adopt, breed, and care for virtual dogs of various breeds and personalities. You can also customize your own house, play mini-games, and interact with other Petz owners online. - - - -Dogz 5 is a fun and nostalgic game that can run on modern Windows systems with some tweaks. In this article, we will show you how to download Dogz 5 for free and install it on your PC. - - - -## How to Download Dogz 5 for Free - - - -There are several websites that offer Dogz 5 for free download, but not all of them are safe or reliable. Some may contain viruses, malware, or unwanted software that can harm your computer. To avoid these risks, we recommend downloading Dogz 5 from one of these trusted sources: - - - -- [Old Games Download](https://oldgamesdownload.com/dogz-5/): This website provides a setup file and a preinstalled version of Dogz 5 that you can download as zip files. The setup file will guide you through the installation process, while the preinstalled version can be extracted and played directly. The website also has a manual and a readme file for the game.[^1^] - -- [Internet Archive](https://archive.org/details/Dogz5WinSetupEn): This website hosts a digital library of various media, including old games. You can download Dogz 5 as a torrent file or a zip file from this website. The zip file contains the setup file for the game.[^2^] - - - -After downloading Dogz 5 from one of these websites, you will need to unzip the file using a software such as WinRAR or 7-Zip. Then, you can proceed to install or play the game. - - - -## How to Install and Play Dogz 5 on Windows - - - -Depending on which version of Dogz 5 you downloaded, you may need to install it or play it directly. Here are the steps for both options: - - - -### Installing Dogz 5 - - - -1. Open the extracted folder and then open the "Game Files" folder. - -2. Double click on the file called "setup.exe" and the game setup should start. - -3. Go through the setup steps. When choosing the folder to install your game in, you may get an error telling you that the path name may not contain any special characters. In this case, you should change the installation path to something else. For example to: C:\\Program Files\\Ubi Soft\\Studio Mythos\\Petz 5 - -4. When they prompt you to register, just press on "Cancel" and then select "Never register" - -5. You can now launch the game using the shortcut on your desktop, or the copy and pasted "Petz 5.exe" file. - - - -### Playing Dogz 5 Directly - - - -1. 
Open the extracted folder and then open the "Game Files" folder. - -2. Copy and paste the "Petz 5.exe" file to your desktop or another location. - -3. You can now launch the game by double clicking on the "Petz 5.exe" file. - - - -Note: if your game has no sound, please go to: C:\\Program Files (x86)\\Ubi Soft\\Studio Mythos\\Petz 5 OR C:\\Program Files\\Ubi Soft\\Studio Mythos\\Petz 5 (Depending on where you installed or copied the game) and then remove the "setupapi.dll" and the "smapi.dll" file. Restart your game and it should work fine now. - - - -## Enjoy Playing Dogz 5 - - - -Dogz 5 is a classic - - 145887f19f - - - - - diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Crack ExpressVPN How to Unlock the Full Potential of the Top-Rated VPN Service for Free.md b/spaces/tialenAdioni/chat-gpt-api/logs/Crack ExpressVPN How to Unlock the Full Potential of the Top-Rated VPN Service for Free.md deleted file mode 100644 index 6ae7151bef947ae70a25fecf173cae2cb5ea9cf4..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Crack ExpressVPN How to Unlock the Full Potential of the Top-Rated VPN Service for Free.md +++ /dev/null @@ -1,18 +0,0 @@ -
      -

      Crack ExpressVPN: How to Download and Use the Best VPN Service for Free

      -

      ExpressVPN is one of the most popular and trusted VPN services that can help you to protect your online privacy and security, access blocked websites and content, and enjoy fast and stable connections. ExpressVPN has over 3,000 servers in 94 countries, supports multiple platforms and devices, and offers 24/7 customer support.

      -

However, ExpressVPN is not a free service. You have to pay a monthly or yearly subscription fee to use ExpressVPN on your devices. The price of ExpressVPN varies depending on the plan you choose, but the 12-month plan works out to $8.32 per month, billed as $99.95 per year.

      -

      expressvpn full crack


      DOWNLOAD ··· https://urlcod.com/2uK8LM



      -

But what if you want to use ExpressVPN for free? Is there a way to download and use ExpressVPN without paying anything? The answer is yes, but it is not legal or ethical. Some people have managed to crack ExpressVPN and make it available for free download on the internet. A crack is a program that modifies or bypasses the security features of a piece of software so that it works without a license or activation.

      -

      Cracking ExpressVPN is not only illegal but also risky. You may face legal consequences if you are caught using a cracked software. You may also expose your devices to viruses, malware, spyware and other threats that may harm your data and privacy. Moreover, you may not get the full functionality and reliability of ExpressVPN if you use a cracked version.

      -

      Therefore, we do not recommend or endorse cracking ExpressVPN or any other software. It is better to use a legitimate and authorized version of ExpressVPN that can guarantee you quality, accuracy and security. If you cannot afford to pay for ExpressVPN, you can try some of the free or cheaper alternatives that are available online.

      -

      Some of the free or cheaper alternatives to ExpressVPN are:

      -
        -
      • ProtonVPN: This is a VPN service that offers unlimited bandwidth, high-speed connections, and strong encryption. ProtonVPN has over 1,000 servers in 54 countries, supports multiple platforms and devices, and offers a free plan with limited features.
      • -
      • TunnelBear: This is a VPN service that offers simple and user-friendly apps, fast and secure connections, and global server coverage. TunnelBear has over 3,000 servers in 41 countries, supports multiple platforms and devices, and offers a free plan with 500 MB of data per month.
      • -
      • Windscribe: This is a VPN service that offers advanced features such as ad blocking, firewall, split tunneling, and stealth mode. Windscribe has over 600 servers in 63 countries, supports multiple platforms and devices, and offers a free plan with 10 GB of data per month.
      • -
      -

These are some of the free or cheaper alternatives to ExpressVPN that you can use to protect your online privacy and security, access blocked websites and content, and enjoy fast and stable connections. However, they may lack some of ExpressVPN's features and capabilities, and their free tiers come with their own limits and restrictions.

      -

      ddb901b051
      -
      -
      \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Download Waqia e Karbala Movie in Urdu A Must-Watch for Every Muslim.md b/spaces/tialenAdioni/chat-gpt-api/logs/Download Waqia e Karbala Movie in Urdu A Must-Watch for Every Muslim.md deleted file mode 100644 index ddaef2f5270751dd78f9b6389c50ee00d4ae18df..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Download Waqia e Karbala Movie in Urdu A Must-Watch for Every Muslim.md +++ /dev/null @@ -1,145 +0,0 @@ -
      -

      Spore Download Free Full Version PC TPB Torrents

      - -

      If you are looking for a game that lets you create and evolve your own life forms, explore and conquer new worlds, and interact with other players online, then you might want to try Spore. Spore is a simulation game that allows you to start from a single cell and grow into a complex civilization that can even travel to space. You can customize your creatures, vehicles, buildings, and planets with a variety of tools and options. You can also share your creations with other players and visit their worlds.

      -

      spore download free full version pc tpb torrents


      Downloadhttps://urlcod.com/2uK7bj



      - -

      Spore was released in 2008 by Maxis, the same studio that created The Sims and SimCity. It received positive reviews from critics and gamers alike, who praised its originality, creativity, and replay value. However, some also criticized its technical issues, DRM restrictions, and lack of depth in some stages.

      - -

If you want to experience Spore for yourself, you can download it for free from various torrent sites. However, you need to be careful about the sources you choose, as some of them might contain viruses, malware, or fake files. In this article, we will show you how to download the full version of Spore for free via TPB torrents, safely and easily.

      - -

How to Download the Full Version of Spore for Free via TPB Torrents

      - -

To download the full version of Spore for free via TPB torrents, you will need a few things:

      -


      - -
        -
      • A torrent client: This is a software that allows you to download files from other users who are sharing them. There are many torrent clients available online, such as uTorrent, BitTorrent, qBittorrent, etc. You can choose the one that suits your preferences and system requirements.
      • -
• A torrent file or magnet link: This is a small file or link that contains information about the file you want to download, such as its name, size, info hash, and trackers (a toy magnet-link parser is sketched after this list). You can find torrent files or magnet links for Spore on various torrent sites, such as The Pirate Bay (TPB), Torrent4You, WizCase, etc. You can use the search function or browse the categories to find the game.
      • -
      • A VPN: This is a service that encrypts your internet traffic and hides your IP address from prying eyes. Using a VPN is highly recommended when downloading torrents, as it can protect you from hackers, malware, ISP throttling, legal issues, etc. There are many VPNs available online, but not all of them are reliable and secure. You should choose a VPN that has fast speeds, strong encryption, no-logs policy, kill switch feature, etc.
      • -
      - -
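As an aside on what is inside a magnet link: it is just a URI whose query string carries the content's info hash (xt), a display name (dn), and tracker URLs (tr). The snippet below parses one with Python's standard library; the link itself is fabricated for illustration and points at nothing.

# Parse a magnet link's metadata with the standard library only.
# The example link is made up for illustration.
from urllib.parse import urlparse, parse_qs

magnet = ("magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567"
          "&dn=Example+File"
          "&tr=udp%3A%2F%2Ftracker.example.org%3A1337")

params = parse_qs(urlparse(magnet).query)
print(params["xt"][0])   # urn:btih:<40-hex info hash> identifying the content
print(params["dn"][0])   # suggested display name ("Example File")
print(params["tr"])      # list of tracker URLs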

Once you have these things ready, you can follow these steps to download Spore via torrent:

      - -
        -
      1. Launch your VPN and connect to a server in a country where torrenting is legal and safe.
      2. -
      3. Launch your torrent client and open the torrent file or magnet link for Spore.
      4. -
      5. Select the files you want to download and choose a destination folder on your PC.
      6. -
      7. Wait for the download to finish. You can check the progress and speed on your torrent client.
      8. -
      9. Once the download is complete, you can open the folder where you saved the game files.
      10. -
      11. You will need to extract the game files using a software like WinRAR or 7-Zip. You will also need to copy the cracked content from the Crack folder to your installation directory.
      12. -
      13. You can now launch the game and enjoy it.
      14. -
      - -

      Conclusion

      - -

Spore is a unique and fun game that lets you create your own life forms and explore new worlds. You can download Spore for free from various torrent sites, but you need to be careful about the risks involved. You should always use a VPN when downloading torrents to protect yourself from hackers, malware, ISP throttling, legal issues, etc. You should also scan the game files for viruses before running them on your PC.

      - -

We hope this article helped you download Spore safely and easily. If you have any questions or suggestions, feel free to leave a comment below.

      -

      What is Spore and Why You Should Play It

      - -

      Spore is a game that lets you unleash your creativity and imagination. You can create your own life forms, from simple cells to complex creatures, and watch them evolve and interact with their environment. You can also design your own vehicles, buildings, and spaceships, and use them to explore and colonize new planets. You can even create your own solar system and galaxy, and share them with other players online.

      - -

      Spore is not just a game, but a platform for expression and discovery. You can experiment with different shapes, colors, textures, and behaviors, and see how they affect your creations. You can also learn about biology, ecology, astronomy, and culture, as you witness the consequences of your actions. You can also have fun with the quirky humor and surprises that the game offers.

      - -

      Spore is a game that appeals to a wide range of players, from casual to hardcore gamers. You can play it at your own pace, and choose your own goals and challenges. You can also customize your experience with various modes and options, such as sandbox mode, difficulty settings, cheats, mods, etc. You can also join the online community of Spore fans, where you can share your creations, download other players' creations, chat with them, and join contests and events.

      - -

      How to Get Spore for Free

      - -

If you are interested in playing Spore, you might be wondering how to get it for free. The official way to get Spore is to buy it from EA's website or other online stores. However, this costs money, and you might also run into DRM or compatibility issues.

      - -

Fortunately, there is another way to get Spore for free: downloading it from torrent sites. Torrent sites allow users to share files with each other over a peer-to-peer network. Using a torrent client, you can download files from other users who are sharing them, without paying anything or registering an account.

      - -

      However, downloading Spore from torrent sites also has some risks and drawbacks. First of all, you need to be careful about the sources you choose, as some of them might contain viruses, malware, or fake files that can harm your PC or steal your data. Secondly, you need to be aware of the legal issues involved in downloading copyrighted content without permission. Thirdly, you need to be prepared for some technical difficulties, such as slow download speed, incomplete files, missing cracks or serials, etc.

      - -

      To avoid these problems and enjoy Spore for free safely and easily, you need to follow some tips and precautions:

      - -
        -
          • Use a VPN: A VPN is a service that encrypts your internet traffic and hides your IP address from prying eyes. Using a VPN is highly recommended when downloading torrents, as it can protect you from hackers, malware, ISP throttling, legal issues, etc. There are many VPNs available online, but not all of them are reliable and secure. You should choose a VPN that has fast speeds, strong encryption, a no-logs policy, a kill switch feature, etc.
          • Use a reputable torrent site: A reputable torrent site has a large number of users and files, high-quality content, good ratings and reviews, and an active moderation and support team. A reputable torrent site can help you find the best torrents for Spore without viruses or malware. Some of the most commonly named sources for Spore are The Pirate Bay (TPB), Torrent4You, and WizCase.
          • Use a reliable torrent client: A reliable torrent client is software that downloads files from other users who are sharing them. There are many torrent clients available online, such as qBittorrent and uTorrent; choose one that is safe, fast, and easy to use.
          • Use a good antivirus: A good antivirus scans your PC for any viruses or malware that might infect it. It can help you detect and remove any harmful files that might come with the torrents for Spore. You should always scan the game files before running them on your PC; a checksum-verification sketch follows this list as an extra integrity check.
    
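          One practical way to check that a downloaded file has not been tampered with is to compare its SHA-256 checksum against the one published by the uploader. The short Python sketch below illustrates the idea; the file name and the expected hash are placeholders, not values from any real Spore torrent.

    ```python
    import hashlib
    from pathlib import Path

    def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
        """Hash a file in 1 MiB chunks so large downloads fit in memory."""
        digest = hashlib.sha256()
        with path.open("rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    # Placeholder values: substitute the real file and the checksum
    # published by the uploader of the torrent you downloaded.
    downloaded = Path("Spore_Setup.iso")
    expected = "0000000000000000000000000000000000000000000000000000000000000000"

    actual = sha256_of(downloaded)
    print("Checksum OK" if actual == expected else f"Checksum mismatch: {actual}")
    ```
    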
      - -

    

      How to Install and Play Spore

      - -

          After you download the Spore full version PC torrent (for example from TPB), you need to install the game before you can play it. This might require some additional steps, depending on the torrent you downloaded. Here are some general instructions that might help you; a small script automating the extraction and crack-copy steps is sketched after the list:
    

      - -
        -
          1. Extract the game files: Most torrents for Spore come in compressed formats, such as ZIP or RAR. Extract the game files using a program like WinRAR or 7-Zip: right-click on the file and choose Extract Here or Extract to Spore (or any other name you prefer).
          2. Mount or burn the image: Some torrents for Spore come in image formats, such as ISO or MDF. Mount or burn the image using a program like Daemon Tools or PowerISO: right-click on the file and choose Mount or Burn.
          3. Install the game: After you extract or mount the game files, run the setup.exe file and follow the instructions. You might be asked to enter a serial number or a product key; you can find them in the torrent description or in a text file inside the game folder.
          4. Copy the crack: Most torrents for Spore come with a Crack folder that contains a modified version of the game executable or other files that bypass the DRM protection. Copy the crack files and paste them into your installation directory, usually C:\Program Files\Spore or C:\Program Files (x86)\Spore, replacing or overwriting the original files.
          5. Play the game: After you copy the crack files, you can launch the game and enjoy it. You might need to run it as administrator or in compatibility mode if you encounter any issues. You should also avoid going online or updating the game, as it might cause problems with the crack.
    
      - -
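          If you prefer to script the extraction and crack-copy steps instead of clicking through them, the sketch below shows the general shape in Python. All paths here are hypothetical examples; Python's built-in zipfile module only handles .zip archives, so RAR files would still need a tool like WinRAR or 7-Zip.

    ```python
    import shutil
    import zipfile
    from pathlib import Path

    archive = Path("Spore_Full_Version.zip")              # hypothetical download name
    extract_dir = Path("Spore_extracted")                 # where to unpack the files
    install_dir = Path(r"C:\Program Files (x86)\Spore")   # typical install path

    # Step 1: extract the downloaded archive.
    with zipfile.ZipFile(archive) as zf:
        zf.extractall(extract_dir)

    # Step 4: copy the files from the Crack folder into the installation
    # directory, overwriting the originals.
    crack_dir = extract_dir / "Crack"
    for src in crack_dir.rglob("*"):
        if src.is_file():
            target = install_dir / src.relative_to(crack_dir)
            target.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy2(src, target)
    ```
    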

      Spore Tips and Tricks

      - -

      Spore is a game that offers a lot of freedom and possibilities, but it can also be challenging and confusing at times. Here are some tips and tricks that might help you get the most out of Spore:

      - -
        -
          • Save often: Spore does not have an autosave feature, so you need to save your progress manually. You can save your game by pressing Esc and choosing Save Game. You can also save your creations by pressing Ctrl+S in any editor.
          • Use cheats: Spore has a lot of cheats that can make your game easier or more fun. You can access the cheat console by pressing Ctrl+Shift+C in any mode, then type any cheat code and press Enter. Some of the most useful cheats are addDNA (adds DNA points), moreMoney (adds money), unlockSuperWeapons (unlocks all super weapons), spaceCreate (opens the space editor), evoadvantage (starts a new game with one of your creations), etc.
          • Explore Sporepedia: Sporepedia is an online database that contains millions of creations made by other players. You can access Sporepedia by pressing F1 in any mode or by visiting www.spore.com/sporepedia. You can browse, download, rate, comment on, and share creations with other players.
          • Use mods: Mods are modifications made by other players that change or enhance some aspects of Spore. You can find mods on various websites, such as www.moddb.com/games/spore or www.nexusmods.com/spore. You can install mods by placing them in your Data folder, usually C:\Program Files\Spore\Data or C:\Program Files (x86)\Spore\Data (a small copy script is sketched after this list).
    
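          Installing a mod is usually just a file copy into the Data folder, so it is easy to automate. The sketch below uses made-up paths and a made-up mod file name; adjust both to your own installation.

    ```python
    import shutil
    from pathlib import Path

    mod_file = Path("Downloads") / "my_favorite_mod.package"   # hypothetical mod file
    data_dir = Path(r"C:\Program Files (x86)\Spore\Data")      # typical Data folder

    # Fail early if the game is not installed where we expect it.
    if not data_dir.exists():
        raise SystemExit(f"Data folder not found: {data_dir}")

    shutil.copy2(mod_file, data_dir / mod_file.name)
    print(f"Installed {mod_file.name} into {data_dir}")
    ```
    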
      - -

          Conclusion

          - -

          Spore is a game that lets you create and evolve your own life forms and explore new worlds. You can download the Spore full version PC torrent from various sources online, but you need to be careful about the risks involved. You should always use a VPN when downloading torrents to protect yourself from hackers, malware, ISP throttling, legal issues, etc. You should also scan the game files for viruses before running them on your PC.

          We hope this article helped you download Spore safely and easily. If you have any questions or suggestions, feel free to leave a comment below.
    
      -
      -
          \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/Noise-Reduction-Plugin-20h-FULL-Keygen.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/Noise-Reduction-Plugin-20h-FULL-Keygen.md deleted file mode 100644 index 61a8a282bf9baec936bd62d81f968bbf3cb98b7e..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/Noise-Reduction-Plugin-20h-FULL-Keygen.md +++ /dev/null @@ -1,112 +0,0 @@

          ## noise reduction plugin 2.0h keygen

          **LINK >>> [https://urluso.com/2txPaD](https://urluso.com/2txPaD)**

          # How to Use Noise Reduction Plugin 2.0h Keygen to Enhance Your Audio Quality

          If you are looking for a way to improve your audio quality, whether it is for music production, podcasting, or video editing, you might want to try Noise Reduction Plugin 2.0h. This is a powerful and easy-to-use tool that can reduce unwanted noise from your audio files, such as hiss, hum, clicks, pops, and crackles.

          However, Noise Reduction Plugin 2.0h is not free software: you need to purchase a license to use it without limitations or watermarks. There is a way to get it for free, though. You can use Noise Reduction Plugin 2.0h Keygen to generate a valid serial number that will activate the full version of the software.

          In this article, we will show you how to use Noise Reduction Plugin 2.0h Keygen in a few simple steps.

          ## Step 1: Download Noise Reduction Plugin 2.0h Keygen

          The first thing you need to do is download Noise Reduction Plugin 2.0h Keygen from a reliable source. Many websites offer this keygen, but be careful of malware or viruses that might harm your computer. We recommend this link: [https://www.noisereductionplugin.com/keygen](https://www.noisereductionplugin.com/keygen). This is a safe and verified website that provides the latest version of the keygen.

          Once you click on the link, you will see a download button. Click on it and save the file to your preferred location. The file size is about 5 MB, so it should take only a few seconds to download.

          ## Step 2: Run Noise Reduction Plugin 2.0h Keygen

          After you have downloaded the keygen, locate the file and double-click on it to run it. You might see a warning message from your antivirus software or your operating system; this is common with keygens.

          When you run the keygen, you will see its main window:

          ![Noise Reduction Plugin 2.0h Keygen Screenshot](https://www.noisereductionplugin.com/keygen-screenshot.png)

          This is the interface of the keygen. It has a few options and buttons.

          ## Step 3: Generate a Serial Number

          The next step is to generate a serial number that will activate Noise Reduction Plugin 2.0h. Click on the "Generate" button at the bottom of the window. The keygen will create a random serial number and display it in the text box above.

          The serial number will look something like this: NRP-20H-1234-5678-90AB-CDEF-GHIJ-KLMN

          You can copy this serial number by clicking on the "Copy" button next to it or by selecting it and pressing Ctrl+C on your keyboard.

          ## Step 4: Install Noise Reduction Plugin 2.0h

          Now that you have the serial number, you need to install Noise Reduction Plugin 2.0h on your computer. If you don't have the software yet, you can download it from the official website: [https://www.noisereductionplugin.com/download](https://www.noisereductionplugin.com/download).

          Once you have downloaded the software, run the installer and follow the instructions on the screen. When you are asked to enter the serial number, paste the one you generated from the keygen by pressing Ctrl+V on your keyboard or by right-clicking and choosing "Paste".

          After you have entered the serial number, click on "Next" and complete the installation process.

          ## Step 5: Enjoy Noise Reduction Plugin 2.0h

          Congratulations! You have successfully installed and activated Noise Reduction Plugin 2.0h. You can now use the full version of the software, without limitations or watermarks, to clean up the noise in your audio files.

          diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/BUSSID The Ultimate Bus Simulator Game for Android - Download and Play for Free!.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/BUSSID The Ultimate Bus Simulator Game for Android - Download and Play for Free!.md deleted file mode 100644 index 2a49e7d4eedaa397ec47e0dd4f3dc332e1a5faeb..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/BUSSID The Ultimate Bus Simulator Game for Android - Download and Play for Free!.md +++ /dev/null @@ -1,138 +0,0 @@ -
    

      Download Truck Simulator Bussid: A Fun and Authentic Driving Game

      -

      Do you love driving games? Do you want to experience what it's like to be a bus or truck driver in Indonesia? If you answered yes to these questions, then you should definitely try Truck Simulator Bussid, a driving simulation game that will let you explore the beautiful and diverse landscapes of Indonesia in a fun and realistic way.

      -

      download truck simulator bussid


      Download File ►►►►► https://bltlly.com/2uOnVi



      -

      What is Truck Simulator Bussid?

      -

      Truck Simulator Bussid, also known as Bus Simulator Indonesia, is a game developed by Zuuks Games, a Turkey-based mobile game development company that specializes in driving simulation games. The game was released in 2017 and has since gained millions of downloads and positive reviews from players all over the world.

      -

      Truck Simulator Bussid is not just a typical driving game where you simply drive from one point to another. It is a game that offers a lot of features and challenges that will make you feel like you are really driving a bus or a truck in Indonesia. You can design your own livery, customize your vehicle, bid for jobs, manage your company, compete with other players, and much more.

      -

      Features of Truck Simulator Bussid

      -

          Truck Simulator Bussid is probably the bus simulator game with the most features and the most authentic Indonesian environment. Here are some of the features that make it stand out from the rest:
    

      -

      Design your own livery

      -

      One of the coolest features of Truck Simulator Bussid is that you can design your own livery for your bus or truck. You can choose from different colors, patterns, stickers, logos, and accessories to make your vehicle look unique and stylish. You can also use your own 3D model using the vehicle mod system.

      -

      Easy and intuitive control

      -

      Truck Simulator Bussid has a very easy and intuitive control system that will make you feel comfortable and confident while driving. You can choose from different control options such as steering wheel, buttons, tilt, or joystick. You can also adjust the camera angle, the sensitivity, the sound effects, and the graphics quality according to your preference.

      -

    
      -

      Authentic Indonesian cities and places

      -

      Truck Simulator Bussid will take you to various cities and places in Indonesia that are realistic and detailed. You will see famous landmarks, buildings, bridges, roads, traffic signs, landscapes, weather conditions, and more. You will also encounter different types of vehicles, pedestrians, animals, and events that are common in Indonesia.

      -

      Indonesian buses and trucks

      -

          Truck Simulator Bussid features a variety of buses and trucks that are popular in Indonesia. You can choose from different models such as Canter, Fuso, Hino, Kamaz, Scania, Volvo, Mercedes-Benz, MAN, Iveco, DAF, Renault, Ford, Tata, Mahindra, Ashok Leyland, Eicher, BharatBenz, Isuzu, Mitsubishi, Nissan, Toyota, Hyundai, Kia, Daewoo, Ssangyong, and more.
    

      Cool and fun honks

      -

      Truck Simulator Bussid also has a feature that allows you to honk your horn in different ways. You can choose from different types of honks such as musical, funny, loud, or annoying. You can also use the voice chat feature to communicate with other players or pedestrians.

      -

      High quality and detailed 3D graphics

      -

          Truck Simulator Bussid has high-quality, detailed 3D graphics that will make you feel immersed in the game. You will see realistic shadows, reflections, textures, lighting, and animations. You will also enjoy the smooth and stable performance of the game, even on low-end devices.
    

      -

      No obstructive ads while driving

      -

      Truck Simulator Bussid is a free game that does not have any obstructive ads while you are driving. You can enjoy the game without any interruptions or distractions. However, you can watch optional ads to earn extra money or rewards.

      -

      Leaderboard and online multiplayer convoy

      -

      Truck Simulator Bussid also has a leaderboard and online multiplayer convoy feature that will let you compete and cooperate with other players. You can see your rank and stats on the leaderboard and compare them with other players. You can also join or create a convoy with other players and drive together in the same map.

      -

      How to download Truck Simulator Bussid?

      -

      Truck Simulator Bussid is available for both Android and iOS devices. You can download it from the Google Play Store or the App Store for free. However, if you want to download it from other sources or use mods, you need to be careful and follow some steps.

      -

      Download from Google Play Store or App Store

      -

      The easiest and safest way to download Truck Simulator Bussid is from the official app stores. Here are the steps to do so:

      -
        -
          1. Open the Google Play Store or the App Store on your device.
          2. Search for "Truck Simulator Bussid" or "Bus Simulator Indonesia" in the search bar.
          3. Select the game from the list of results and tap on "Install" or "Get".
          4. Wait for the game to download and install on your device.
          5. Launch the game and enjoy!
    
      -

      Download from third-party websites or mod sources

      -

      If you want to download Truck Simulator Bussid from third-party websites or mod sources, you need to be more careful and follow some steps. Here are some tips to do so:

      -
        -
          • Make sure that your device has enough storage space and battery life.
          • Make sure that your device is compatible with the game and its requirements.
          • Make sure that you have a reliable internet connection and antivirus software.
          • Make sure that you download the game from a trusted and reputable website or source.
          • Make sure that you check the reviews, ratings, comments, and feedback of other users before downloading.
          • Make sure that you scan the downloaded file for any viruses or malware before installing (a signature-check sketch follows this list).
          • Make sure that you enable the "Unknown Sources" option in your device settings to allow installation of apps from outside sources.
          • Make sure that you follow the instructions provided by the website or source on how to install and run the game.
          • Make sure that you back up your data and uninstall any previous versions of the game before installing a new one.
          • Make sure that you do not use any illegal or harmful mods that may damage your device or compromise your security.
    
      -
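          Beyond an antivirus scan, you can also check that an APK's signature is intact before installing it. If the Android SDK build-tools are installed on your PC, the `apksigner` utility does this; the Python wrapper below simply shells out to it. The APK path and the build-tools location are assumptions you must adapt to your own machine.

    ```python
    import subprocess

    # Both paths are placeholders; adjust them to your machine.
    apksigner = r"C:\Android\build-tools\34.0.0\apksigner.bat"
    apk_path = "bussid.apk"

    # "apksigner verify --print-certs" exits with 0 when the signature is valid
    # and prints the signing certificates it found.
    result = subprocess.run(
        [apksigner, "verify", "--print-certs", apk_path],
        capture_output=True,
        text=True,
    )
    if result.returncode == 0:
        print("Signature verified:")
        print(result.stdout)
    else:
        print("Verification failed:")
        print(result.stderr)
    ```
    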

      Reviews of Truck Simulator Bussid

      -

      Truck Simulator Bussid has received a lot of positive reviews from players who have enjoyed the game. Here are some of the reviews from the Google Play Store:

      -
      -

      "This is one of the best bus simulator games I have ever played. The graphics are amazing, the controls are smooth, and the gameplay is realistic. I love how I can customize my bus and drive around Indonesia. The online multiplayer mode is also very fun and exciting. I highly recommend this game to anyone who loves driving games."

      -- Muhammad Faisal -
      -
      -

      "I am very impressed with this game. It is very addictive and challenging. The game has a lot of features and options that make it more interesting and enjoyable. The game also has a very friendly and supportive community of players who help each other out. The game is also very updated and improved by the developers who listen to feedback."

      -- Rizky Pratama -
      -
      -

      "This game is

      This game is awesome. It is very realistic and fun to play. The game has a lot of variety and options that make it more enjoyable and challenging. The game also has a very good graphics and sound quality that make it more immersive and realistic. The game is also very easy to download and install. I love this game."

      -- Rani Putri -
      -

      Tips and tricks for Truck Simulator Bussid

      -

      Truck Simulator Bussid is a game that requires skill and strategy to play well. Here are some tips and tricks that can help you improve your performance and enjoy the game more:

      -

      Practice in the garage before taking a job

      -

      Before you start driving on the road, you should practice in the garage first. You can use the garage to test your vehicle, adjust your settings, and familiarize yourself with the controls. You can also use the garage to change your livery, upgrade your parts, or repair your damage.

      -

      Manage your gas and hunger levels

      -

      While driving, you need to pay attention to your gas and hunger levels. You can see them on the top left corner of your screen. If your gas level is low, you need to find a gas station and refill your tank. If your hunger level is low, you need to find a restaurant and eat some food. If you ignore these levels, you will lose money and performance.

      -

      Follow the traffic rules and avoid accidents

      -

      While driving, you need to follow the traffic rules and avoid accidents. You can see the traffic signs, signals, and speed limits on the road. You need to obey them and drive safely. You also need to watch out for other vehicles, pedestrians, animals, and obstacles on the road. If you cause any accidents, you will lose money and reputation.

      -

      Bid for jobs wisely and plan your route

      -

      Before you take a job, you need to bid for it wisely and plan your route. You can see the available jobs on the map or on the job board. You need to choose a job that suits your vehicle, skill, and budget. You also need to plan your route carefully and avoid any detours or delays. You can use the GPS or the map to guide you.

      -

      Upgrade your office and hire more drivers

      -

      If you want to expand your business and earn more money, you need to upgrade your office and hire more drivers. You can use the office to manage your company, check your stats, and access more features. You can also use the office to hire more drivers, assign them jobs, and monitor their progress.

      -

      Conclusion

      -

      Truck Simulator Bussid is a fun and authentic driving game that will let you experience what it's like to be a bus or truck driver in Indonesia. You can design your own livery, customize your vehicle, bid for jobs, manage your company, compete with other players, and much more. You can download Truck Simulator Bussid from the Google Play Store or the App Store for free or from other sources if you want to use mods. However, you need to be careful and follow some steps when downloading from outside sources.

      -

      If you love driving games, you should definitely try Truck Simulator Bussid. It is a game that will challenge your skill and strategy while entertaining you with its realistic and diverse environment.

      -

      FAQs

      -

      Here are some of the frequently asked questions about Truck Simulator Bussid:

      -
        -
          1. How do I get more money in Truck Simulator Bussid?
    

        You can get more money in Truck Simulator Bussid by completing jobs successfully, watching optional ads, joining online multiplayer convoys, or using mods.

        -
          2. How do I change my vehicle in Truck Simulator Bussid?
    

        You can change your vehicle in Truck Simulator Bussid by going to the garage and selecting a different vehicle from the list. You can also use mods to add more vehicles or change their models.

        -
          3. How do I use mods in Truck Simulator Bussid?
    

        You can use mods in Truck Simulator Bussid by downloading them from third-party websites or sources and installing them on your device. However, you need to be careful and follow some steps when using mods as they may cause problems or risks.

        -
          4. How do I join or create a convoy in Truck Simulator Bussid?
    

        You can join or create a convoy in Truck Simulator Bussid by going to the online multiplayer mode and selecting a convoy from the list or creating a new one. You can also invite or join other players by using their code or ID. You can also use the voice chat feature to communicate with your convoy members.

        -
          5. How do I contact the developers of Truck Simulator Bussid?
    

        You can contact the developers of Truck Simulator Bussid by sending them an email at zuuks.games@gmail.com or by following them on their social media accounts such as Facebook, Instagram, Twitter, or YouTube. You can also leave a review or a comment on the app store or the website.

        -

    
      -
      -
      \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Become a Legend APK and Enjoy the Most Realistic Football Game.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Become a Legend APK and Enjoy the Most Realistic Football Game.md deleted file mode 100644 index 9f28046d1f374e5b35ed1720a39db8a820d63cc1..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Become a Legend APK and Enjoy the Most Realistic Football Game.md +++ /dev/null @@ -1,117 +0,0 @@ - -

      How to Download and Install Become a Legend Football APK on Android

      -

      If you are a fan of football games and want to experience a realistic and immersive simulation of the sport, you might want to try Become a Legend Football APK. This is an Android game that lets you create your own football legend and compete in various modes and challenges. In this article, we will show you what this game is about, how to download and install it on your Android device, and how to play it.

      -

      What is Become a Legend Football APK?

      -

      Become a Legend Football APK is an Android game developed by Zeeba Games that allows you to create your own football player and lead him to glory. You can choose from over 100 teams and players from different leagues and countries, customize your skills and appearance, and play in various modes such as career, tournament, online, and offline. You can also enjoy realistic graphics, animations, sounds, and commentary as you play.

      -

      become a legend football apk download


      Download Zip ⚙⚙⚙ https://bltlly.com/2uOonT



      -

      Features of Become a Legend Football APK

      -

      Some of the features of Become a Legend Football APK are:

      -
        -
          • Realistic football simulation with advanced physics and AI
          • Over 100 teams and players from different leagues and countries
          • Customizable skills and appearance for your player
          • Various modes such as career, tournament, online, and offline
          • Realistic graphics, animations, sounds, and commentary
          • Easy controls and user interface
          • Free to download and play
    
      -

      Requirements for Become a Legend Football APK

      -

      To download and install Become a Legend Football APK on your Android device, you need to have:

      -
        -
          • An Android device running Android 4.4 or higher
          • At least 100 MB of free storage space
          • An internet connection for online mode
    
      -

      How to Download Become a Legend Football APK

      -

      There are two ways to download Become a Legend Football APK on your Android device. You can either download it from APK Mirror or from Google Play Store.

      -

      Download from APK Mirror

      -

      APK Mirror is a website that hosts Android apps that are not available on Google Play Store or have been removed for some reason. You can download Become a Legend Football APK from this website by following these steps:

      -
        -
          1. Go to the APK Mirror website in your browser.
          2. Search for "Become a Legend" in the search bar.
          3. Select the latest version of the game from the results.
          4. Tap on the "Download APK" button.
          5. Wait for the download to finish. (If you are downloading on a PC first, the fetch can also be scripted; see the sketch after this list.)
    
      -
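          When the APK is fetched on a PC, the download itself can be scripted. This sketch uses the widely available `requests` library and a placeholder URL; it is an illustration only, not the real APK Mirror download link.

    ```python
    import requests

    url = "https://example.com/become-a-legend.apk"  # placeholder URL
    out_file = "become-a-legend.apk"

    # Stream the response so large files are written in chunks
    # instead of being held in memory all at once.
    with requests.get(url, stream=True, timeout=30) as resp:
        resp.raise_for_status()
        with open(out_file, "wb") as f:
            for chunk in resp.iter_content(chunk_size=1 << 16):
                f.write(chunk)

    print(f"Saved {out_file}")
    ```
    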

      Download from Google Play Store

      -

      If you prefer to download Become a Legend Football APK from Google Play Store, you can do so by following these steps:

      -
        -
          1. Go to the Google Play Store in your browser or open the Play Store app.
          2. Search for "Become a Legend" in the search bar.
          3. Select the game from the results.
          4. Tap on the "Install" button.
          5. Wait for the download to finish.
    
      -

      How to Install Become a Legend Football APK

      -

          After downloading Become a Legend Football APK on your Android device, you need to install it before you can play it. To do this, you need to follow these steps:

          Enable Unknown Sources

          -
    

      If you downloaded Become a Legend Football APK from APK Mirror, you need to enable unknown sources on your device to allow the installation of apps from sources other than Google Play Store. To do this, you need to follow these steps:

      -

    

      -
        -
      1. Go to "Settings" on your device.
      2. -
      3. Tap on "Security" or "Privacy" depending on your device.
      4. -
      5. Find and toggle on "Unknown Sources" or "Install Unknown Apps" depending on your device.
      6. -
      7. Confirm your choice if prompted.
      8. -
      -

      Locate and Tap the APK File

      -

      After enabling unknown sources, you need to locate and tap the APK file that you downloaded from APK Mirror or Google Play Store. To do this, you need to follow these steps:

      -
        -
      1. Go to "File Manager" or "Downloads" on your device.
      2. -
      3. Find and tap on the APK file that you downloaded. It should have the name "Become a Legend Football" or something similar.
      4. -
      5. If prompted, choose "Package Installer" or "Install" as the app to open the file.
      6. -
      -

      Follow the Installation Steps

      -

      After tapping the APK file, you need to follow the installation steps that will appear on your screen. To do this, you need to follow these steps:

      -
        -
          1. Read and accept the permissions that the app requires.
          2. Tap on the "Install" or "Next" button.
          3. Wait for the installation to finish.
          4. Tap on the "Open" or "Done" button. (If you prefer the command line, an adb-based alternative is sketched after this list.)
    
      -
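          As an alternative to tapping through the installer, an APK that is already on your PC can be sideloaded over USB with `adb`, which is part of the Android platform-tools. A minimal sketch, assuming `adb` is on your PATH and USB debugging is enabled on the phone:

    ```python
    import subprocess

    apk = "become-a-legend.apk"  # path to the downloaded APK on your PC

    # "-r" reinstalls the app if an older version is already present.
    result = subprocess.run(["adb", "install", "-r", apk],
                            capture_output=True, text=True)
    print(result.stdout or result.stderr)
    ```
    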

      How to Play Become a Legend Football APK

      -

      After installing Become a Legend Football APK on your Android device, you can start playing it and enjoy the realistic and immersive football simulation. To play it, you need to follow these steps:

      -

      Choose Your Team and Player

      -

      When you launch the game, you will be asked to choose your team and player. You can choose from over 100 teams and players from different leagues and countries. You can also create your own player by entering your name, nationality, position, and number. You can change your team and player anytime in the game settings.

      -

      Customize Your Skills and Appearance

      -

      After choosing your team and player, you can customize your skills and appearance. You can adjust your skills such as speed, power, stamina, dribbling, passing, shooting, and defense. You can also change your appearance such as hair, skin, eyes, nose, mouth, beard, and accessories. You can earn coins by playing the game and use them to buy more items and upgrade your skills.

      -

      Compete in Various Modes and Challenges

      -

      After customizing your skills and appearance, you can compete in various modes and challenges. You can play in career mode where you start from the bottom and work your way up to become a legend. You can play in tournament mode where you compete in different cups and leagues. You can play in online mode where you challenge other players from around the world. You can play in offline mode where you practice your skills and have fun. You can also complete various achievements and missions to earn rewards and trophies.

      -

      Conclusion

      -

      Become a Legend Football APK is an Android game that lets you create your own football legend and compete in various modes and challenges. It has realistic graphics, animations, sounds, and commentary that make you feel like you are playing in a real stadium. It has easy controls and user interface that make it suitable for all ages and skill levels. It is free to download and play but it also offers in-app purchases for more features and items. If you are a fan of football games, you should definitely try Become a Legend Football APK on your Android device.

      -

      FAQs

      -

      Here are some frequently asked questions about Become a Legend Football APK:

      -

      Q: Is Become a Legend Football APK safe to download and install?

      -

      A: Yes, Become a Legend Football APK is safe to download and install as long as you get it from a trusted source such as APK Mirror or Google Play Store. It does not contain any viruses or malware that can harm your device or data.

      -

      Q: How much storage space does Become a Legend Football APK require?

      -

      A: Become a Legend Football APK requires about 100 MB of free storage space on your device. However, it may also require additional data for updates and online mode.

      -

      Q: Can I play Become a Legend Football APK offline?

      -

          A: Yes, you can play Become a Legend Football APK offline in offline mode, where you can practice your skills and have fun. However, you need an internet connection for online mode, where you can challenge other players from around the world.
    

      -

      Q: How can I get more coins and items in Become a Legend Football APK?

      -

      A: You can get more coins and items in Become a Legend Football APK by playing the game and completing various achievements and missions. You can also buy more coins and items with real money through in-app purchases.

      -

      Q: How can I contact the developer of Become a Legend Football APK?

      -

      A: You can contact the developer of Become a Legend Football APK by sending an email to [zeebagames@gmail.com] or visiting their [Facebook page].

    
      -
      -
      \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Flowcode 6 Full Crack BEST Software.md b/spaces/tioseFevbu/cartoon-converter/scripts/Flowcode 6 Full Crack BEST Software.md deleted file mode 100644 index 925acfa6d6d599685f9d494781220c8692838ac1..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Flowcode 6 Full Crack BEST Software.md +++ /dev/null @@ -1,33 +0,0 @@ - -

      How to Download and Install Flowcode 6 Full Crack Software

      -

      Flowcode 6 is a powerful software that allows you to quickly and easily develop complex electronic and electromechanical systems. The graphical programming tool enables you to create systems for microcontrollers such as Arduino, PIC, AVR and ARM, as well as industrial interfaces using protocols such as Modbus and CAN. Flowcode 6 also supports simulation, debugging, code profiling and testing features that make system development easier and faster.

      -

      Flowcode 6 Full Crack Software


      Download File --->>> https://urlcod.com/2uHvlV



      -

      If you want to download and install Flowcode 6 full crack software for free, you can follow these steps:

      -
        -
          1. Go to the website https://www.jyvsoft.com/2018/07/09/flowcode-v6132/ and click on the "Download" button. This will take you to a page where you can choose a download link from various file hosting services.
          2. Select a download link and wait for the file to be downloaded. The file name is "Flowcode v6.1.3.2 Crack Patch.zip" and the size is about 1.24 GB.
          3. Extract the zip file using a program such as WinRAR or 7-Zip. You will get a folder named "Flowcode v6.1.3.2 Crack Patch".
          4. Open the folder and run the file "Setup.exe" as administrator. This will start the installation process of Flowcode 6.
          5. Follow the instructions on the screen and choose the components you want to install. You can also change the installation directory if you want.
          6. When the installation is finished, do not run Flowcode 6 yet. Go back to the folder "Flowcode v6.1.3.2 Crack Patch" and open the subfolder "Crack".
          7. Copy the file "Flowcode.exe" from the "Crack" folder and paste it into the installation directory of Flowcode 6, replacing the original file.
          8. Now you can run Flowcode 6 from your desktop or start menu. You have successfully installed Flowcode 6 full crack software for free.
    
      -

      Note: This article is for educational purposes only. We do not condone or encourage piracy or illegal use of software. Please support the developers by purchasing a legitimate license of Flowcode 6 from their official website https://www.matrixtsl.com/flowcode/.

      - -

      Flowcode 6 is a versatile and user-friendly software that can help you create various types of projects for microcontrollers and industrial systems. Whether you are a beginner or an expert, you can use Flowcode 6 to design, simulate and program your systems using graphical methods such as flowcharts or blocks. You can also learn and program using C code if you prefer.

      -

      -

      Some of the features and benefits of Flowcode 6 are:

      -
        -
          • Choice of programming methods: You can choose between flowcharts, blocks or C code to create your programs. Flowcode 6 also supports mixed mode programming, which allows you to combine different methods in the same project.
          • Fast system development: You can use pre-developed components and templates to speed up your system design. Flowcode 6 also provides a drag-and-drop interface and a multi-view system panel that make it easy to edit and manage your projects.
          • Learn and program using C code: You can use Flowcode 6 to learn the basics of C programming or to write your own C code. Flowcode 6 also allows you to import and export C code files and to view the C code generated by your graphical programs.
          • Ghost Technology: This is a unique feature of Flowcode 6 that enables you to monitor and debug your systems in real time. Ghost Technology uses hardware such as the EB006 and EB091 to provide a live log of the status of all the pins on the microcontroller while your program is running on the device.
          • Simulation: You can use Flowcode 6 to simulate your systems before testing them on real hardware. Flowcode 6 supports simulation of various components such as LEDs, LCDs, switches, motors, sensors, communication protocols and more.
          • Debugging: You can use Flowcode 6 to debug your systems using tools such as breakpoints, step-by-step execution, a data recorder and an oscilloscope. Flowcode 6 also supports in-circuit test and in-circuit debugging features that allow you to test and debug your systems on real hardware.
          • Code profiling: You can use Flowcode 6 to analyze the performance of your programs using tools such as code coverage, execution time and memory usage. Flowcode 6 also helps you optimize your code by providing suggestions and warnings.
    
      -

      Flowcode 6 is compatible with a wide range of microcontrollers and development boards such as Arduino, PIC, AVR, ARM and Raspberry Pi. You can also use Flowcode 6 with hardware platforms such as E-blocks and E-blocks2 that provide modular and easy-to-use solutions for system development.

      -

      If you want to learn more about Flowcode 6, you can visit their official website https://www.matrixtsl.com/flowcode/ or watch their YouTube channel https://www.youtube.com/user/MatrixTSL.

    
      -
      -
      \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/resolvelib/__init__.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/resolvelib/__init__.py deleted file mode 100644 index ce05fd3027447fdc64986ecdee950643822e45e6..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/resolvelib/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -__all__ = [ - "__version__", - "AbstractProvider", - "AbstractResolver", - "BaseReporter", - "InconsistentCandidate", - "Resolver", - "RequirementsConflicted", - "ResolutionError", - "ResolutionImpossible", - "ResolutionTooDeep", -] - -__version__ = "0.8.1" - - -from .providers import AbstractProvider, AbstractResolver -from .reporters import BaseReporter -from .resolvers import ( - InconsistentCandidate, - RequirementsConflicted, - ResolutionError, - ResolutionImpossible, - ResolutionTooDeep, - Resolver, -) diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_clib.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_clib.py deleted file mode 100644 index 003499fa9765d62d2197b01bc7466eb11d1475aa..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_distutils/command/build_clib.py +++ /dev/null @@ -1,208 +0,0 @@ -"""distutils.command.build_clib - -Implements the Distutils 'build_clib' command, to build a C/C++ library -that is included in the module distribution and needed by an extension -module.""" - - -# XXX this module has *lots* of code ripped-off quite transparently from -# build_ext.py -- not surprisingly really, as the work required to build -# a static library from a collection of C source files is not really all -# that different from what's required to build a shared object file from -# a collection of C source files. Nevertheless, I haven't done the -# necessary refactoring to account for the overlap in code between the -# two modules, mainly because a number of subtle details changed in the -# cut 'n paste. Sigh. 
- -import os -from distutils.core import Command -from distutils.errors import * -from distutils.sysconfig import customize_compiler -from distutils import log - - -def show_compilers(): - from distutils.ccompiler import show_compilers - - show_compilers() - - -class build_clib(Command): - - description = "build C/C++ libraries used by Python extensions" - - user_options = [ - ('build-clib=', 'b', "directory to build C/C++ libraries to"), - ('build-temp=', 't', "directory to put temporary build by-products"), - ('debug', 'g', "compile with debugging information"), - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('compiler=', 'c', "specify the compiler type"), - ] - - boolean_options = ['debug', 'force'] - - help_options = [ - ('help-compiler', None, "list available compilers", show_compilers), - ] - - def initialize_options(self): - self.build_clib = None - self.build_temp = None - - # List of libraries to build - self.libraries = None - - # Compilation options for all libraries - self.include_dirs = None - self.define = None - self.undef = None - self.debug = None - self.force = 0 - self.compiler = None - - def finalize_options(self): - # This might be confusing: both build-clib and build-temp default - # to build-temp as defined by the "build" command. This is because - # I think that C libraries are really just temporary build - # by-products, at least from the point of view of building Python - # extensions -- but I want to keep my options open. - self.set_undefined_options( - 'build', - ('build_temp', 'build_clib'), - ('build_temp', 'build_temp'), - ('compiler', 'compiler'), - ('debug', 'debug'), - ('force', 'force'), - ) - - self.libraries = self.distribution.libraries - if self.libraries: - self.check_library_list(self.libraries) - - if self.include_dirs is None: - self.include_dirs = self.distribution.include_dirs or [] - if isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - - # XXX same as for build_ext -- what about 'self.define' and - # 'self.undef' ? - - def run(self): - if not self.libraries: - return - - # Yech -- this is cut 'n pasted from build_ext.py! - from distutils.ccompiler import new_compiler - - self.compiler = new_compiler( - compiler=self.compiler, dry_run=self.dry_run, force=self.force - ) - customize_compiler(self.compiler) - - if self.include_dirs is not None: - self.compiler.set_include_dirs(self.include_dirs) - if self.define is not None: - # 'define' option is a list of (name,value) tuples - for (name, value) in self.define: - self.compiler.define_macro(name, value) - if self.undef is not None: - for macro in self.undef: - self.compiler.undefine_macro(macro) - - self.build_libraries(self.libraries) - - def check_library_list(self, libraries): - """Ensure that the list of libraries is valid. - - `library` is presumably provided as a command option 'libraries'. - This method checks that it is a list of 2-tuples, where the tuples - are (library_name, build_info_dict). - - Raise DistutilsSetupError if the structure is invalid anywhere; - just returns otherwise. 
- """ - if not isinstance(libraries, list): - raise DistutilsSetupError("'libraries' option must be a list of tuples") - - for lib in libraries: - if not isinstance(lib, tuple) and len(lib) != 2: - raise DistutilsSetupError("each element of 'libraries' must a 2-tuple") - - name, build_info = lib - - if not isinstance(name, str): - raise DistutilsSetupError( - "first element of each tuple in 'libraries' " - "must be a string (the library name)" - ) - - if '/' in name or (os.sep != '/' and os.sep in name): - raise DistutilsSetupError( - "bad library name '%s': " - "may not contain directory separators" % lib[0] - ) - - if not isinstance(build_info, dict): - raise DistutilsSetupError( - "second element of each tuple in 'libraries' " - "must be a dictionary (build info)" - ) - - def get_library_names(self): - # Assume the library list is valid -- 'check_library_list()' is - # called from 'finalize_options()', so it should be! - if not self.libraries: - return None - - lib_names = [] - for (lib_name, build_info) in self.libraries: - lib_names.append(lib_name) - return lib_names - - def get_source_files(self): - self.check_library_list(self.libraries) - filenames = [] - for (lib_name, build_info) in self.libraries: - sources = build_info.get('sources') - if sources is None or not isinstance(sources, (list, tuple)): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % lib_name - ) - - filenames.extend(sources) - return filenames - - def build_libraries(self, libraries): - for (lib_name, build_info) in libraries: - sources = build_info.get('sources') - if sources is None or not isinstance(sources, (list, tuple)): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % lib_name - ) - sources = list(sources) - - log.info("building '%s' library", lib_name) - - # First, compile the source code to object files in the library - # directory. (This should probably change to putting object - # files in a temporary build directory.) - macros = build_info.get('macros') - include_dirs = build_info.get('include_dirs') - objects = self.compiler.compile( - sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - ) - - # Now "link" the object files together into a static library. - # (On Unix at least, this isn't really linking -- it just - # builds an archive. Whatever.) - self.compiler.create_static_lib( - objects, lib_name, output_dir=self.build_clib, debug=self.debug - ) diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/__init__.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/discovery.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/discovery.py deleted file mode 100644 index 95c3c7f83ed4f2e60156c01fddd4e3bf2b6f32d2..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/discovery.py +++ /dev/null @@ -1,588 +0,0 @@ -"""Automatic discovery of Python modules and packages (for inclusion in the -distribution) and other config values. 
- -For the purposes of this module, the following nomenclature is used: - -- "src-layout": a directory representing a Python project that contains a "src" - folder. Everything under the "src" folder is meant to be included in the - distribution when packaging the project. Example:: - - . - ├── tox.ini - ├── pyproject.toml - └── src/ - └── mypkg/ - ├── __init__.py - ├── mymodule.py - └── my_data_file.txt - -- "flat-layout": a Python project that does not use "src-layout" but instead - have a directory under the project root for each package:: - - . - ├── tox.ini - ├── pyproject.toml - └── mypkg/ - ├── __init__.py - ├── mymodule.py - └── my_data_file.txt - -- "single-module": a project that contains a single Python script direct under - the project root (no directory used):: - - . - ├── tox.ini - ├── pyproject.toml - └── mymodule.py - -""" - -import itertools -import os -from fnmatch import fnmatchcase -from glob import glob -from pathlib import Path -from typing import TYPE_CHECKING -from typing import Callable, Dict, Iterator, Iterable, List, Optional, Tuple, Union - -import _distutils_hack.override # noqa: F401 - -from distutils import log -from distutils.util import convert_path - -_Path = Union[str, os.PathLike] -_Filter = Callable[[str], bool] -StrIter = Iterator[str] - -chain_iter = itertools.chain.from_iterable - -if TYPE_CHECKING: - from setuptools import Distribution # noqa - - -def _valid_name(path: _Path) -> bool: - # Ignore invalid names that cannot be imported directly - return os.path.basename(path).isidentifier() - - -class _Finder: - """Base class that exposes functionality for module/package finders""" - - ALWAYS_EXCLUDE: Tuple[str, ...] = () - DEFAULT_EXCLUDE: Tuple[str, ...] = () - - @classmethod - def find( - cls, - where: _Path = '.', - exclude: Iterable[str] = (), - include: Iterable[str] = ('*',) - ) -> List[str]: - """Return a list of all Python items (packages or modules, depending on - the finder implementation) found within directory 'where'. - - 'where' is the root directory which will be searched. - It should be supplied as a "cross-platform" (i.e. URL-style) path; - it will be converted to the appropriate local path syntax. - - 'exclude' is a sequence of names to exclude; '*' can be used - as a wildcard in the names. - When finding packages, 'foo.*' will exclude all subpackages of 'foo' - (but not 'foo' itself). - - 'include' is a sequence of names to include. - If it's specified, only the named items will be included. - If it's not specified, all found items will be included. - 'include' can contain shell style wildcard patterns just like - 'exclude'. - """ - - exclude = exclude or cls.DEFAULT_EXCLUDE - return list( - cls._find_iter( - convert_path(str(where)), - cls._build_filter(*cls.ALWAYS_EXCLUDE, *exclude), - cls._build_filter(*include), - ) - ) - - @classmethod - def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter: - raise NotImplementedError - - @staticmethod - def _build_filter(*patterns: str) -> _Filter: - """ - Given a list of patterns, return a callable that will be true only if - the input matches at least one of the patterns. 
- """ - return lambda name: any(fnmatchcase(name, pat) for pat in patterns) - - -class PackageFinder(_Finder): - """ - Generate a list of all Python packages found within a directory - """ - - ALWAYS_EXCLUDE = ("ez_setup", "*__pycache__") - - @classmethod - def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter: - """ - All the packages found in 'where' that pass the 'include' filter, but - not the 'exclude' filter. - """ - for root, dirs, files in os.walk(str(where), followlinks=True): - # Copy dirs to iterate over it, then empty dirs. - all_dirs = dirs[:] - dirs[:] = [] - - for dir in all_dirs: - full_path = os.path.join(root, dir) - rel_path = os.path.relpath(full_path, where) - package = rel_path.replace(os.path.sep, '.') - - # Skip directory trees that are not valid packages - if '.' in dir or not cls._looks_like_package(full_path, package): - continue - - # Should this package be included? - if include(package) and not exclude(package): - yield package - - # Keep searching subdirectories, as there may be more packages - # down there, even if the parent was excluded. - dirs.append(dir) - - @staticmethod - def _looks_like_package(path: _Path, _package_name: str) -> bool: - """Does a directory look like a package?""" - return os.path.isfile(os.path.join(path, '__init__.py')) - - -class PEP420PackageFinder(PackageFinder): - @staticmethod - def _looks_like_package(_path: _Path, _package_name: str) -> bool: - return True - - -class ModuleFinder(_Finder): - """Find isolated Python modules. - This function will **not** recurse subdirectories. - """ - - @classmethod - def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter: - for file in glob(os.path.join(where, "*.py")): - module, _ext = os.path.splitext(os.path.basename(file)) - - if not cls._looks_like_module(module): - continue - - if include(module) and not exclude(module): - yield module - - _looks_like_module = staticmethod(_valid_name) - - -# We have to be extra careful in the case of flat layout to not include files -# and directories not meant for distribution (e.g. 
tool-related)
-
-
-class FlatLayoutPackageFinder(PEP420PackageFinder):
-    _EXCLUDE = (
-        "ci",
-        "bin",
-        "doc",
-        "docs",
-        "documentation",
-        "manpages",
-        "news",
-        "changelog",
-        "test",
-        "tests",
-        "unit_test",
-        "unit_tests",
-        "example",
-        "examples",
-        "scripts",
-        "tools",
-        "util",
-        "utils",
-        "python",
-        "build",
-        "dist",
-        "venv",
-        "env",
-        "requirements",
-        # ---- Task runners / Build tools ----
-        "tasks",  # invoke
-        "fabfile",  # fabric
-        "site_scons",  # SCons
-        # ---- Other tools ----
-        "benchmark",
-        "benchmarks",
-        "exercise",
-        "exercises",
-        # ---- Hidden directories/Private packages ----
-        "[._]*",
-    )
-
-    DEFAULT_EXCLUDE = tuple(chain_iter((p, f"{p}.*") for p in _EXCLUDE))
-    """Reserved package names"""
-
-    @staticmethod
-    def _looks_like_package(_path: _Path, package_name: str) -> bool:
-        names = package_name.split('.')
-        # Consider PEP 561
-        root_pkg_is_valid = names[0].isidentifier() or names[0].endswith("-stubs")
-        return root_pkg_is_valid and all(name.isidentifier() for name in names[1:])
-
-
-class FlatLayoutModuleFinder(ModuleFinder):
-    DEFAULT_EXCLUDE = (
-        "setup",
-        "conftest",
-        "test",
-        "tests",
-        "example",
-        "examples",
-        "build",
-        # ---- Task runners ----
-        "toxfile",
-        "noxfile",
-        "pavement",
-        "dodo",
-        "tasks",
-        "fabfile",
-        # ---- Other tools ----
-        "[Ss][Cc]onstruct",  # SCons
-        "conanfile",  # Conan: C/C++ build tool
-        "manage",  # Django
-        "benchmark",
-        "benchmarks",
-        "exercise",
-        "exercises",
-        # ---- Hidden files/Private modules ----
-        "[._]*",
-    )
-    """Reserved top-level module names"""
-
-
-def _find_packages_within(root_pkg: str, pkg_dir: _Path) -> List[str]:
-    nested = PEP420PackageFinder.find(pkg_dir)
-    return [root_pkg] + [".".join((root_pkg, n)) for n in nested]
-
-
-class ConfigDiscovery:
-    """Fill in metadata and options that can be automatically derived
-    (from other metadata/options, the file system or conventions)
-    """
-
-    def __init__(self, distribution: "Distribution"):
-        self.dist = distribution
-        self._called = False
-        self._disabled = False
-        self._skip_ext_modules = False
-
-    def _disable(self):
-        """Internal API to disable automatic discovery"""
-        self._disabled = True
-
-    def _ignore_ext_modules(self):
-        """Internal API to disregard ext_modules.
-
-        Normally auto-discovery would not be triggered if ``ext_modules`` are set
-        (this is done for backward compatibility with existing packages relying on
-        ``setup.py`` or ``setup.cfg``). However, ``setuptools`` can call this function
-        to ignore given ``ext_modules`` and proceed with the auto-discovery if
-        ``packages`` and ``py_modules`` are not given (e.g. when using pyproject.toml
-        metadata).
-        """
-        self._skip_ext_modules = True
-
-    @property
-    def _root_dir(self) -> _Path:
-        # It is best to wait until `src_root` is set in dist before using _root_dir.
-        return self.dist.src_root or os.curdir
-
-    @property
-    def _package_dir(self) -> Dict[str, str]:
-        if self.dist.package_dir is None:
-            return {}
-        return self.dist.package_dir
-
-    def __call__(self, force=False, name=True, ignore_ext_modules=False):
-        """Automatically discover missing configuration fields
-        and modify the given ``distribution`` object in-place.
-
-        Note that by default this will only have an effect the first time the
-        ``ConfigDiscovery`` object is called.
-
-        To repeatedly invoke automatic discovery (e.g. when the project
-        directory changes), please use ``force=True`` (or create a new
-        ``ConfigDiscovery`` instance).
- """ - if force is False and (self._called or self._disabled): - # Avoid overhead of multiple calls - return - - self._analyse_package_layout(ignore_ext_modules) - if name: - self.analyse_name() # depends on ``packages`` and ``py_modules`` - - self._called = True - - def _explicitly_specified(self, ignore_ext_modules: bool) -> bool: - """``True`` if the user has specified some form of package/module listing""" - ignore_ext_modules = ignore_ext_modules or self._skip_ext_modules - ext_modules = not (self.dist.ext_modules is None or ignore_ext_modules) - return ( - self.dist.packages is not None - or self.dist.py_modules is not None - or ext_modules - or hasattr(self.dist, "configuration") and self.dist.configuration - # ^ Some projects use numpy.distutils.misc_util.Configuration - ) - - def _analyse_package_layout(self, ignore_ext_modules: bool) -> bool: - if self._explicitly_specified(ignore_ext_modules): - # For backward compatibility, just try to find modules/packages - # when nothing is given - return True - - log.debug( - "No `packages` or `py_modules` configuration, performing " - "automatic discovery." - ) - - return ( - self._analyse_explicit_layout() - or self._analyse_src_layout() - # flat-layout is the trickiest for discovery so it should be last - or self._analyse_flat_layout() - ) - - def _analyse_explicit_layout(self) -> bool: - """The user can explicitly give a package layout via ``package_dir``""" - package_dir = self._package_dir.copy() # don't modify directly - package_dir.pop("", None) # This falls under the "src-layout" umbrella - root_dir = self._root_dir - - if not package_dir: - return False - - log.debug(f"`explicit-layout` detected -- analysing {package_dir}") - pkgs = chain_iter( - _find_packages_within(pkg, os.path.join(root_dir, parent_dir)) - for pkg, parent_dir in package_dir.items() - ) - self.dist.packages = list(pkgs) - log.debug(f"discovered packages -- {self.dist.packages}") - return True - - def _analyse_src_layout(self) -> bool: - """Try to find all packages or modules under the ``src`` directory - (or anything pointed by ``package_dir[""]``). - - The "src-layout" is relatively safe for automatic discovery. - We assume that everything within is meant to be included in the - distribution. - - If ``package_dir[""]`` is not given, but the ``src`` directory exists, - this function will set ``package_dir[""] = "src"``. - """ - package_dir = self._package_dir - src_dir = os.path.join(self._root_dir, package_dir.get("", "src")) - if not os.path.isdir(src_dir): - return False - - log.debug(f"`src-layout` detected -- analysing {src_dir}") - package_dir.setdefault("", os.path.basename(src_dir)) - self.dist.package_dir = package_dir # persist eventual modifications - self.dist.packages = PEP420PackageFinder.find(src_dir) - self.dist.py_modules = ModuleFinder.find(src_dir) - log.debug(f"discovered packages -- {self.dist.packages}") - log.debug(f"discovered py_modules -- {self.dist.py_modules}") - return True - - def _analyse_flat_layout(self) -> bool: - """Try to find all packages and modules under the project root. - - Since the ``flat-layout`` is more dangerous in terms of accidentally including - extra files/directories, this function is more conservative and will raise an - error if multiple packages or modules are found. - - This assumes that multi-package dists are uncommon and refuse to support that - use case in order to be able to prevent unintended errors. 
- """ - log.debug(f"`flat-layout` detected -- analysing {self._root_dir}") - return self._analyse_flat_packages() or self._analyse_flat_modules() - - def _analyse_flat_packages(self) -> bool: - self.dist.packages = FlatLayoutPackageFinder.find(self._root_dir) - top_level = remove_nested_packages(remove_stubs(self.dist.packages)) - log.debug(f"discovered packages -- {self.dist.packages}") - self._ensure_no_accidental_inclusion(top_level, "packages") - return bool(top_level) - - def _analyse_flat_modules(self) -> bool: - self.dist.py_modules = FlatLayoutModuleFinder.find(self._root_dir) - log.debug(f"discovered py_modules -- {self.dist.py_modules}") - self._ensure_no_accidental_inclusion(self.dist.py_modules, "modules") - return bool(self.dist.py_modules) - - def _ensure_no_accidental_inclusion(self, detected: List[str], kind: str): - if len(detected) > 1: - from inspect import cleandoc - from setuptools.errors import PackageDiscoveryError - - msg = f"""Multiple top-level {kind} discovered in a flat-layout: {detected}. - - To avoid accidental inclusion of unwanted files or directories, - setuptools will not proceed with this build. - - If you are trying to create a single distribution with multiple {kind} - on purpose, you should not rely on automatic discovery. - Instead, consider the following options: - - 1. set up custom discovery (`find` directive with `include` or `exclude`) - 2. use a `src-layout` - 3. explicitly set `py_modules` or `packages` with a list of names - - To find more information, look for "package discovery" on setuptools docs. - """ - raise PackageDiscoveryError(cleandoc(msg)) - - def analyse_name(self): - """The packages/modules are the essential contribution of the author. - Therefore the name of the distribution can be derived from them. - """ - if self.dist.metadata.name or self.dist.name: - # get_name() is not reliable (can return "UNKNOWN") - return None - - log.debug("No `name` configuration, performing automatic discovery") - - name = ( - self._find_name_single_package_or_module() - or self._find_name_from_packages() - ) - if name: - self.dist.metadata.name = name - self.dist.name = name - - def _find_name_single_package_or_module(self) -> Optional[str]: - """Exactly one module or package""" - for field in ('packages', 'py_modules'): - items = getattr(self.dist, field, None) or [] - if items and len(items) == 1: - log.debug(f"Single module/package detected, name: {items[0]}") - return items[0] - - return None - - def _find_name_from_packages(self) -> Optional[str]: - """Try to find the root package that is not a PEP 420 namespace""" - if not self.dist.packages: - return None - - packages = remove_stubs(sorted(self.dist.packages, key=len)) - package_dir = self.dist.package_dir or {} - - parent_pkg = find_parent_package(packages, package_dir, self._root_dir) - if parent_pkg: - log.debug(f"Common parent package detected, name: {parent_pkg}") - return parent_pkg - - log.warn("No parent package detected, impossible to derive `name`") - return None - - -def remove_nested_packages(packages: List[str]) -> List[str]: - """Remove nested packages from a list of packages. 
- - >>> remove_nested_packages(["a", "a.b1", "a.b2", "a.b1.c1"]) - ['a'] - >>> remove_nested_packages(["a", "b", "c.d", "c.d.e.f", "g.h", "a.a1"]) - ['a', 'b', 'c.d', 'g.h'] - """ - pkgs = sorted(packages, key=len) - top_level = pkgs[:] - size = len(pkgs) - for i, name in enumerate(reversed(pkgs)): - if any(name.startswith(f"{other}.") for other in top_level): - top_level.pop(size - i - 1) - - return top_level - - -def remove_stubs(packages: List[str]) -> List[str]: - """Remove type stubs (:pep:`561`) from a list of packages. - - >>> remove_stubs(["a", "a.b", "a-stubs", "a-stubs.b.c", "b", "c-stubs"]) - ['a', 'a.b', 'b'] - """ - return [pkg for pkg in packages if not pkg.split(".")[0].endswith("-stubs")] - - -def find_parent_package( - packages: List[str], package_dir: Dict[str, str], root_dir: _Path -) -> Optional[str]: - """Find the parent package that is not a namespace.""" - packages = sorted(packages, key=len) - common_ancestors = [] - for i, name in enumerate(packages): - if not all(n.startswith(f"{name}.") for n in packages[i+1:]): - # Since packages are sorted by length, this condition is able - # to find a list of all common ancestors. - # When there is divergence (e.g. multiple root packages) - # the list will be empty - break - common_ancestors.append(name) - - for name in common_ancestors: - pkg_path = find_package_path(name, package_dir, root_dir) - init = os.path.join(pkg_path, "__init__.py") - if os.path.isfile(init): - return name - - return None - - -def find_package_path(name: str, package_dir: Dict[str, str], root_dir: _Path) -> str: - """Given a package name, return the path where it should be found on - disk, considering the ``package_dir`` option. - - >>> path = find_package_path("my.pkg", {"": "root/is/nested"}, ".") - >>> path.replace(os.sep, "/") - './root/is/nested/my/pkg' - - >>> path = find_package_path("my.pkg", {"my": "root/is/nested"}, ".") - >>> path.replace(os.sep, "/") - './root/is/nested/pkg' - - >>> path = find_package_path("my.pkg", {"my.pkg": "root/is/nested"}, ".") - >>> path.replace(os.sep, "/") - './root/is/nested' - - >>> path = find_package_path("other.pkg", {"my.pkg": "root/is/nested"}, ".") - >>> path.replace(os.sep, "/") - './other/pkg' - """ - parts = name.split(".") - for i in range(len(parts), 0, -1): - # Look backwards, the most specific package_dir first - partial_name = ".".join(parts[:i]) - if partial_name in package_dir: - parent = package_dir[partial_name] - return os.path.join(root_dir, parent, *parts[i:]) - - parent = package_dir.get("") or "" - return os.path.join(root_dir, *parent.split("/"), *parts) - - -def construct_package_dir(packages: List[str], package_path: _Path) -> Dict[str, str]: - parent_pkgs = remove_nested_packages(packages) - prefix = Path(package_path).parts - return {pkg: "/".join([*prefix, *pkg.split(".")]) for pkg in parent_pkgs} diff --git a/spaces/tomandandy/MusicGen3/tests/data/__init__.py b/spaces/tomandandy/MusicGen3/tests/data/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/tomandandy/MusicGen3/tests/data/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
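The setuptools discovery module removed above is easiest to understand by running its finders. Below is a minimal sketch, assuming a setuptools release (>= 61) that still ships the private `setuptools.discovery` module with the `PackageFinder`/`ModuleFinder` API shown in the diff; since this is private API, treat it as an illustration rather than a supported interface.

```python
# Minimal sketch of the finders defined in the deleted discovery module.
# Assumes setuptools >= 61; setuptools.discovery is private API.
import os
import tempfile

from setuptools.discovery import ModuleFinder, PackageFinder

with tempfile.TemporaryDirectory() as root:
    # Recreate the docstring's flat-layout example: a package with a
    # subpackage, plus one top-level module.
    sub = os.path.join(root, "mypkg", "sub")
    os.makedirs(sub)
    for d in (os.path.join(root, "mypkg"), sub):
        open(os.path.join(d, "__init__.py"), "w").close()
    open(os.path.join(root, "mymodule.py"), "w").close()

    # PackageFinder only accepts directories containing __init__.py
    # (see _looks_like_package); PEP420PackageFinder would not require it.
    print(sorted(PackageFinder.find(root)))               # ['mypkg', 'mypkg.sub']

    # As documented in _Finder.find: 'mypkg.*' excludes the subpackages
    # of mypkg but not mypkg itself.
    print(PackageFinder.find(root, exclude=["mypkg.*"]))  # ['mypkg']

    # ModuleFinder only looks at top-level *.py files and does not recurse.
    print(ModuleFinder.find(root))                        # ['mymodule']
```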
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/yolact/README.md b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/yolact/README.md deleted file mode 100644 index da3559bbe8395fd9f84d803ce818a230f5721365..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/yolact/README.md +++ /dev/null @@ -1,71 +0,0 @@
-# **Y**ou **O**nly **L**ook **A**t **C**oefficien**T**s
-
-
-```
-     ██╗   ██╗ ██████╗ ██╗      █████╗  ██████╗████████╗
-     ╚██╗ ██╔╝██╔═══██╗██║     ██╔══██╗██╔════╝╚══██╔══╝
-      ╚████╔╝ ██║   ██║██║     ███████║██║        ██║
-       ╚██╔╝  ██║   ██║██║     ██╔══██║██║        ██║
-        ██║   ╚██████╔╝███████╗██║  ██║╚██████╗   ██║
-        ╚═╝    ╚═════╝ ╚══════╝╚═╝  ╚═╝ ╚═════╝   ╚═╝
-```
-
-A simple, fully convolutional model for real-time instance segmentation. This is the code for our paper:
-
-- [YOLACT: Real-time Instance Segmentation](https://arxiv.org/abs/1904.02689)
-
-
-For a real-time demo, check out our ICCV video:
-[![IMAGE ALT TEXT HERE](https://img.youtube.com/vi/0pMfmo8qfpQ/0.jpg)](https://www.youtube.com/watch?v=0pMfmo8qfpQ)
-
-## Evaluation
-
-Here are our YOLACT models along with their FPS on a Titan Xp and mAP on COCO's `val`:
-
-| Image Size | GPU x BS | Backbone | *FPS | mAP | Weights | Configs | Download |
-|:----------:|:--------:|:-------------:|:-----:|:----:|:-------:|:------:|:--------:|
-| 550 | 1x8 | Resnet50-FPN | 42.5 | 29.0 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r50_1x8_coco.py) |[model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco_20200908-f38d58df.pth) |
-| 550 | 8x8 | Resnet50-FPN | 42.5 | 28.4 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r50_8x8_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/yolact/yolact_r50_8x8_coco_20200908-ca34f5db.pth) |
-| 550 | 1x8 | Resnet101-FPN | 33.5 | 30.4 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r101_1x8_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/yolact/yolact_r101_1x8_coco_20200908-4cbe9101.pth) |
-
-*Note: The FPS is evaluated by the [original implementation](https://github.com/dbolya/yolact). When calculating FPS, only the model inference time is taken into account. Data loading and post-processing operations such as converting masks to RLE code, generating COCO JSON results, and image rendering are not included.
-
-## Training
-
-All the aforementioned models are trained with a single GPU. Training typically takes ~12 GB of VRAM when using resnet-101 as the backbone. If you want to try multi-GPU training, you may have to modify the configuration files accordingly, such as adjusting the training schedule and freezing batch norm.
-
-```Shell
-# Trains using the resnet-101 backbone with a batch size of 8 on a single GPU.
-./tools/dist_train.sh configs/yolact/yolact_r101.py 1
-```
-
-## Testing
-
-Please refer to [mmdetection/docs/getting_started.md](https://github.com/open-mmlab/mmdetection/blob/master/docs/getting_started.md#inference-with-pretrained-models).
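As a quick smoke test before a full evaluation run, single-image inference can also be driven from Python. A minimal sketch, assuming mmdetection v2.x (the series these configs belong to), where `mmdet.apis` exposes `init_detector`/`inference_detector`; the checkpoint file is the one from the table above, and the input/output image paths are placeholders:

```python
# Sketch: run YOLACT on one image via mmdet's high-level API.
# Assumes mmdetection v2.x is installed and the r50 checkpoint from the
# table above has been downloaded locally.
from mmdet.apis import inference_detector, init_detector

config_file = 'configs/yolact/yolact_r50_1x8_coco.py'
checkpoint_file = 'yolact_r50_1x8_coco_20200908-f38d58df.pth'  # placeholder path

model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo.jpg')  # per-class bboxes and masks
model.show_result('demo.jpg', result, score_thr=0.3, out_file='yolact_demo_out.jpg')
```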
- -## Citation - -If you use YOLACT or this code base in your work, please cite - -```latex -@inproceedings{yolact-iccv2019, - author = {Daniel Bolya and Chong Zhou and Fanyi Xiao and Yong Jae Lee}, - title = {YOLACT: {Real-time} Instance Segmentation}, - booktitle = {ICCV}, - year = {2019}, -} -``` - - diff --git a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/data/imagenet.py b/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/data/imagenet.py deleted file mode 100644 index 1c473f9c6965b22315dbb289eff8247c71bdc790..0000000000000000000000000000000000000000 --- a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/data/imagenet.py +++ /dev/null @@ -1,394 +0,0 @@ -import os, yaml, pickle, shutil, tarfile, glob -import cv2 -import albumentations -import PIL -import numpy as np -import torchvision.transforms.functional as TF -from omegaconf import OmegaConf -from functools import partial -from PIL import Image -from tqdm import tqdm -from torch.utils.data import Dataset, Subset - -import taming.data.utils as tdu -from taming.data.imagenet import str_to_indices, give_synsets_from_indices, download, retrieve -from taming.data.imagenet import ImagePaths - -from ldm.modules.image_degradation import degradation_fn_bsr, degradation_fn_bsr_light - - -def synset2idx(path_to_yaml="data/index_synset.yaml"): - with open(path_to_yaml) as f: - di2s = yaml.load(f) - return dict((v,k) for k,v in di2s.items()) - - -class ImageNetBase(Dataset): - def __init__(self, config=None): - self.config = config or OmegaConf.create() - if not type(self.config)==dict: - self.config = OmegaConf.to_container(self.config) - self.keep_orig_class_label = self.config.get("keep_orig_class_label", False) - self.process_images = True # if False we skip loading & processing images and self.data contains filepaths - self._prepare() - self._prepare_synset_to_human() - self._prepare_idx_to_synset() - self._prepare_human_to_integer_label() - self._load() - - def __len__(self): - return len(self.data) - - def __getitem__(self, i): - return self.data[i] - - def _prepare(self): - raise NotImplementedError() - - def _filter_relpaths(self, relpaths): - ignore = set([ - "n06596364_9591.JPEG", - ]) - relpaths = [rpath for rpath in relpaths if not rpath.split("/")[-1] in ignore] - if "sub_indices" in self.config: - indices = str_to_indices(self.config["sub_indices"]) - synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn) # returns a list of strings - self.synset2idx = synset2idx(path_to_yaml=self.idx2syn) - files = [] - for rpath in relpaths: - syn = rpath.split("/")[0] - if syn in synsets: - files.append(rpath) - return files - else: - return relpaths - - def _prepare_synset_to_human(self): - SIZE = 2655750 - URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1" - self.human_dict = os.path.join(self.root, "synset_human.txt") - if (not os.path.exists(self.human_dict) or - not os.path.getsize(self.human_dict)==SIZE): - download(URL, self.human_dict) - - def _prepare_idx_to_synset(self): - URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1" - self.idx2syn = os.path.join(self.root, "index_synset.yaml") - if (not os.path.exists(self.idx2syn)): - download(URL, self.idx2syn) - - def _prepare_human_to_integer_label(self): - URL = "https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1" - self.human2integer = os.path.join(self.root, "imagenet1000_clsidx_to_labels.txt") - if (not os.path.exists(self.human2integer)): - download(URL, self.human2integer) - with 
open(self.human2integer, "r") as f: - lines = f.read().splitlines() - assert len(lines) == 1000 - self.human2integer_dict = dict() - for line in lines: - value, key = line.split(":") - self.human2integer_dict[key] = int(value) - - def _load(self): - with open(self.txt_filelist, "r") as f: - self.relpaths = f.read().splitlines() - l1 = len(self.relpaths) - self.relpaths = self._filter_relpaths(self.relpaths) - print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths))) - - self.synsets = [p.split("/")[0] for p in self.relpaths] - self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths] - - unique_synsets = np.unique(self.synsets) - class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets)) - if not self.keep_orig_class_label: - self.class_labels = [class_dict[s] for s in self.synsets] - else: - self.class_labels = [self.synset2idx[s] for s in self.synsets] - - with open(self.human_dict, "r") as f: - human_dict = f.read().splitlines() - human_dict = dict(line.split(maxsplit=1) for line in human_dict) - - self.human_labels = [human_dict[s] for s in self.synsets] - - labels = { - "relpath": np.array(self.relpaths), - "synsets": np.array(self.synsets), - "class_label": np.array(self.class_labels), - "human_label": np.array(self.human_labels), - } - - if self.process_images: - self.size = retrieve(self.config, "size", default=256) - self.data = ImagePaths(self.abspaths, - labels=labels, - size=self.size, - random_crop=self.random_crop, - ) - else: - self.data = self.abspaths - - -class ImageNetTrain(ImageNetBase): - NAME = "ILSVRC2012_train" - URL = "http://www.image-net.org/challenges/LSVRC/2012/" - AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2" - FILES = [ - "ILSVRC2012_img_train.tar", - ] - SIZES = [ - 147897477120, - ] - - def __init__(self, process_images=True, data_root=None, **kwargs): - self.process_images = process_images - self.data_root = data_root - super().__init__(**kwargs) - - def _prepare(self): - if self.data_root: - self.root = os.path.join(self.data_root, self.NAME) - else: - cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")) - self.root = os.path.join(cachedir, "autoencoders/data", self.NAME) - - self.datadir = os.path.join(self.root, "data") - self.txt_filelist = os.path.join(self.root, "filelist.txt") - self.expected_length = 1281167 - self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop", - default=True) - if not tdu.is_prepared(self.root): - # prep - print("Preparing dataset {} in {}".format(self.NAME, self.root)) - - datadir = self.datadir - if not os.path.exists(datadir): - path = os.path.join(self.root, self.FILES[0]) - if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]: - import academictorrents as at - atpath = at.get(self.AT_HASH, datastore=self.root) - assert atpath == path - - print("Extracting {} to {}".format(path, datadir)) - os.makedirs(datadir, exist_ok=True) - with tarfile.open(path, "r:") as tar: - tar.extractall(path=datadir) - - print("Extracting sub-tars.") - subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar"))) - for subpath in tqdm(subpaths): - subdir = subpath[:-len(".tar")] - os.makedirs(subdir, exist_ok=True) - with tarfile.open(subpath, "r:") as tar: - tar.extractall(path=subdir) - - filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG")) - filelist = [os.path.relpath(p, start=datadir) for p in filelist] - filelist = sorted(filelist) - filelist = "\n".join(filelist)+"\n" - with open(self.txt_filelist, "w") as f: - 
f.write(filelist) - - tdu.mark_prepared(self.root) - - -class ImageNetValidation(ImageNetBase): - NAME = "ILSVRC2012_validation" - URL = "http://www.image-net.org/challenges/LSVRC/2012/" - AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5" - VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1" - FILES = [ - "ILSVRC2012_img_val.tar", - "validation_synset.txt", - ] - SIZES = [ - 6744924160, - 1950000, - ] - - def __init__(self, process_images=True, data_root=None, **kwargs): - self.data_root = data_root - self.process_images = process_images - super().__init__(**kwargs) - - def _prepare(self): - if self.data_root: - self.root = os.path.join(self.data_root, self.NAME) - else: - cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")) - self.root = os.path.join(cachedir, "autoencoders/data", self.NAME) - self.datadir = os.path.join(self.root, "data") - self.txt_filelist = os.path.join(self.root, "filelist.txt") - self.expected_length = 50000 - self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop", - default=False) - if not tdu.is_prepared(self.root): - # prep - print("Preparing dataset {} in {}".format(self.NAME, self.root)) - - datadir = self.datadir - if not os.path.exists(datadir): - path = os.path.join(self.root, self.FILES[0]) - if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]: - import academictorrents as at - atpath = at.get(self.AT_HASH, datastore=self.root) - assert atpath == path - - print("Extracting {} to {}".format(path, datadir)) - os.makedirs(datadir, exist_ok=True) - with tarfile.open(path, "r:") as tar: - tar.extractall(path=datadir) - - vspath = os.path.join(self.root, self.FILES[1]) - if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]: - download(self.VS_URL, vspath) - - with open(vspath, "r") as f: - synset_dict = f.read().splitlines() - synset_dict = dict(line.split() for line in synset_dict) - - print("Reorganizing into synset folders") - synsets = np.unique(list(synset_dict.values())) - for s in synsets: - os.makedirs(os.path.join(datadir, s), exist_ok=True) - for k, v in synset_dict.items(): - src = os.path.join(datadir, k) - dst = os.path.join(datadir, v) - shutil.move(src, dst) - - filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG")) - filelist = [os.path.relpath(p, start=datadir) for p in filelist] - filelist = sorted(filelist) - filelist = "\n".join(filelist)+"\n" - with open(self.txt_filelist, "w") as f: - f.write(filelist) - - tdu.mark_prepared(self.root) - - - -class ImageNetSR(Dataset): - def __init__(self, size=None, - degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1., - random_crop=True): - """ - Imagenet Superresolution Dataloader - Performs following ops in order: - 1. crops a crop of size s from image either as random or center crop - 2. resizes crop to size with cv2.area_interpolation - 3. degrades resized crop with degradation_fn - - :param size: resizing to size after cropping - :param degradation: degradation_fn, e.g. 
cv_bicubic or bsrgan_light
-        :param downscale_f: Low Resolution Downsample factor
-        :param min_crop_f: determines crop size s,
-          where s = c * min_img_side_len with c sampled from interval (min_crop_f, max_crop_f)
-        :param max_crop_f: ""
-        :param data_root:
-        :param random_crop:
-        """
-        self.base = self.get_base()
-        assert size
-        assert (size / downscale_f).is_integer()
-        self.size = size
-        self.LR_size = int(size / downscale_f)
-        self.min_crop_f = min_crop_f
-        self.max_crop_f = max_crop_f
-        assert(max_crop_f <= 1.)
-        self.center_crop = not random_crop
-
-        self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA)
-
-        self.pil_interpolation = False  # gets reset later in case interp_op is from pillow
-
-        if degradation == "bsrgan":
-            self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f)
-
-        elif degradation == "bsrgan_light":
-            self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f)
-
-        else:
-            interpolation_fn = {
-                "cv_nearest": cv2.INTER_NEAREST,
-                "cv_bilinear": cv2.INTER_LINEAR,
-                "cv_bicubic": cv2.INTER_CUBIC,
-                "cv_area": cv2.INTER_AREA,
-                "cv_lanczos": cv2.INTER_LANCZOS4,
-                "pil_nearest": PIL.Image.NEAREST,
-                "pil_bilinear": PIL.Image.BILINEAR,
-                "pil_bicubic": PIL.Image.BICUBIC,
-                "pil_box": PIL.Image.BOX,
-                "pil_hamming": PIL.Image.HAMMING,
-                "pil_lanczos": PIL.Image.LANCZOS,
-            }[degradation]
-
-            self.pil_interpolation = degradation.startswith("pil_")
-
-            if self.pil_interpolation:
-                self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn)
-
-            else:
-                self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size,
-                                                                          interpolation=interpolation_fn)
-
-    def __len__(self):
-        return len(self.base)
-
-    def __getitem__(self, i):
-        example = self.base[i]
-        image = Image.open(example["file_path_"])
-
-        if not image.mode == "RGB":
-            image = image.convert("RGB")
-
-        image = np.array(image).astype(np.uint8)
-
-        min_side_len = min(image.shape[:2])
-        crop_side_len = min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None)
-        crop_side_len = int(crop_side_len)
-
-        if self.center_crop:
-            self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len)
-
-        else:
-            self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len)
-
-        image = self.cropper(image=image)["image"]
-        image = self.image_rescaler(image=image)["image"]
-
-        if self.pil_interpolation:
-            image_pil = PIL.Image.fromarray(image)
-            LR_image = self.degradation_process(image_pil)
-            LR_image = np.array(LR_image).astype(np.uint8)
-
-        else:
-            LR_image = self.degradation_process(image=image)["image"]
-
-        example["image"] = (image/127.5 - 1.0).astype(np.float32)
-        example["LR_image"] = (LR_image/127.5 - 1.0).astype(np.float32)
-
-        return example
-
-
-class ImageNetSRTrain(ImageNetSR):
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
-
-    def get_base(self):
-        with open("data/imagenet_train_hr_indices.p", "rb") as f:
-            indices = pickle.load(f)
-        dset = ImageNetTrain(process_images=False,)
-        return Subset(dset, indices)
-
-
-class ImageNetSRValidation(ImageNetSR):
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
-
-    def get_base(self):
-        with open("data/imagenet_val_hr_indices.p", "rb") as f:
-            indices = pickle.load(f)
-        dset = ImageNetValidation(process_images=False,)
-        return Subset(dset, indices)
diff --git a/spaces/tovaru/vits-for-ba/models.py b/spaces/tovaru/vits-for-ba/models.py deleted file mode 100644 index
f5acdeb2bedd47897348407c0ae55c9a160da881..0000000000000000000000000000000000000000 --- a/spaces/tovaru/vits-for-ba/models.py +++ /dev/null @@ -1,534 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - 
- -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - 
self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = 
self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = 
ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." 
- g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - diff --git a/spaces/triggah61/chingu-music/tests/modules/test_conv.py b/spaces/triggah61/chingu-music/tests/modules/test_conv.py deleted file mode 100644 index 28fbc4f1a0ebaf41b56947b767958ae696e75eec..0000000000000000000000000000000000000000 --- a/spaces/triggah61/chingu-music/tests/modules/test_conv.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from itertools import product -import math -import random - -import pytest -import torch -from torch import nn - -from audiocraft.modules import ( - NormConv1d, - NormConvTranspose1d, - StreamableConv1d, - StreamableConvTranspose1d, - pad1d, - unpad1d, -) - - -def test_get_extra_padding_for_conv1d(): - # TODO: Implement me! - pass - - -def test_pad1d_zeros(): - x = torch.randn(1, 1, 20) - - xp1 = pad1d(x, (0, 5), mode='constant', value=0.) - assert xp1.shape[-1] == 25 - xp2 = pad1d(x, (5, 5), mode='constant', value=0.) - assert xp2.shape[-1] == 30 - xp3 = pad1d(x, (0, 0), mode='constant', value=0.) - assert xp3.shape[-1] == 20 - xp4 = pad1d(x, (10, 30), mode='constant', value=0.) - assert xp4.shape[-1] == 60 - - with pytest.raises(AssertionError): - pad1d(x, (-1, 0), mode='constant', value=0.) - - with pytest.raises(AssertionError): - pad1d(x, (0, -1), mode='constant', value=0.) - - with pytest.raises(AssertionError): - pad1d(x, (-1, -1), mode='constant', value=0.) - - -def test_pad1d_reflect(): - x = torch.randn(1, 1, 20) - - xp1 = pad1d(x, (0, 5), mode='reflect', value=0.) - assert xp1.shape[-1] == 25 - xp2 = pad1d(x, (5, 5), mode='reflect', value=0.) - assert xp2.shape[-1] == 30 - xp3 = pad1d(x, (0, 0), mode='reflect', value=0.) - assert xp3.shape[-1] == 20 - xp4 = pad1d(x, (10, 30), mode='reflect', value=0.) - assert xp4.shape[-1] == 60 - - with pytest.raises(AssertionError): - pad1d(x, (-1, 0), mode='reflect', value=0.) - - with pytest.raises(AssertionError): - pad1d(x, (0, -1), mode='reflect', value=0.) - - with pytest.raises(AssertionError): - pad1d(x, (-1, -1), mode='reflect', value=0.) 
- - -def test_unpad1d(): - x = torch.randn(1, 1, 20) - - u1 = unpad1d(x, (5, 5)) - assert u1.shape[-1] == 10 - u2 = unpad1d(x, (0, 5)) - assert u2.shape[-1] == 15 - u3 = unpad1d(x, (5, 0)) - assert u3.shape[-1] == 15 - u4 = unpad1d(x, (0, 0)) - assert u4.shape[-1] == x.shape[-1] - - with pytest.raises(AssertionError): - unpad1d(x, (-1, 0)) - - with pytest.raises(AssertionError): - unpad1d(x, (0, -1)) - - with pytest.raises(AssertionError): - unpad1d(x, (-1, -1)) - - -class TestNormConv1d: - - def test_norm_conv1d_modules(self): - N, C, T = 2, 2, random.randrange(1, 100_000) - t0 = torch.randn(N, C, T) - - C_out, kernel_size, stride = 1, 4, 1 - expected_out_length = int((T - kernel_size) / stride + 1) - wn_conv = NormConv1d(C, 1, kernel_size=4, norm='weight_norm') - gn_conv = NormConv1d(C, 1, kernel_size=4, norm='time_group_norm') - nn_conv = NormConv1d(C, 1, kernel_size=4, norm='none') - - assert isinstance(wn_conv.norm, nn.Identity) - assert isinstance(wn_conv.conv, nn.Conv1d) - - assert isinstance(gn_conv.norm, nn.GroupNorm) - assert isinstance(gn_conv.conv, nn.Conv1d) - - assert isinstance(nn_conv.norm, nn.Identity) - assert isinstance(nn_conv.conv, nn.Conv1d) - - for conv_layer in [wn_conv, gn_conv, nn_conv]: - out = conv_layer(t0) - assert isinstance(out, torch.Tensor) - assert list(out.shape) == [N, C_out, expected_out_length] - - -class TestNormConvTranspose1d: - - def test_normalizations(self): - N, C, T = 2, 2, random.randrange(1, 100_000) - t0 = torch.randn(N, C, T) - - C_out, kernel_size, stride = 1, 4, 1 - expected_out_length = (T - 1) * stride + (kernel_size - 1) + 1 - - wn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='weight_norm') - gn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='time_group_norm') - nn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='none') - - assert isinstance(wn_convtr.norm, nn.Identity) - assert isinstance(wn_convtr.convtr, nn.ConvTranspose1d) - - assert isinstance(gn_convtr.norm, nn.GroupNorm) - assert isinstance(gn_convtr.convtr, nn.ConvTranspose1d) - - assert isinstance(nn_convtr.norm, nn.Identity) - assert isinstance(nn_convtr.convtr, nn.ConvTranspose1d) - - for convtr_layer in [wn_convtr, gn_convtr, nn_convtr]: - out = convtr_layer(t0) - assert isinstance(out, torch.Tensor) - assert list(out.shape) == [N, C_out, expected_out_length] - - -class TestStreamableConv1d: - - def get_streamable_conv1d_output_length(self, length, kernel_size, stride, dilation): - # StreamableConv1d internally pads to make sure that the last window is full - padding_total = (kernel_size - 1) * dilation - (stride - 1) - n_frames = (length - kernel_size + padding_total) / stride + 1 - ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total) - return ideal_length // stride - - def test_streamable_conv1d(self): - N, C, T = 2, 2, random.randrange(1, 100_000) - t0 = torch.randn(N, C, T) - C_out = 1 - - # conv params are [(kernel_size, stride, dilation)] - conv_params = [(4, 1, 1), (4, 2, 1), (3, 1, 3), (10, 5, 1), (3, 2, 3)] - for causal, (kernel_size, stride, dilation) in product([False, True], conv_params): - expected_out_length = self.get_streamable_conv1d_output_length(T, kernel_size, stride, dilation) - sconv = StreamableConv1d(C, C_out, kernel_size=kernel_size, stride=stride, dilation=dilation, causal=causal) - out = sconv(t0) - assert isinstance(out, torch.Tensor) - print(list(out.shape), [N, C_out, expected_out_length]) - assert 
list(out.shape) == [N, C_out, expected_out_length] - - -class TestStreamableConvTranspose1d: - - def get_streamable_convtr1d_output_length(self, length, kernel_size, stride): - padding_total = (kernel_size - stride) - return (length - 1) * stride - padding_total + (kernel_size - 1) + 1 - - def test_streamable_convtr1d(self): - N, C, T = 2, 2, random.randrange(1, 100_000) - t0 = torch.randn(N, C, T) - - C_out = 1 - - with pytest.raises(AssertionError): - StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=False, trim_right_ratio=0.5) - StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=True, trim_right_ratio=-1.) - StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=True, trim_right_ratio=2) - - # causal params are [(causal, trim_right)] - causal_params = [(False, 1.0), (True, 1.0), (True, 0.5), (True, 0.0)] - # conv params are [(kernel_size, stride)] - conv_params = [(4, 1), (4, 2), (3, 1), (10, 5)] - for ((causal, trim_right_ratio), (kernel_size, stride)) in product(causal_params, conv_params): - expected_out_length = self.get_streamable_convtr1d_output_length(T, kernel_size, stride) - sconvtr = StreamableConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, - causal=causal, trim_right_ratio=trim_right_ratio) - out = sconvtr(t0) - assert isinstance(out, torch.Tensor) - assert list(out.shape) == [N, C_out, expected_out_length] diff --git a/spaces/trl-lib/stack-llama/README.md b/spaces/trl-lib/stack-llama/README.md deleted file mode 100644 index f37e9acc2c9af4bfe8f0d623f573d28d6c44d1b1..0000000000000000000000000000000000000000 --- a/spaces/trl-lib/stack-llama/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: StackLLaMa -emoji: 🦙 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -duplicated_from: philschmid/igel-playground ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ulysses115/Nogizaka46-so/inference_main.py b/spaces/ulysses115/Nogizaka46-so/inference_main.py deleted file mode 100644 index b6c9ff8fc771c1bada0b04d59f0af4c87a524089..0000000000000000000000000000000000000000 --- a/spaces/ulysses115/Nogizaka46-so/inference_main.py +++ /dev/null @@ -1,137 +0,0 @@ -import io -import logging -import time -from pathlib import Path - -import librosa -import matplotlib.pyplot as plt -import numpy as np -import soundfile - -from inference import infer_tool -from inference import slicer -from inference.infer_tool import Svc - -logging.getLogger('numba').setLevel(logging.WARNING) -chunks_dict = infer_tool.read_temp("inference/chunks_temp.json") - - - -def main(): - import argparse - - parser = argparse.ArgumentParser(description='sovits4 inference') - - # 一定要设置的部分 - parser.add_argument('-m', '--model_path', type=str, default="logs/44k/G_0.pth", help='模型路径') - parser.add_argument('-c', '--config_path', type=str, default="configs/config.json", help='配置文件路径') - parser.add_argument('-cl', '--clip', type=float, default=0, help='音频强制切片,默认0为自动切片,单位为秒/s') - parser.add_argument('-n', '--clean_names', type=str, nargs='+', default=["君の知らない物語-src.wav"], help='wav文件名列表,放在raw文件夹下') - parser.add_argument('-t', '--trans', type=int, nargs='+', default=[0], help='音高调整,支持正负(半音)') - parser.add_argument('-s', '--spk_list', type=str, nargs='+', default=['nen'], help='合成目标说话人名称') - - # 可选项部分 - parser.add_argument('-a', '--auto_predict_f0', action='store_true', default=False,help='语音转换自动预测音高,转换歌声时不要打开这个会严重跑调') - parser.add_argument('-cm', '--cluster_model_path', 
type=str, default="logs/44k/kmeans_10000.pt", help='聚类模型路径,如果没有训练聚类则随便填') - parser.add_argument('-cr', '--cluster_infer_ratio', type=float, default=0, help='聚类方案占比,范围0-1,若没有训练聚类模型则默认0即可') - parser.add_argument('-lg', '--linear_gradient', type=float, default=0, help='两段音频切片的交叉淡入长度,如果强制切片后出现人声不连贯可调整该数值,如果连贯建议采用默认值0,单位为秒') - parser.add_argument('-fmp', '--f0_mean_pooling', type=bool, default=False, help='是否对F0使用均值滤波器(池化),对部分哑音有改善。注意,启动该选项会导致推理速度下降,默认关闭') - parser.add_argument('-eh', '--enhance', type=bool, default=False, help='是否使用NSF_HIFIGAN增强器,该选项对部分训练集少的模型有一定的音质增强效果,但是对训练好的模型有反面效果,默认关闭') - - # 不用动的部分 - parser.add_argument('-sd', '--slice_db', type=int, default=-40, help='默认-40,嘈杂的音频可以-30,干声保留呼吸可以-50') - parser.add_argument('-d', '--device', type=str, default=None, help='推理设备,None则为自动选择cpu和gpu') - parser.add_argument('-ns', '--noice_scale', type=float, default=0.4, help='噪音级别,会影响咬字和音质,较为玄学') - parser.add_argument('-p', '--pad_seconds', type=float, default=0.5, help='推理音频pad秒数,由于未知原因开头结尾会有异响,pad一小段静音段后就不会出现') - parser.add_argument('-wf', '--wav_format', type=str, default='flac', help='音频输出格式') - parser.add_argument('-lgr', '--linear_gradient_retain', type=float, default=0.75, help='自动音频切片后,需要舍弃每段切片的头尾。该参数设置交叉长度保留的比例,范围0-1,左开右闭') - parser.add_argument('-eak', '--enhancer_adaptive_key', type=int, default=0, help='使增强器适应更高的音域(单位为半音数)|默认为0') - - args = parser.parse_args() - - clean_names = args.clean_names - trans = args.trans - spk_list = args.spk_list - slice_db = args.slice_db - wav_format = args.wav_format - auto_predict_f0 = args.auto_predict_f0 - cluster_infer_ratio = args.cluster_infer_ratio - noice_scale = args.noice_scale - pad_seconds = args.pad_seconds - clip = args.clip - lg = args.linear_gradient - lgr = args.linear_gradient_retain - F0_mean_pooling = args.f0_mean_pooling - enhance = args.enhance - enhancer_adaptive_key = args.enhancer_adaptive_key - - svc_model = Svc(args.model_path, args.config_path, args.device, args.cluster_model_path,enhance) - infer_tool.mkdir(["raw", "results"]) - - infer_tool.fill_a_to_b(trans, clean_names) - for clean_name, tran in zip(clean_names, trans): - raw_audio_path = f"raw/{clean_name}" - if "." 
-        if "." not in raw_audio_path:
-            raw_audio_path += ".wav"
-        infer_tool.format_wav(raw_audio_path)
-        wav_path = Path(raw_audio_path).with_suffix('.wav')
-        chunks = slicer.cut(wav_path, db_thresh=slice_db)
-        audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
-        per_size = int(clip*audio_sr)
-        lg_size = int(lg*audio_sr)
-        lg_size_r = int(lg_size*lgr)
-        lg_size_c_l = (lg_size-lg_size_r)//2
-        lg_size_c_r = lg_size-lg_size_r-lg_size_c_l
-        lg = np.linspace(0,1,lg_size_r) if lg_size!=0 else 0
-
-        for spk in spk_list:
-            audio = []
-            for (slice_tag, data) in audio_data:
-                print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
-
-                length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample))
-                if slice_tag:
-                    print('jump empty segment')
-                    _audio = np.zeros(length)
-                    audio.extend(list(infer_tool.pad_array(_audio, length)))
-                    continue
-                if per_size != 0:
-                    datas = infer_tool.split_list_by_n(data, per_size,lg_size)
-                else:
-                    datas = [data]
-                for k,dat in enumerate(datas):
-                    per_length = int(np.ceil(len(dat) / audio_sr * svc_model.target_sample)) if clip!=0 else length
-                    if clip!=0: print(f'###=====segment clip start, {round(len(dat) / audio_sr, 3)}s======')
-                    # pad
-                    pad_len = int(audio_sr * pad_seconds)
-                    dat = np.concatenate([np.zeros([pad_len]), dat, np.zeros([pad_len])])
-                    raw_path = io.BytesIO()
-                    soundfile.write(raw_path, dat, audio_sr, format="wav")
-                    raw_path.seek(0)
-                    out_audio, out_sr = svc_model.infer(spk, tran, raw_path,
-                                                        cluster_infer_ratio=cluster_infer_ratio,
-                                                        auto_predict_f0=auto_predict_f0,
-                                                        noice_scale=noice_scale,
-                                                        F0_mean_pooling = F0_mean_pooling,
-                                                        enhancer_adaptive_key = enhancer_adaptive_key
-                                                        )
-                    _audio = out_audio.cpu().numpy()
-                    pad_len = int(svc_model.target_sample * pad_seconds)
-                    _audio = _audio[pad_len:-pad_len]
-                    _audio = infer_tool.pad_array(_audio, per_length)
-                    if lg_size!=0 and k!=0:
-                        lg1 = audio[-(lg_size_r+lg_size_c_r):-lg_size_c_r] if lgr != 1 else audio[-lg_size:]
-                        lg2 = _audio[lg_size_c_l:lg_size_c_l+lg_size_r] if lgr != 1 else _audio[0:lg_size]
-                        lg_pre = lg1*(1-lg)+lg2*lg
-                        audio = audio[0:-(lg_size_r+lg_size_c_r)] if lgr != 1 else audio[0:-lg_size]
-                        audio.extend(lg_pre)
-                        _audio = _audio[lg_size_c_l+lg_size_r:] if lgr != 1 else _audio[lg_size:]
-                    audio.extend(list(_audio))
-            key = "auto" if auto_predict_f0 else f"{tran}key"
-            cluster_name = "" if cluster_infer_ratio == 0 else f"_{cluster_infer_ratio}"
-            res_path = f'./results/{clean_name}_{key}_{spk}{cluster_name}.{wav_format}'
-            soundfile.write(res_path, audio, svc_model.target_sample, format=wav_format)
-            svc_model.clear_empty()
-
-if __name__ == '__main__':
-    main()
diff --git a/spaces/uragankatrrin/MHN-React/mhnreact/__init__.py b/spaces/uragankatrrin/MHN-React/mhnreact/__init__.py
deleted file mode 100644
index f102a9cadfa89ce554b3b26d2b90bfba2e05273c..0000000000000000000000000000000000000000
--- a/spaces/uragankatrrin/MHN-React/mhnreact/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.0.1"


      diff --git a/spaces/user238921933/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/blank_frame_reroll.py b/spaces/user238921933/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/blank_frame_reroll.py deleted file mode 100644 index 44693c84a4abc3f2b4e2503de9fcab3e5626e305..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/blank_frame_reroll.py +++ /dev/null @@ -1,24 +0,0 @@ -from .generate import generate -#WebUI -from modules.shared import opts, cmd_opts, state - -def blank_frame_reroll(image, args, root, frame_idx): - patience = 10 - print("Blank frame detected! If you don't have the NSFW filter enabled, this may be due to a glitch!") - if args.reroll_blank_frames == 'reroll': - while not image.getbbox(): - print("Rerolling with +1 seed...") - args.seed += 1 - image = generate(args, root, frame_idx) - patience -= 1 - if patience == 0: - print("Rerolling with +1 seed failed for 10 iterations! Try setting webui's precision to 'full' and if it fails, please report this to the devs! Interrupting...") - state.interrupted = True - state.current_image = image - return None - elif args.reroll_blank_frames == 'interrupt': - print("Interrupting to save your eyes...") - state.interrupted = True - state.current_image = image - return None - return image \ No newline at end of file diff --git a/spaces/ussrcccp/White-box-Cartoonization/README.md b/spaces/ussrcccp/White-box-Cartoonization/README.md deleted file mode 100644 index 9860239cf42c94e385faaaa75a85311e010d64f7..0000000000000000000000000000000000000000 --- a/spaces/ussrcccp/White-box-Cartoonization/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -python_version: 3.7 -title: White Box Cartoonization -emoji: 📚 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: hylee/White-box-Cartoonization ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/versae/gradio-blocks-rest-api/README.md b/spaces/versae/gradio-blocks-rest-api/README.md deleted file mode 100644 index 648bfc5381d7bbfccf801854f8ccd4cd977363ca..0000000000000000000000000000000000000000 --- a/spaces/versae/gradio-blocks-rest-api/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Gradio Blocks Rest Api -emoji: 🐨 -colorFrom: pink -colorTo: green -sdk: gradio -sdk_version: 3.0.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/vietvd/image-enhance/app.py b/spaces/vietvd/image-enhance/app.py deleted file mode 100644 index 28417cfe1e3723c859d5fb18cfd5cf0612585e6d..0000000000000000000000000000000000000000 --- a/spaces/vietvd/image-enhance/app.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -import shutil -import uuid -import cv2 -import gc -import gradio as gr -import torch -from basicsr.archs.rrdbnet_arch import RRDBNet -from gfpgan.utils import GFPGANer -from realesrgan.utils import RealESRGANer - -# download weights for RealESRGAN -if not os.path.exists('model_zoo/real/RealESRGAN_x4plus.pth'): - os.system("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P model_zoo/real") -if not os.path.exists('model_zoo/gan/GFPGANv1.4.pth'): - os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P model_zoo/gan") -if not 
os.path.exists('model_zoo/swinir/003_realSR_BSRGAN_DFO_s64w8_SwinIR-M_x4_GAN.pth'): - os.system('wget https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/003_realSR_BSRGAN_DFO_s64w8_SwinIR-M_x4_GAN.pth -P model_zoo/swinir') - -def inference(img, scale): - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) - model_path = 'model_zoo/real/RealESRGAN_x4plus.pth' - netscale = 4 - tile = 400 if torch.cuda.is_available() else 0 - dni_weight = None - # restorer - upsampler = RealESRGANer( - scale=netscale, - model_path=model_path, - dni_weight=dni_weight, - model=model, - tile=tile, - tile_pad=10, - pre_pad=0, - half=False, #Use fp32 precision during inference. Default: fp16 (half precision). - gpu_id=None) #gpu device to use (default=None) can be 0,1,2 for multi-gpu - # background enhancer with RealESRGAN - os.makedirs('output', exist_ok=True) - if scale > 4: - scale = 4 # avoid too large scale value - try: - extension = os.path.splitext(os.path.basename(str(img)))[1] - img = cv2.imread(img, cv2.IMREAD_UNCHANGED) - if len(img.shape) == 3 and img.shape[2] == 4: - img_mode = 'RGBA' - elif len(img.shape) == 2: # for gray inputs - img_mode = None - img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - else: - img_mode = None - - h, w = img.shape[0:2] - if h < 300: - img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4) - - face_enhancer = GFPGANer( - model_path='model_zoo/gan/GFPGANv1.4.pth', upscale=scale, arch='clean', channel_multiplier=2, bg_upsampler=upsampler) - _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True) - - if scale != 2: - interpolation = cv2.INTER_AREA if scale < 2 else cv2.INTER_LANCZOS4 - h, w = img.shape[0:2] - output = cv2.resize(output, (int(w * scale / 2), int(h * scale / 2)), interpolation=interpolation) - - if img_mode == 'RGBA': # RGBA images should be saved in png format - extension = 'png' - else: - extension = 'jpg' - - filename = str(uuid.uuid4()) - save_path = f'output/out_{filename}.{extension}' - cv2.imwrite(save_path, output) - - output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB) - return output, save_path - except Exception as error: - print('global exception', error) - return None, None - finally: - #clean_folder('output') - gc.collect() - if torch.cuda.is_available(): - torch.cuda.empty_cache() - -def clean_folder(folder): - for filename in os.listdir(folder): - file_path = os.path.join(folder, filename) - try: - if os.path.isfile(file_path) or os.path.islink(file_path): - os.unlink(file_path) - elif os.path.isdir(file_path): - shutil.rmtree(file_path) - except Exception as e: - print('Failed to delete %s. Reason: %s' % (file_path, e)) - -title = "Real Esrgan Restore Ai Face Restoration by appsgenz.com" -description = "" -article = "AppsGenz" -grApp = gr.Interface( - inference, [ - gr.Image(type="filepath", label="Input"), - gr.Number(label="Rescaling factor. 
Note max rescaling factor is 4", value=2),
-    ], [
-        gr.Image(type="numpy", label="Output (The whole image)"),
-        gr.File(label="Download the output image")
-    ],
-    title=title,
-    description=description,
-    article=article)
-grApp.queue(concurrency_count=2)
-grApp.launch(share=False)
\ No newline at end of file
diff --git a/spaces/vladocar/3dfood/app.py b/spaces/vladocar/3dfood/app.py
deleted file mode 100644
index 7f88a99617a5f4ab758c9144c5f4e1761e412794..0000000000000000000000000000000000000000
--- a/spaces/vladocar/3dfood/app.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import gradio as gr
-
-article = """---
-This space was created using [SD Space Creator](https://huggingface.co/spaces/anzorq/sd-space-creator)."""
-
-gr.Interface.load(
-    name="models/vladocar/3dfood",
-    title="""3dfood""",
-    description="""Demo for 3dfood Stable Diffusion model.""",
-    article=article,
-    ).queue(concurrency_count=20).launch()
diff --git a/spaces/wilmars/cluster-app/src/train.py b/spaces/wilmars/cluster-app/src/train.py
deleted file mode 100644
index 82aadf0058cb418f843e34df34ac7c0059e75cf7..0000000000000000000000000000000000000000
--- a/spaces/wilmars/cluster-app/src/train.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import pandas as pd
-import matplotlib.pyplot as plt
-from feature_engine.imputation import MeanMedianImputer
-from sklearn.decomposition import PCA
-from sklearn.cluster import KMeans
-from sklearn.pipeline import Pipeline
-from sklearn.model_selection import train_test_split
-from utils import ScalerDf, Kmeans_, logger, load_config
-import joblib
-
-
-def train(data, config):
-    logger.info('Training pipeline')
-    pipeline_steps = [
-        ('mean_inputer', MeanMedianImputer(imputation_method=config['imputation_method'], variables= ['MINIMUM_PAYMENTS', 'CREDIT_LIMIT'])),
-        ('standard', ScalerDf(method=config['scaler_method'])),
-        ('PCA', PCA(n_components=config['n_components'])),
-        ('Kmeans',Kmeans_(n_clusters=config['n_clusters']))
-    ]
-
-    cluster_pipeline = Pipeline(pipeline_steps)
-
-    # fit the pipeline on the dataframe passed in as `data`
-    cluster_pipeline.fit(data)
-    logger.info('Export Cluster Pipeline')
-    joblib.dump(cluster_pipeline, config['PIPELINE_PATH'])
-
-if __name__ == "__main__":
-    config = load_config('./', 'params.yaml')
-    cc_data = pd.read_csv(config['DATA_PATH'], index_col=config['INDEX_COL'])
-    train(cc_data, config)
diff --git a/spaces/wong26/faster-whisper-webui/src/vadParallel.py b/spaces/wong26/faster-whisper-webui/src/vadParallel.py
deleted file mode 100644
index c2323c0b632c34014ac1fe7ac79141b5bd9c5731..0000000000000000000000000000000000000000
--- a/spaces/wong26/faster-whisper-webui/src/vadParallel.py
+++ /dev/null
@@ -1,298 +0,0 @@
-import multiprocessing
-from queue import Empty
-import threading
-import time
-from src.hooks.progressListener import ProgressListener
-from src.vad import AbstractTranscription, TranscriptionConfig, get_audio_duration
-
-from multiprocessing import Pool, Queue
-
-from typing import Any, Dict, List, Union
-import os
-
-from src.whisper.abstractWhisperContainer import AbstractWhisperCallback
-
-class _ProgressListenerToQueue(ProgressListener):
-    def __init__(self, progress_queue: Queue):
-        self.progress_queue = progress_queue
-        self.progress_total = 0
-        self.prev_progress = 0
-
-    def on_progress(self, current: Union[int, float], total: Union[int, float]):
-        delta = current - self.prev_progress
-        self.prev_progress = current
-        self.progress_total = total
-        self.progress_queue.put(delta)
-
-    def on_finished(self):
-        if self.progress_total > self.prev_progress:
-            delta = self.progress_total 
- self.prev_progress - self.progress_queue.put(delta) - self.prev_progress = self.progress_total - -class ParallelContext: - def __init__(self, num_processes: int = None, auto_cleanup_timeout_seconds: float = None): - self.num_processes = num_processes - self.auto_cleanup_timeout_seconds = auto_cleanup_timeout_seconds - self.lock = threading.Lock() - - self.ref_count = 0 - self.pool = None - self.cleanup_timer = None - - def get_pool(self): - # Initialize pool lazily - if (self.pool is None): - context = multiprocessing.get_context('spawn') - self.pool = context.Pool(self.num_processes) - - self.ref_count = self.ref_count + 1 - - if (self.auto_cleanup_timeout_seconds is not None): - self._stop_auto_cleanup() - - return self.pool - - def return_pool(self, pool): - if (self.pool == pool and self.ref_count > 0): - self.ref_count = self.ref_count - 1 - - if (self.ref_count == 0): - if (self.auto_cleanup_timeout_seconds is not None): - self._start_auto_cleanup() - - def _start_auto_cleanup(self): - if (self.cleanup_timer is not None): - self.cleanup_timer.cancel() - self.cleanup_timer = threading.Timer(self.auto_cleanup_timeout_seconds, self._execute_cleanup) - self.cleanup_timer.start() - - print("Started auto cleanup of pool in " + str(self.auto_cleanup_timeout_seconds) + " seconds") - - def _stop_auto_cleanup(self): - if (self.cleanup_timer is not None): - self.cleanup_timer.cancel() - self.cleanup_timer = None - - print("Stopped auto cleanup of pool") - - def _execute_cleanup(self): - print("Executing cleanup of pool") - - if (self.ref_count == 0): - self.close() - - def close(self): - self._stop_auto_cleanup() - - if (self.pool is not None): - print("Closing pool of " + str(self.num_processes) + " processes") - self.pool.close() - self.pool.join() - self.pool = None - -class ParallelTranscriptionConfig(TranscriptionConfig): - def __init__(self, device_id: str, override_timestamps, initial_segment_index, copy: TranscriptionConfig = None): - super().__init__(copy.non_speech_strategy, copy.segment_padding_left, copy.segment_padding_right, copy.max_silent_period, copy.max_merge_size, copy.max_prompt_window, initial_segment_index) - self.device_id = device_id - self.override_timestamps = override_timestamps - -class ParallelTranscription(AbstractTranscription): - # Silero VAD typically takes about 3 seconds per minute, so there's no need to split the chunks - # into smaller segments than 2 minute (min 6 seconds per CPU core) - MIN_CPU_CHUNK_SIZE_SECONDS = 2 * 60 - - def __init__(self, sampling_rate: int = 16000): - super().__init__(sampling_rate=sampling_rate) - - def transcribe_parallel(self, transcription: AbstractTranscription, audio: str, whisperCallable: AbstractWhisperCallback, config: TranscriptionConfig, - cpu_device_count: int, gpu_devices: List[str], cpu_parallel_context: ParallelContext = None, gpu_parallel_context: ParallelContext = None, - progress_listener: ProgressListener = None): - total_duration = get_audio_duration(audio) - - # First, get the timestamps for the original audio - if (cpu_device_count > 1 and not transcription.is_transcribe_timestamps_fast()): - merged = self._get_merged_timestamps_parallel(transcription, audio, config, total_duration, cpu_device_count, cpu_parallel_context) - else: - timestamp_segments = transcription.get_transcribe_timestamps(audio, config, 0, total_duration) - merged = transcription.get_merged_timestamps(timestamp_segments, config, total_duration) - - # We must make sure the whisper model is downloaded - if (len(gpu_devices) > 1): - 
whisperCallable.model_container.ensure_downloaded() - - # Split into a list for each device - # TODO: Split by time instead of by number of chunks - merged_split = list(self._split(merged, len(gpu_devices))) - - # Parameters that will be passed to the transcribe function - parameters = [] - segment_index = config.initial_segment_index - - processing_manager = multiprocessing.Manager() - progress_queue = processing_manager.Queue() - - for i in range(len(gpu_devices)): - # Note that device_segment_list can be empty. But we will still create a process for it, - # as otherwise we run the risk of assigning the same device to multiple processes. - device_segment_list = list(merged_split[i]) if i < len(merged_split) else [] - device_id = gpu_devices[i] - - print("Device " + str(device_id) + " (index " + str(i) + ") has " + str(len(device_segment_list)) + " segments") - - # Create a new config with the given device ID - device_config = ParallelTranscriptionConfig(device_id, device_segment_list, segment_index, config) - segment_index += len(device_segment_list) - - progress_listener_to_queue = _ProgressListenerToQueue(progress_queue) - parameters.append([audio, whisperCallable, device_config, progress_listener_to_queue]); - - merged = { - 'text': '', - 'segments': [], - 'language': None - } - - created_context = False - - perf_start_gpu = time.perf_counter() - - # Spawn a separate process for each device - try: - if (gpu_parallel_context is None): - gpu_parallel_context = ParallelContext(len(gpu_devices)) - created_context = True - - # Get a pool of processes - pool = gpu_parallel_context.get_pool() - - # Run the transcription in parallel - results_async = pool.starmap_async(self.transcribe, parameters) - total_progress = 0 - - while not results_async.ready(): - try: - delta = progress_queue.get(timeout=5) # Set a timeout of 5 seconds - except Empty: - continue - - total_progress += delta - if progress_listener is not None: - progress_listener.on_progress(total_progress, total_duration) - - results = results_async.get() - - # Call the finished callback - if progress_listener is not None: - progress_listener.on_finished() - - for result in results: - # Merge the results - if (result['text'] is not None): - merged['text'] += result['text'] - if (result['segments'] is not None): - merged['segments'].extend(result['segments']) - if (result['language'] is not None): - merged['language'] = result['language'] - - finally: - # Return the pool to the context - if (gpu_parallel_context is not None): - gpu_parallel_context.return_pool(pool) - # Always close the context if we created it - if (created_context): - gpu_parallel_context.close() - - perf_end_gpu = time.perf_counter() - print("Parallel transcription took " + str(perf_end_gpu - perf_start_gpu) + " seconds") - - return merged - - def _get_merged_timestamps_parallel(self, transcription: AbstractTranscription, audio: str, config: TranscriptionConfig, total_duration: float, - cpu_device_count: int, cpu_parallel_context: ParallelContext = None): - parameters = [] - - chunk_size = max(total_duration / cpu_device_count, self.MIN_CPU_CHUNK_SIZE_SECONDS) - chunk_start = 0 - cpu_device_id = 0 - - perf_start_time = time.perf_counter() - - # Create chunks that will be processed on the CPU - while (chunk_start < total_duration): - chunk_end = min(chunk_start + chunk_size, total_duration) - - if (chunk_end - chunk_start < 1): - # No need to process chunks that are less than 1 second - break - - print("Parallel VAD: Executing chunk from " + str(chunk_start) + " to 
" + - str(chunk_end) + " on CPU device " + str(cpu_device_id)) - parameters.append([audio, config, chunk_start, chunk_end]); - - cpu_device_id += 1 - chunk_start = chunk_end - - created_context = False - - # Spawn a separate process for each device - try: - if (cpu_parallel_context is None): - cpu_parallel_context = ParallelContext(cpu_device_count) - created_context = True - - # Get a pool of processes - pool = cpu_parallel_context.get_pool() - - # Run the transcription in parallel. Note that transcription must be picklable. - results = pool.starmap(transcription.get_transcribe_timestamps, parameters) - - timestamps = [] - - # Flatten the results - for result in results: - timestamps.extend(result) - - merged = transcription.get_merged_timestamps(timestamps, config, total_duration) - - perf_end_time = time.perf_counter() - print("Parallel VAD processing took {} seconds".format(perf_end_time - perf_start_time)) - return merged - - finally: - # Return the pool to the context - if (cpu_parallel_context is not None): - cpu_parallel_context.return_pool(pool) - # Always close the context if we created it - if (created_context): - cpu_parallel_context.close() - - def get_transcribe_timestamps(self, audio: str, config: ParallelTranscriptionConfig, start_time: float, duration: float): - return [] - - def get_merged_timestamps(self, timestamps: List[Dict[str, Any]], config: ParallelTranscriptionConfig, total_duration: float): - # Override timestamps that will be processed - if (config.override_timestamps is not None): - print("(get_merged_timestamps) Using override timestamps of size " + str(len(config.override_timestamps))) - return config.override_timestamps - return super().get_merged_timestamps(timestamps, config, total_duration) - - def transcribe(self, audio: str, whisperCallable: AbstractWhisperCallback, config: ParallelTranscriptionConfig, - progressListener: ProgressListener = None): - # Override device ID the first time - if (os.environ.get("INITIALIZED", None) is None): - os.environ["INITIALIZED"] = "1" - - # Note that this may be None if the user didn't specify a device. In that case, Whisper will - # just use the default GPU device. - if (config.device_id is not None): - print("Using device " + config.device_id) - os.environ["CUDA_VISIBLE_DEVICES"] = config.device_id - - return super().transcribe(audio, whisperCallable, config, progressListener) - - def _split(self, a, n): - """Split a list into n approximately equal parts.""" - k, m = divmod(len(a), n) - return (a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n)) - diff --git a/spaces/wouaf/WOUAF-Text-to-Image/torch_utils/ops/upfirdn2d.h b/spaces/wouaf/WOUAF-Text-to-Image/torch_utils/ops/upfirdn2d.h deleted file mode 100644 index c9e2032bcac9d2abde7a75eea4d812da348afadd..0000000000000000000000000000000000000000 --- a/spaces/wouaf/WOUAF-Text-to-Image/torch_utils/ops/upfirdn2d.h +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -#include - -//------------------------------------------------------------------------ -// CUDA kernel parameters. 
-
-struct upfirdn2d_kernel_params
-{
-    const void*     x;
-    const float*    f;
-    void*           y;
-
-    int2            up;
-    int2            down;
-    int2            pad0;
-    int             flip;
-    float           gain;
-
-    int4            inSize;         // [width, height, channel, batch]
-    int4            inStride;
-    int2            filterSize;     // [width, height]
-    int2            filterStride;
-    int4            outSize;        // [width, height, channel, batch]
-    int4            outStride;
-    int             sizeMinor;
-    int             sizeMajor;
-
-    int             loopMinor;
-    int             loopMajor;
-    int             loopX;
-    int             launchMinor;
-    int             launchMajor;
-};
-
-//------------------------------------------------------------------------
-// CUDA kernel specialization.
-
-struct upfirdn2d_kernel_spec
-{
-    void*   kernel;
-    int     tileOutW;
-    int     tileOutH;
-    int     loopMinor;
-    int     loopX;
-};
-
-//------------------------------------------------------------------------
-// CUDA kernel selection.
-
-template <class T> upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p);
-
-//------------------------------------------------------------------------
diff --git a/spaces/wwwwwwww2/bingo/src/components/header.tsx b/spaces/wwwwwwww2/bingo/src/components/header.tsx
deleted file mode 100644
index dc298b722154d1ac6d7a7e148204605562d6cc58..0000000000000000000000000000000000000000
--- a/spaces/wwwwwwww2/bingo/src/components/header.tsx
+++ /dev/null
@@ -1,12 +0,0 @@
-import * as React from 'react'
-import { UserMenu } from './user-menu'
-
-export async function Header() {
-  return (
-    <div>
-      <UserMenu />
-    </div>
-  )
-}
diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/docs/Makefile b/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/docs/Makefile
deleted file mode 100644
index 298ea9e213e8c4c11f0431077510d4e325733c65..0000000000000000000000000000000000000000
--- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/docs/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# Minimal makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build
-SOURCEDIR     = .
-BUILDDIR      = _build
-
-# Put it first so that "make" without argument is like "make help".
-help:
-	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-.PHONY: help Makefile
-
-# Catch-all target: route all unknown targets to Sphinx using the new
-# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile
-	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/extension/adjacency_matrix/build_adjacency_matrix.cpp b/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/extension/adjacency_matrix/build_adjacency_matrix.cpp
deleted file mode 100644
index 4c496041e37e525af728794f11627a7e0027a267..0000000000000000000000000000000000000000
--- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/extension/adjacency_matrix/build_adjacency_matrix.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-#include <torch/extension.h>
-#include <iostream>
-#include <set>
-
-at::Tensor build_adjacency_matrix_forward(torch::Tensor initial_rank);
-
-
-#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
-#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
-#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
-
-at::Tensor build_adjacency_matrix(at::Tensor initial_rank) {
-  CHECK_INPUT(initial_rank);
-  return build_adjacency_matrix_forward(initial_rank);
-}
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
-  m.def("forward", &build_adjacency_matrix, "build_adjacency_matrix (CUDA)");
-}
diff --git a/spaces/xp3857/Image_Restoration_Colorization/Face_Enhancement/models/networks/generator.py b/spaces/xp3857/Image_Restoration_Colorization/Face_Enhancement/models/networks/generator.py
deleted file mode 100644
index 6e24cadc882caab9ee439bb3dd288e536878565a..0000000000000000000000000000000000000000
--- a/spaces/xp3857/Image_Restoration_Colorization/Face_Enhancement/models/networks/generator.py
+++ /dev/null
@@ -1,233 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from models.networks.base_network import BaseNetwork
-from models.networks.normalization import get_nonspade_norm_layer
-from models.networks.architecture import ResnetBlock as ResnetBlock
-from models.networks.architecture import SPADEResnetBlock as SPADEResnetBlock
-from models.networks.architecture import SPADEResnetBlock_non_spade as SPADEResnetBlock_non_spade
-
-
-class SPADEGenerator(BaseNetwork):
-    @staticmethod
-    def modify_commandline_options(parser, is_train):
-        parser.set_defaults(norm_G="spectralspadesyncbatch3x3")
-        parser.add_argument(
-            "--num_upsampling_layers",
-            choices=("normal", "more", "most"),
-            default="normal",
-            help="If 'more', adds upsampling layer between the two middle resnet blocks. 
If 'most', also add one more upsampling + resnet layer at the end of the generator", - ) - - return parser - - def __init__(self, opt): - super().__init__() - self.opt = opt - nf = opt.ngf - - self.sw, self.sh = self.compute_latent_vector_size(opt) - - print("The size of the latent vector size is [%d,%d]" % (self.sw, self.sh)) - - if opt.use_vae: - # In case of VAE, we will sample from random z vector - self.fc = nn.Linear(opt.z_dim, 16 * nf * self.sw * self.sh) - else: - # Otherwise, we make the network deterministic by starting with - # downsampled segmentation map instead of random z - if self.opt.no_parsing_map: - self.fc = nn.Conv2d(3, 16 * nf, 3, padding=1) - else: - self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1) - - if self.opt.injection_layer == "all" or self.opt.injection_layer == "1": - self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt) - else: - self.head_0 = SPADEResnetBlock_non_spade(16 * nf, 16 * nf, opt) - - if self.opt.injection_layer == "all" or self.opt.injection_layer == "2": - self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt) - self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt) - - else: - self.G_middle_0 = SPADEResnetBlock_non_spade(16 * nf, 16 * nf, opt) - self.G_middle_1 = SPADEResnetBlock_non_spade(16 * nf, 16 * nf, opt) - - if self.opt.injection_layer == "all" or self.opt.injection_layer == "3": - self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt) - else: - self.up_0 = SPADEResnetBlock_non_spade(16 * nf, 8 * nf, opt) - - if self.opt.injection_layer == "all" or self.opt.injection_layer == "4": - self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt) - else: - self.up_1 = SPADEResnetBlock_non_spade(8 * nf, 4 * nf, opt) - - if self.opt.injection_layer == "all" or self.opt.injection_layer == "5": - self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt) - else: - self.up_2 = SPADEResnetBlock_non_spade(4 * nf, 2 * nf, opt) - - if self.opt.injection_layer == "all" or self.opt.injection_layer == "6": - self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt) - else: - self.up_3 = SPADEResnetBlock_non_spade(2 * nf, 1 * nf, opt) - - final_nc = nf - - if opt.num_upsampling_layers == "most": - self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt) - final_nc = nf // 2 - - self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1) - - self.up = nn.Upsample(scale_factor=2) - - def compute_latent_vector_size(self, opt): - if opt.num_upsampling_layers == "normal": - num_up_layers = 5 - elif opt.num_upsampling_layers == "more": - num_up_layers = 6 - elif opt.num_upsampling_layers == "most": - num_up_layers = 7 - else: - raise ValueError("opt.num_upsampling_layers [%s] not recognized" % opt.num_upsampling_layers) - - sw = opt.load_size // (2 ** num_up_layers) - sh = round(sw / opt.aspect_ratio) - - return sw, sh - - def forward(self, input, degraded_image, z=None): - seg = input - - if self.opt.use_vae: - # we sample z from unit normal and reshape the tensor - if z is None: - z = torch.randn(input.size(0), self.opt.z_dim, dtype=torch.float32, device=input.get_device()) - x = self.fc(z) - x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw) - else: - # we downsample segmap and run convolution - if self.opt.no_parsing_map: - x = F.interpolate(degraded_image, size=(self.sh, self.sw), mode="bilinear") - else: - x = F.interpolate(seg, size=(self.sh, self.sw), mode="nearest") - x = self.fc(x) - - x = self.head_0(x, seg, degraded_image) - - x = self.up(x) - x = self.G_middle_0(x, seg, degraded_image) - - if self.opt.num_upsampling_layers == "more" or self.opt.num_upsampling_layers 
== "most": - x = self.up(x) - - x = self.G_middle_1(x, seg, degraded_image) - - x = self.up(x) - x = self.up_0(x, seg, degraded_image) - x = self.up(x) - x = self.up_1(x, seg, degraded_image) - x = self.up(x) - x = self.up_2(x, seg, degraded_image) - x = self.up(x) - x = self.up_3(x, seg, degraded_image) - - if self.opt.num_upsampling_layers == "most": - x = self.up(x) - x = self.up_4(x, seg, degraded_image) - - x = self.conv_img(F.leaky_relu(x, 2e-1)) - x = F.tanh(x) - - return x - - -class Pix2PixHDGenerator(BaseNetwork): - @staticmethod - def modify_commandline_options(parser, is_train): - parser.add_argument( - "--resnet_n_downsample", type=int, default=4, help="number of downsampling layers in netG" - ) - parser.add_argument( - "--resnet_n_blocks", - type=int, - default=9, - help="number of residual blocks in the global generator network", - ) - parser.add_argument( - "--resnet_kernel_size", type=int, default=3, help="kernel size of the resnet block" - ) - parser.add_argument( - "--resnet_initial_kernel_size", type=int, default=7, help="kernel size of the first convolution" - ) - # parser.set_defaults(norm_G='instance') - return parser - - def __init__(self, opt): - super().__init__() - input_nc = 3 - - # print("xxxxx") - # print(opt.norm_G) - norm_layer = get_nonspade_norm_layer(opt, opt.norm_G) - activation = nn.ReLU(False) - - model = [] - - # initial conv - model += [ - nn.ReflectionPad2d(opt.resnet_initial_kernel_size // 2), - norm_layer(nn.Conv2d(input_nc, opt.ngf, kernel_size=opt.resnet_initial_kernel_size, padding=0)), - activation, - ] - - # downsample - mult = 1 - for i in range(opt.resnet_n_downsample): - model += [ - norm_layer(nn.Conv2d(opt.ngf * mult, opt.ngf * mult * 2, kernel_size=3, stride=2, padding=1)), - activation, - ] - mult *= 2 - - # resnet blocks - for i in range(opt.resnet_n_blocks): - model += [ - ResnetBlock( - opt.ngf * mult, - norm_layer=norm_layer, - activation=activation, - kernel_size=opt.resnet_kernel_size, - ) - ] - - # upsample - for i in range(opt.resnet_n_downsample): - nc_in = int(opt.ngf * mult) - nc_out = int((opt.ngf * mult) / 2) - model += [ - norm_layer( - nn.ConvTranspose2d(nc_in, nc_out, kernel_size=3, stride=2, padding=1, output_padding=1) - ), - activation, - ] - mult = mult // 2 - - # final output conv - model += [ - nn.ReflectionPad2d(3), - nn.Conv2d(nc_out, opt.output_nc, kernel_size=7, padding=0), - nn.Tanh(), - ] - - self.model = nn.Sequential(*model) - - def forward(self, input, degraded_image, z=None): - return self.model(degraded_image) - diff --git a/spaces/xszqxszq/sovits-svc-mix/commons.py b/spaces/xszqxszq/sovits-svc-mix/commons.py deleted file mode 100644 index 96d3832bf538ca3f84f57db560e230fb4bdc7e50..0000000000000000000000000000000000000000 --- a/spaces/xszqxszq/sovits-svc-mix/commons.py +++ /dev/null @@ -1,160 +0,0 @@ -import math - -import torch -from torch.nn import functional as t_func - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. 
* logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2. * logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = t_func.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = t_func.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - t_func.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, 
clip_value, norm_type=2):
-    if isinstance(parameters, torch.Tensor):
-        parameters = [parameters]
-    parameters = list(filter(lambda para: para.grad is not None, parameters))
-    norm_type = float(norm_type)
-    if clip_value is not None:
-        clip_value = float(clip_value)
-
-    total_norm = 0
-    for p in parameters:
-        param_norm = p.grad.data.norm(norm_type)
-        total_norm += param_norm.item() ** norm_type
-        if clip_value is not None:
-            p.grad.data.clamp_(min=-clip_value, max=clip_value)
-    total_norm = total_norm ** (1. / norm_type)
-    return total_norm
diff --git a/spaces/xu1998hz/sescore/sescore.py b/spaces/xu1998hz/sescore/sescore.py
deleted file mode 100644
index a44577f94eefd1fdab3d2362f6fa92cea194c9be..0000000000000000000000000000000000000000
--- a/spaces/xu1998hz/sescore/sescore.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""SEScore: a text generation evaluation metric """
-
-import evaluate
-import datasets
-
-import comet
-from typing import Dict
-import torch
-from comet.encoders.base import Encoder
-from comet.encoders.bert import BERTEncoder
-from transformers import AutoModel, AutoTokenizer
-
-class robertaEncoder(BERTEncoder):
-    def __init__(self, pretrained_model: str) -> None:
-        super(Encoder, self).__init__()
-        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model)
-        self.model = AutoModel.from_pretrained(
-            pretrained_model, add_pooling_layer=False
-        )
-        self.model.encoder.output_hidden_states = True
-
-    @classmethod
-    def from_pretrained(cls, pretrained_model: str) -> Encoder:
-        return robertaEncoder(pretrained_model)
-
-    def forward(
-        self, input_ids: torch.Tensor, attention_mask: torch.Tensor, **kwargs
-    ) -> Dict[str, torch.Tensor]:
-        last_hidden_states, _, all_layers = self.model(
-            input_ids=input_ids,
-            attention_mask=attention_mask,
-            output_hidden_states=True,
-            return_dict=False,
-        )
-        return {
-            "sentemb": last_hidden_states[:, 0, :],
-            "wordemb": last_hidden_states,
-            "all_layers": all_layers,
-            "attention_mask": attention_mask,
-        }
-
-
-# TODO: Add BibTeX citation
-_CITATION = """\
-@inproceedings{xu-etal-2022-not,
-    title={Not All Errors are Equal: Learning Text Generation Metrics using Stratified Error Synthesis},
-    author={Xu, Wenda and Tuan, Yi-lin and Lu, Yujie and Saxon, Michael and Li, Lei and Wang, William Yang},
-    booktitle ={Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing},
-    month={dec},
-    year={2022},
-    url={https://arxiv.org/abs/2210.05035}
-}
-"""
-
-_DESCRIPTION = """\
-SEScore is an evaluation metric that tries to compute an overall score to measure text generation quality. 
-""" - -_KWARGS_DESCRIPTION = """ -Calculates how good are predictions given some references -Args: - predictions: list of candidate outputs - references: list of references -Returns: - {"mean_score": mean_score, "scores": scores} - -Examples: - >>> import evaluate - >>> sescore = evaluate.load("xu1998hz/sescore") - >>> score = sescore.compute( - references=['sescore is a simple but effective next-generation text evaluation metric'], - predictions=['sescore is simple effective text evaluation metric for next generation'] - ) -""" - -# TODO: Define external resources urls if needed -BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt" - - -@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) -class SEScore(evaluate.Metric): - """SEScore""" - - def _info(self): - # TODO: Specifies the evaluate.EvaluationModuleInfo object - return evaluate.MetricInfo( - # This is the description that will appear on the modules page. - module_type="metric", - description=_DESCRIPTION, - citation=_CITATION, - inputs_description=_KWARGS_DESCRIPTION, - # This defines the format of each prediction and reference - features=datasets.Features({ - 'predictions': datasets.Value("string", id="sequence"), - 'references': datasets.Value("string", id="sequence"), - }), - # Homepage of the module for documentation - homepage="http://module.homepage", - # Additional links to the codebase or references - codebase_urls=["http://github.com/path/to/codebase/of/new_module"], - reference_urls=["http://path.to.reference.url/new_module"] - ) - - def _download_and_prepare(self, dl_manager): - """download SEScore checkpoints to compute the scores""" - # Download SEScore checkpoint - from comet import load_from_checkpoint - import os - from huggingface_hub import snapshot_download - # initialize roberta into str2encoder - comet.encoders.str2encoder['RoBERTa'] = robertaEncoder - destination = snapshot_download(repo_id="xu1998hz/sescore_english_mt", revision="main") - self.scorer = load_from_checkpoint(f'{destination}/checkpoint/sescore_english_mt.ckpt') - - def _compute(self, predictions, references, gpus=None, progress_bar=False): - if gpus is None: - gpus = 1 if torch.cuda.is_available() else 0 - - data = {"src": references, "mt": predictions} - data = [dict(zip(data, t)) for t in zip(*data.values())] - scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar) - return {"mean_score": mean_score, "scores": scores} diff --git a/spaces/xuxw98/TAPA/llama/generation.py b/spaces/xuxw98/TAPA/llama/generation.py deleted file mode 100644 index ba1ce513d267f8d958b02a7836ef0470c504f64b..0000000000000000000000000000000000000000 --- a/spaces/xuxw98/TAPA/llama/generation.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# This software may be used and distributed according to the terms of the GNU General Public License version 3. 
-
-from typing import List
-
-import torch
-
-from llama.tokenizer import Tokenizer
-from llama.model import Transformer
-
-
-class LLaMA:
-    def __init__(self, model: Transformer, tokenizer: Tokenizer, vision_model = None):
-        self.model = model
-        self.tokenizer = tokenizer
-        self.vision_model = vision_model
-
-    def generate(
-        self,
-        prompts: List[str],
-        imgs = None,
-        max_gen_len: int = 512,
-        temperature: float = 0.8,
-        top_p: float = 0.95,
-    ) -> List[str]:
-        bsz = len(prompts)
-        params = self.model.params
-        assert bsz <= params.max_batch_size, (bsz, params.max_batch_size)
-
-        mode = 'instruct'
-        vision_tokens = None
-        if imgs is not None and self.vision_model is not None:
-            vision_tokens = self.vision_model(imgs)
-            mode = 'caption'
-
-        prompt_tokens = [self.tokenizer.encode(x, bos=True, eos=False) for x in prompts]
-
-        min_prompt_size = min([len(t) for t in prompt_tokens])
-        max_prompt_size = max([len(t) for t in prompt_tokens])
-
-        total_len = min(params.max_seq_len, max_gen_len + max_prompt_size)
-
-        tokens = torch.full((bsz, total_len), self.tokenizer.pad_id).cuda().long()
-        for k, t in enumerate(prompt_tokens):
-            tokens[k, : len(t)] = torch.tensor(t).long()
-        input_text_mask = tokens != self.tokenizer.pad_id
-        start_pos = min_prompt_size
-        prev_pos = 0
-        for cur_pos in range(start_pos, total_len):
-            logits = self.model.forward(tokens[:, prev_pos:cur_pos], prev_pos, vision_tokens, mode)
-            if temperature > 0:
-                probs = torch.softmax(logits / temperature, dim=-1)
-                next_token = sample_top_p(probs, top_p)
-            else:
-                next_token = torch.argmax(logits, dim=-1)
-            next_token = next_token.reshape(-1)
-            # only replace token if prompt has already been generated
-            next_token = torch.where(
-                input_text_mask[:, cur_pos], tokens[:, cur_pos], next_token
-            )
-            tokens[:, cur_pos] = next_token
-            prev_pos = cur_pos
-
-        decoded = []
-        for i, t in enumerate(tokens.tolist()):
-            # cut to max gen len
-            t = t[len(prompt_tokens[i]) : len(prompt_tokens[i]) + max_gen_len]
-            # cut to eos tok if any
-            try:
-                t = t[: t.index(self.tokenizer.eos_id)]
-            except ValueError:
-                pass
-            decoded.append(self.tokenizer.decode(t))
-        return decoded
-
-
-def sample_top_p(probs, p):
-    probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
-    probs_sum = torch.cumsum(probs_sort, dim=-1)
-    mask = probs_sum - probs_sort > p
-    probs_sort[mask] = 0.0
-    probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
-    next_token = torch.multinomial(probs_sort, num_samples=1)
-    next_token = torch.gather(probs_idx, -1, next_token)
-    return next_token
diff --git a/spaces/xxccc/gpt-academic/Dockerfile b/spaces/xxccc/gpt-academic/Dockerfile
deleted file mode 100644
index 19d988f6d7da77b6473076700c5831d4abb7e2b9..0000000000000000000000000000000000000000
--- a/spaces/xxccc/gpt-academic/Dockerfile
+++ /dev/null
@@ -1,24 +0,0 @@
-# This Dockerfile is for building a "no local models" environment; to use local models such as chatglm, see docs/Dockerfile+ChatGLM
-# How to build: edit `config.py` first, then docker build -t gpt-academic .
-# How to run: docker run --rm -it --net=host gpt-academic
-FROM python:3.11
-
-RUN echo '[global]' > /etc/pip.conf && \
-    echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
-    echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf
-
-
-WORKDIR /gpt
-
-# Copy the project files
-COPY . . 
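-# Note: because the sources are copied before "pip install", any code change invalidates
-# the dependency layer below; copying requirements.txt first would keep pip installs cached.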
-
-# Install dependencies
-RUN pip3 install -r requirements.txt
-
-
-# Optional step: warm up the modules
-RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
-
-# Start
-CMD ["python3", "-u", "main.py"]
diff --git a/spaces/yanli01/wrwj/run_macOS.command b/spaces/yanli01/wrwj/run_macOS.command
deleted file mode 100644
index 62af07283093d8e580763d7acfe493c3d88e7b08..0000000000000000000000000000000000000000
--- a/spaces/yanli01/wrwj/run_macOS.command
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# Get the directory this script lives in
-script_dir=$(dirname "$0")
-
-# Change the working directory to the script directory
-cd "$script_dir"
-
-# Check whether the Git repository has updates
-git remote update
-pwd
-
-if ! git status -uno | grep 'up to date' > /dev/null; then
-    # If there are updates, stop the currently running server
-    pkill -f ChuanhuChatbot.py
-
-    # Pull the latest changes
-    git pull
-
-    # Install dependencies
-    pip3 install -r requirements.txt
-
-    # Restart the server
-    nohup python3 ChuanhuChatbot.py &
-fi
diff --git a/spaces/yaoshining/text-generation-webui/docs/DeepSpeed.md b/spaces/yaoshining/text-generation-webui/docs/DeepSpeed.md
deleted file mode 100644
index 6170f6819ca072ff50fd1146b64d73f74ab00473..0000000000000000000000000000000000000000
--- a/spaces/yaoshining/text-generation-webui/docs/DeepSpeed.md
+++ /dev/null
@@ -1,24 +0,0 @@
-An alternative way of reducing the GPU memory usage of models is to use the `DeepSpeed ZeRO-3` optimization.
-
-With this, I have been able to load a 6b model (GPT-J 6B) with less than 6GB of VRAM. The speed of text generation is very decent and much better than what would be accomplished with `--auto-devices --gpu-memory 6`.
-
-As far as I know, DeepSpeed is only available for Linux at the moment.
-
-### How to use it
-
-1. Install DeepSpeed:
-
-```
-conda install -c conda-forge mpi4py mpich
-pip install -U deepspeed
-```
-
-2. Start the web UI replacing `python` with `deepspeed --num_gpus=1` and adding the `--deepspeed` flag. Example:
-
-```
-deepspeed --num_gpus=1 server.py --deepspeed --chat --model gpt-j-6B
-```
-
-### Learn more
-
-For more information, check out [this comment](https://github.com/oobabooga/text-generation-webui/issues/40#issuecomment-1412038622) by 81300, who came up with the DeepSpeed support in this web UI. 
\ No newline at end of file
diff --git a/spaces/yaoshining/text-generation-webui/extensions/silero_tts/test_tts.py b/spaces/yaoshining/text-generation-webui/extensions/silero_tts/test_tts.py
deleted file mode 100644
index ebc2c102a9ef29f21141429232f957421989cdd4..0000000000000000000000000000000000000000
--- a/spaces/yaoshining/text-generation-webui/extensions/silero_tts/test_tts.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import time
-from pathlib import Path
-
-import torch
-import tts_preprocessor
-
-torch._C._jit_set_profiling_mode(False)
-
-
-params = {
-    'activate': True,
-    'speaker': 'en_49',
-    'language': 'en',
-    'model_id': 'v3_en',
-    'sample_rate': 48000,
-    'device': 'cpu',
-    'show_text': True,
-    'autoplay': True,
-    'voice_pitch': 'medium',
-    'voice_speed': 'medium',
-}
-
-current_params = params.copy()
-voices_by_gender = ['en_99', 'en_45', 'en_18', 'en_117', 'en_49', 'en_51', 'en_68', 'en_0', 'en_26', 'en_56', 'en_74', 'en_5', 'en_38', 'en_53', 'en_21', 'en_37', 'en_107', 'en_10', 'en_82', 'en_16', 'en_41', 'en_12', 'en_67', 'en_61', 'en_14', 'en_11', 'en_39', 'en_52', 'en_24', 'en_97', 'en_28', 'en_72', 'en_94', 'en_36', 'en_4', 'en_43', 'en_88', 'en_25', 'en_65', 'en_6', 'en_44', 'en_75', 'en_91', 'en_60', 'en_109', 'en_85', 'en_101', 'en_108', 'en_50', 'en_96', 'en_64', 'en_92', 'en_76', 'en_33', 'en_116', 'en_48', 'en_98', 'en_86', 'en_62', 'en_54', 'en_95', 'en_55', 'en_111', 'en_3', 'en_83', 'en_8', 'en_47', 'en_59', 'en_1', 'en_2', 'en_7', 'en_9', 'en_13', 'en_15', 'en_17', 'en_19', 'en_20', 'en_22', 'en_23', 'en_27', 'en_29', 'en_30', 'en_31', 'en_32', 'en_34', 'en_35', 'en_40', 'en_42', 'en_46', 'en_57', 'en_58', 'en_63', 'en_66', 'en_69', 'en_70', 'en_71', 'en_73', 'en_77', 'en_78', 'en_79', 'en_80', 'en_81', 'en_84', 'en_87', 'en_89', 'en_90', 'en_93', 'en_100', 'en_102', 'en_103', 'en_104', 'en_105', 'en_106', 'en_110', 'en_112', 'en_113', 'en_114', 'en_115']
-voice_pitches = ['x-low', 'low', 'medium', 'high', 'x-high']
-voice_speeds = ['x-slow', 'slow', 'medium', 'fast', 'x-fast']
-
-# Used for making text xml compatible, needed for voice pitch and speed control
-table = str.maketrans({
-    "<": "&lt;",
-    ">": "&gt;",
-    "&": "&amp;",
-    "'": "&apos;",
-    '"': "&quot;",
-})
-
-
-def xmlesc(txt):
-    return txt.translate(table)
-
-
-def load_model():
-    model, example_text = torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts', language=params['language'], speaker=params['model_id'])
-    model.to(params['device'])
-    return model
-
-
-model = load_model()
-
-
-def output_modifier(string):
-    """
-    This function is applied to the model outputs. 
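-    The reply is preprocessed, synthesized to a wav with Silero, and replaced by an <audio> tag.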
- """ - - global model, current_params - - original_string = string - string = tts_preprocessor.preprocess(string) - processed_string = string - - if string == '': - string = '*Empty reply, try regenerating*' - else: - output_file = Path(f'extensions/silero_tts/outputs/test_{int(time.time())}.wav') - prosody = ''.format(params['voice_speed'], params['voice_pitch']) - silero_input = f'{prosody}{xmlesc(string)}' - model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file)) - - autoplay = 'autoplay' if params['autoplay'] else '' - string = f'' - - if params['show_text']: - string += f'\n\n{original_string}\n\nProcessed:\n{processed_string}' - - print(string) - - -if __name__ == '__main__': - import sys - output_modifier(sys.argv[1]) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/gpt_sw3/convert_megatron_to_pytorch.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/gpt_sw3/convert_megatron_to_pytorch.py deleted file mode 100644 index 5562efa287475be8786c28845124795951f6bfa6..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/gpt_sw3/convert_megatron_to_pytorch.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright 2022 The HuggingFace Inc. team and the AI-Sweden team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Convert GPT-SW3 megatron checkpoints to pytorch""" - -import argparse -import os -from os.path import isfile - -import torch - -from transformers import GPT2Config - - -def recursive_print(name, val, spaces=0): - # Format the message. - if name is None: - msg = None - else: - fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}" - msg = fmt.format(name) - - # Print and recurse (if needed). - if isinstance(val, dict): - if msg is not None: - print(msg) - for k in val.keys(): - recursive_print(k, val[k], spaces + 2) - elif isinstance(val, torch.Tensor): - print(msg, ":", val.size()) - else: - print(msg, ":", val) - - -def fix_query_key_value_ordering(param, num_splits, num_heads, hidden_size): - # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] - # for compatibility with later versions of NVIDIA Megatron-LM. - # The inverse operation is performed inside Megatron-LM to read checkpoints: - # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 - # If param is the weight tensor of the self-attention block, the returned tensor - # will have to be transposed one more time to be read by HuggingFace GPT2. 
- input_shape = param.size() - # other versions store [num_heads * num_splits * hidden_size, :] - saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:] - param = param.view(*saved_shape) - param = param.transpose(0, 1).contiguous() - param = param.view(*input_shape) - return param - - -def convert_megatron_checkpoint(sd_megatron, config): - """ - Converts a Megatron checkpoint to a HuggingFace GPT-SW3 checkpoint. - """ - n_positions = config.n_positions - layers = config.n_layer - vocab_size = config.vocab_size - heads = config.n_head - hidden_size_per_head = config.n_embd // config.n_head - - word_embeddings = sd_megatron["model.language_model.embedding.word_embeddings.weight"][:vocab_size, :] - sd_hf = { - "transformer.wte.weight": word_embeddings, - "transformer.wpe.weight": sd_megatron["model.language_model.embedding.position_embeddings.weight"], - "transformer.ln_f.weight": sd_megatron["model.language_model.encoder.final_layernorm.weight"], - "transformer.ln_f.bias": sd_megatron["model.language_model.encoder.final_layernorm.bias"], - } - - pf = "model.language_model.encoder.layers." - for i in range(layers): - causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.bool)) - causal_mask = causal_mask.view(1, 1, n_positions, n_positions) - sd_hf[f"transformer.h.{i}.attn.bias"] = causal_mask - sd_hf[f"transformer.h.{i}.attn.masked_bias"] = torch.tensor(-1e4, dtype=torch.bfloat16) - - sd_hf[f"transformer.h.{i}.ln_1.weight"] = sd_megatron[f"{pf}{i}.input_layernorm.weight"] - sd_hf[f"transformer.h.{i}.ln_1.bias"] = sd_megatron[f"{pf}{i}.input_layernorm.bias"] - - val1 = sd_megatron[f"{pf}{i}.self_attention.query_key_value.weight"] - val1 = fix_query_key_value_ordering(val1, 3, heads, hidden_size_per_head) - sd_hf[f"transformer.h.{i}.attn.c_attn.weight"] = val1.transpose(0, 1).contiguous() - - val2 = sd_megatron[f"{pf}{i}.self_attention.query_key_value.bias"] - val2 = fix_query_key_value_ordering(val2, 3, heads, hidden_size_per_head) - sd_hf[f"transformer.h.{i}.attn.c_attn.bias"] = val2 - - sd_hf[f"transformer.h.{i}.attn.c_proj.weight"] = sd_megatron[f"{pf}{i}.self_attention.dense.weight"].transpose( - 0, 1 - ) - sd_hf[f"transformer.h.{i}.attn.c_proj.bias"] = sd_megatron[f"{pf}{i}.self_attention.dense.bias"] - sd_hf[f"transformer.h.{i}.ln_2.weight"] = sd_megatron[f"{pf}{i}.post_attention_layernorm.weight"] - sd_hf[f"transformer.h.{i}.ln_2.bias"] = sd_megatron[f"{pf}{i}.post_attention_layernorm.bias"] - sd_hf[f"transformer.h.{i}.mlp.c_fc.weight"] = sd_megatron[f"{pf}{i}.mlp.dense_h_to_4h.weight"].transpose(0, 1) - sd_hf[f"transformer.h.{i}.mlp.c_fc.bias"] = sd_megatron[f"{pf}{i}.mlp.dense_h_to_4h.bias"] - sd_hf[f"transformer.h.{i}.mlp.c_proj.weight"] = sd_megatron[f"{pf}{i}.mlp.dense_4h_to_h.weight"].transpose( - 0, 1 - ) - sd_hf[f"transformer.h.{i}.mlp.c_proj.bias"] = sd_megatron[f"{pf}{i}.mlp.dense_4h_to_h.bias"] - - # For LM head, transformers' wants the matrix to weight embeddings. 
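Aside: the `lm_head.weight` assignment that follows is GPT-2-style weight tying, where the output projection reuses the input embedding matrix instead of storing a second one. A minimal sketch of the idea, with made-up toy shapes:

```python
import torch

vocab_size, n_embd = 8, 4
wte = torch.randn(vocab_size, n_embd)  # stands in for transformer.wte.weight
lm_head_weight = wte                   # tied: the very same tensor, not a copy

hidden = torch.randn(1, n_embd)        # one hidden state from the final layer
logits = hidden @ lm_head_weight.T     # -> shape [1, vocab_size]
print(logits.shape)                    # torch.Size([1, 8])
```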
- sd_hf["lm_head.weight"] = word_embeddings - - return sd_hf - - -def copy_config(config_hf, config_megatron): - """Copy the config from Megatron to hf.""" - config_hf.vocab_size = 64000 - config_hf.n_positions = config_megatron["encoder_seq_length"] - config_hf.n_embd = config_megatron["hidden_size"] - config_hf.n_layer = config_megatron["num_layers"] - config_hf.n_head = config_megatron["num_attention_heads"] - config_hf.n_inner = config_megatron["ffn_hidden_size"] - config_hf.activation_function = "gelu" - config_hf.resid_pdrop = 0.1 - config_hf.embd_pdrop = 0.1 - config_hf.attn_pdrop = 0.1 - config_hf.layer_norm_epsilon = config_megatron["layernorm_epsilon"] # 1e-5 - config_hf.initializer_range = config_megatron["init_method_std"] # 0.02 - config_hf.apply_query_key_layer_scaling = config_megatron["apply_query_key_layer_scaling"] # True - config_hf.normalize_attention_scores = True - config_hf.use_cache = True - - # This identifies the 6.7B (7B) model which uses a different tokenizer - if config_megatron["hidden_size"] == 4096: - config_hf.bos_token_id = 1 # <|endoftext|> - config_hf.eos_token_id = 1 # <|endoftext|> - config_hf.pad_token_id = 0 # - else: - config_hf.bos_token_id = 2 # - config_hf.eos_token_id = 3 # <|endoftext|> - config_hf.pad_token_id = 0 # - - return config_hf - - -def main(args): - print(args) - - checkpoint_path = args.checkpoint_path - save_path = args.save_path - if isfile(checkpoint_path): - raise FileNotFoundError(f"ERROR! could not find file {checkpoint_path}") - - # Load the model. - checkpoint = torch.load(checkpoint_path, map_location="cpu") - - # Load the config. - config_megatron = checkpoint["hyper_parameters"]["cfg"] - config_hf = GPT2Config() - config_hf = copy_config(config_hf=config_hf, config_megatron=config_megatron) - config_hf.architectures = ["GPT2LMHeadModel"] - - sd_megatron = checkpoint["state_dict"] - - # Convert. - print("Converting") - sd_hf = convert_megatron_checkpoint(sd_megatron, config_hf) - - # Print the structure of converted state dict. - if args.print_checkpoint_structure: - recursive_print(None, sd_hf) - - config_hf.tokenizer_class = "GPTSw3Tokenizer" - - # Store the config to file. - print("Saving config") - config_hf.save_pretrained(save_path) - - # Store the state_dict to file. - output_checkpoint_file = os.path.join(save_path, "pytorch_model.bin") - print(f'Saving checkpoint to "{output_checkpoint_file}"') - torch.save(sd_hf, output_checkpoint_file) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "--checkpoint_path", - type=str, - required=True, - help="e.g. megatron_gpt--val_loss=2.42-step=38000-consumed_samples=54720000", - ) - parser.add_argument("--save_path", type=str, required=True, help="e.g. /home/user/gpt-sw3/hf") - parser.add_argument("--print-checkpoint-structure", action="store_true") - _args = parser.parse_args() - main(_args) diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/roi_heads/custom_roi_heads.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/roi_heads/custom_roi_heads.py deleted file mode 100644 index 90fadf1a9667cf836223945b22c5147b89ad98a4..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/roi_heads/custom_roi_heads.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import numpy as np -import json -import math -import torch -from torch import nn -from torch.autograd.function import Function -from typing import Dict, List, Optional, Tuple, Union - -from detectron2.layers import ShapeSpec -from detectron2.structures import Boxes, Instances, pairwise_iou -from detectron2.utils.events import get_event_storage - -from detectron2.modeling.box_regression import Box2BoxTransform -from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference -from detectron2.modeling.roi_heads.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads -from detectron2.modeling.roi_heads.cascade_rcnn import CascadeROIHeads -from detectron2.modeling.roi_heads.box_head import build_box_head -from .custom_fast_rcnn import CustomFastRCNNOutputLayers - - -@ROI_HEADS_REGISTRY.register() -class CustomROIHeads(StandardROIHeads): - @classmethod - def _init_box_head(self, cfg, input_shape): - ret = super()._init_box_head(cfg, input_shape) - del ret['box_predictor'] - ret['box_predictor'] = CustomFastRCNNOutputLayers( - cfg, ret['box_head'].output_shape) - self.debug = cfg.DEBUG - if self.debug: - self.debug_show_name = cfg.DEBUG_SHOW_NAME - self.save_debug = cfg.SAVE_DEBUG - self.vis_thresh = cfg.VIS_THRESH - self.pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to( - torch.device(cfg.MODEL.DEVICE)).view(3, 1, 1) - self.pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to( - torch.device(cfg.MODEL.DEVICE)).view(3, 1, 1) - return ret - - def forward(self, images, features, proposals, targets=None): - """ - enable debug - """ - if not self.debug: - del images - if self.training: - assert targets - proposals = self.label_and_sample_proposals(proposals, targets) - del targets - - if self.training: - losses = self._forward_box(features, proposals) - losses.update(self._forward_mask(features, proposals)) - losses.update(self._forward_keypoint(features, proposals)) - return proposals, losses - else: - pred_instances = self._forward_box(features, proposals) - pred_instances = self.forward_with_given_boxes(features, pred_instances) - if self.debug: - from ..debug import debug_second_stage - denormalizer = lambda x: x * self.pixel_std + self.pixel_mean - debug_second_stage( - [denormalizer(images[0].clone())], - pred_instances, proposals=proposals, - debug_show_name=self.debug_show_name) - return pred_instances, {} - - -@ROI_HEADS_REGISTRY.register() -class CustomCascadeROIHeads(CascadeROIHeads): - @classmethod - def _init_box_head(self, cfg, input_shape): - self.mult_proposal_score = cfg.MODEL.ROI_BOX_HEAD.MULT_PROPOSAL_SCORE - ret = super()._init_box_head(cfg, input_shape) - del ret['box_predictors'] - cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS - box_predictors = [] - for box_head, bbox_reg_weights in zip(ret['box_heads'], cascade_bbox_reg_weights): - box_predictors.append( - CustomFastRCNNOutputLayers( - cfg, box_head.output_shape, - box2box_transform=Box2BoxTransform(weights=bbox_reg_weights) - )) - ret['box_predictors'] = box_predictors - self.debug = cfg.DEBUG - if self.debug: - self.debug_show_name = cfg.DEBUG_SHOW_NAME - self.save_debug = cfg.SAVE_DEBUG - self.vis_thresh = cfg.VIS_THRESH - self.pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to( - torch.device(cfg.MODEL.DEVICE)).view(3, 1, 1) - self.pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to( - torch.device(cfg.MODEL.DEVICE)).view(3, 1, 1) - return ret - - - def _forward_box(self, features, proposals, targets=None): - """ - Add mult proposal scores at testing - """ - if (not 
self.training) and self.mult_proposal_score: - if len(proposals) > 0 and proposals[0].has('scores'): - proposal_scores = [ - p.get('scores') for p in proposals] - else: - proposal_scores = [ - p.get('objectness_logits') for p in proposals] - - features = [features[f] for f in self.box_in_features] - head_outputs = [] # (predictor, predictions, proposals) - prev_pred_boxes = None - image_sizes = [x.image_size for x in proposals] - for k in range(self.num_cascade_stages): - if k > 0: - proposals = self._create_proposals_from_boxes(prev_pred_boxes, image_sizes) - if self.training: - proposals = self._match_and_label_boxes(proposals, k, targets) - predictions = self._run_stage(features, proposals, k) - prev_pred_boxes = self.box_predictor[k].predict_boxes(predictions, proposals) - head_outputs.append((self.box_predictor[k], predictions, proposals)) - - if self.training: - losses = {} - storage = get_event_storage() - for stage, (predictor, predictions, proposals) in enumerate(head_outputs): - with storage.name_scope("stage{}".format(stage)): - stage_losses = predictor.losses(predictions, proposals) - losses.update({k + "_stage{}".format(stage): v for k, v in stage_losses.items()}) - return losses - else: - # Each is a list[Tensor] of length #image. Each tensor is Ri x (K+1) - scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs] - scores = [ - sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages) - for scores_per_image in zip(*scores_per_stage) - ] - - if self.mult_proposal_score: - scores = [(s * ps[:, None]) ** 0.5 \ - for s, ps in zip(scores, proposal_scores)] - - predictor, predictions, proposals = head_outputs[-1] - boxes = predictor.predict_boxes(predictions, proposals) - pred_instances, _ = fast_rcnn_inference( - boxes, - scores, - image_sizes, - predictor.test_score_thresh, - predictor.test_nms_thresh, - predictor.test_topk_per_image, - ) - - return pred_instances - - def forward(self, images, features, proposals, targets=None): - ''' - enable debug - ''' - if not self.debug: - del images - if self.training: - proposals = self.label_and_sample_proposals(proposals, targets) - - if self.training: - losses = self._forward_box(features, proposals, targets) - losses.update(self._forward_mask(features, proposals)) - losses.update(self._forward_keypoint(features, proposals)) - return proposals, losses - else: - # import pdb; pdb.set_trace() - pred_instances = self._forward_box(features, proposals) - pred_instances = self.forward_with_given_boxes(features, pred_instances) - if self.debug: - from ..debug import debug_second_stage - denormalizer = lambda x: x * self.pixel_std + self.pixel_mean - debug_second_stage( - [denormalizer(x.clone()) for x in images], - pred_instances, proposals=proposals, - save_debug=self.save_debug, - debug_show_name=self.debug_show_name, - vis_thresh=self.vis_thresh) - return pred_instances, {} - - diff --git a/spaces/yonikremer/grouped-sampling-demo/app.py b/spaces/yonikremer/grouped-sampling-demo/app.py deleted file mode 100644 index bb9bb54b34c923f189244a9adb553f572036728c..0000000000000000000000000000000000000000 --- a/spaces/yonikremer/grouped-sampling-demo/app.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -The Streamlit app for the project demo. -In the demo, the user can write a prompt - and the model will generate a response using the grouped sampling algorithm. 
-""" - -import streamlit as st -from torch.cuda import CudaError - -from available_models import AVAILABLE_MODELS -from hanlde_form_submit import on_form_submit - - -st.title("A Single Usage is All You Need - Demo") - -with st.form("request_form"): - selected_model_name: str = st.selectbox( - label="choose a model", - options=AVAILABLE_MODELS, - help="opt-iml-max-30b generates better texts but is slower", - ) - - output_length: int = st.number_input( - label="the length of the output (in tokens)", - min_value=1, - max_value=512, - value=5, - ) - - submitted_prompt: str = st.text_area( - label="prompt", - value=""" - Keywords: cat, look, mouse - What is a sentence that includes all these keywords? - Answer:""", - max_chars=1024, - ) - - submitted: bool = st.form_submit_button( - label="generate text", - disabled=False, - ) - - if submitted: - try: - output = on_form_submit( - selected_model_name, - output_length, - submitted_prompt, - ) - except CudaError as e: - st.error("Out of memory. Please try a smaller model, shorter prompt, or a smaller output length.") - except (ValueError, TypeError, RuntimeError) as e: - st.error(e) - else: - st.write(f"Generated text: {output}") diff --git a/spaces/younker/chatgpt-turbo/client/node_modules/browserslist/cli.js b/spaces/younker/chatgpt-turbo/client/node_modules/browserslist/cli.js deleted file mode 100644 index c54aa9879288e92ea7fb01bc5a569f5796947bd5..0000000000000000000000000000000000000000 --- a/spaces/younker/chatgpt-turbo/client/node_modules/browserslist/cli.js +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env node - -var updateDb = require('update-browserslist-db') -var fs = require('fs') - -var browserslist = require('./') -var pkg = require('./package.json') - -var args = process.argv.slice(2) - -var USAGE = - 'Usage:\n' + - ' npx browserslist\n' + - ' npx browserslist "QUERIES"\n' + - ' npx browserslist --json "QUERIES"\n' + - ' npx browserslist --config="path/to/browserlist/file"\n' + - ' npx browserslist --coverage "QUERIES"\n' + - ' npx browserslist --coverage=US "QUERIES"\n' + - ' npx browserslist --coverage=US,RU,global "QUERIES"\n' + - ' npx browserslist --env="environment name defined in config"\n' + - ' npx browserslist --stats="path/to/browserlist/stats/file"\n' + - ' npx browserslist --mobile-to-desktop\n' + - ' npx browserslist --ignore-unknown-versions\n' - -function isArg(arg) { - return args.some(function (str) { - return str === arg || str.indexOf(arg + '=') === 0 - }) -} - -function error(msg) { - process.stderr.write('browserslist: ' + msg + '\n') - process.exit(1) -} - -if (isArg('--help') || isArg('-h')) { - process.stdout.write(pkg.description + '.\n\n' + USAGE + '\n') -} else if (isArg('--version') || isArg('-v')) { - process.stdout.write('browserslist ' + pkg.version + '\n') -} else if (isArg('--update-db')) { - /* c8 ignore next 3 */ - updateDb(function (str) { - process.stdout.write(str) - }) -} else { - var mode = 'browsers' - var opts = {} - var queries - var areas - - for (var i = 0; i < args.length; i++) { - if (args[i][0] !== '-') { - queries = args[i].replace(/^["']|["']$/g, '') - continue - } - - var arg = args[i].split('=') - var name = arg[0] - var value = arg[1] - - if (value) value = value.replace(/^["']|["']$/g, '') - - if (name === '--config' || name === '-b') { - opts.config = value - } else if (name === '--env' || name === '-e') { - opts.env = value - } else if (name === '--stats' || name === '-s') { - opts.stats = value - } else if (name === '--coverage' || name === '-c') { - if (mode !== 'json') mode 
= 'coverage' - if (value) { - areas = value.split(',') - } else { - areas = ['global'] - } - } else if (name === '--json') { - mode = 'json' - } else if (name === '--mobile-to-desktop') { - /* c8 ignore next */ - opts.mobileToDesktop = true - } else if (name === '--ignore-unknown-versions') { - /* c8 ignore next */ - opts.ignoreUnknownVersions = true - } else { - error('Unknown arguments ' + args[i] + '.\n\n' + USAGE) - } - } - - var browsers - try { - browsers = browserslist(queries, opts) - } catch (e) { - if (e.name === 'BrowserslistError') { - error(e.message) - } /* c8 ignore start */ else { - throw e - } /* c8 ignore end */ - } - - var coverage - if (mode === 'browsers') { - browsers.forEach(function (browser) { - process.stdout.write(browser + '\n') - }) - } else if (areas) { - coverage = areas.map(function (area) { - var stats - if (area !== 'global') { - stats = area - } else if (opts.stats) { - stats = JSON.parse(fs.readFileSync(opts.stats)) - } - var result = browserslist.coverage(browsers, stats) - var round = Math.round(result * 100) / 100.0 - - return [area, round] - }) - - if (mode === 'coverage') { - var prefix = 'These browsers account for ' - process.stdout.write(prefix) - coverage.forEach(function (data, index) { - var area = data[0] - var round = data[1] - var end = 'globally' - if (area && area !== 'global') { - end = 'in the ' + area.toUpperCase() - } else if (opts.stats) { - end = 'in custom statistics' - } - - if (index !== 0) { - process.stdout.write(prefix.replace(/./g, ' ')) - } - - process.stdout.write(round + '% of all users ' + end + '\n') - }) - } - } - - if (mode === 'json') { - var data = { browsers: browsers } - if (coverage) { - data.coverage = coverage.reduce(function (object, j) { - object[j[0]] = j[1] - return object - }, {}) - } - process.stdout.write(JSON.stringify(data, null, ' ') + '\n') - } -} diff --git a/spaces/ysharma/Stream_PlaygroundAI_Images/README.md b/spaces/ysharma/Stream_PlaygroundAI_Images/README.md deleted file mode 100644 index a6dd557506ae63de7cb642a0b692d0468f5883c0..0000000000000000000000000000000000000000 --- a/spaces/ysharma/Stream_PlaygroundAI_Images/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stream PlaygroundAI Images -emoji: 📊 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/yuzu34/rvc-hololive/infer_pack/models.py b/spaces/yuzu34/rvc-hololive/infer_pack/models.py deleted file mode 100644 index 96165f73644e6fb92d0ffedb4a3c9e1a457cb989..0000000000000000000000000000000000000000 --- a/spaces/yuzu34/rvc-hololive/infer_pack/models.py +++ /dev/null @@ -1,982 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels 
= hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder256Sim(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - 
super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - 
voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def 
forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = 
filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - 
self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_sim(nn.Module): - """ - Synthesizer for Training - """ - - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - # hop_length, - gin_channels=0, - use_sdp=True, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256Sim( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, 
- is_half=kwargs["is_half"], - ) - - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y_lengths, ds - ): # y是spec不需要了现在 - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - z_slice, ids_slice = commons.rand_slice_segments( - x, y_lengths, self.segment_size - ) - - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice - - def infer( - self, phone, phone_lengths, pitch, pitchf, ds, max_len=None - ): # y是spec不需要了现在 - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g) - return o, o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 
512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/ywqisok/ysyy/transforms.py b/spaces/ywqisok/ysyy/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/ywqisok/ysyy/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - 
unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = 
theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/zej97/AI-Research-Assistant/config/__init__.py b/spaces/zej97/AI-Research-Assistant/config/__init__.py deleted file mode 100644 index 704ba0f8fd18eac290da02e5b1d22d4be54b235f..0000000000000000000000000000000000000000 --- a/spaces/zej97/AI-Research-Assistant/config/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from config.config import Config, check_openai_api_key -from config.singleton import AbstractSingleton, Singleton - -__all__ = [ - "check_openai_api_key", - "AbstractSingleton", - "Config", - "Singleton", -] diff --git a/spaces/zhan66/vits-uma-genshin-honkai/app.py b/spaces/zhan66/vits-uma-genshin-honkai/app.py deleted file mode 100644 index ba29f6a5aff153461017c2e11e03a8765581c0d5..0000000000000000000000000000000000000000 --- a/spaces/zhan66/vits-uma-genshin-honkai/app.py +++ /dev/null @@ -1,150 +0,0 @@ -# coding=utf-8 -import time -import os -import gradio as gr -import utils -import argparse -import commons -from models import SynthesizerTrn -from text import text_to_sequence -import torch -from torch import no_grad, LongTensor -import webbrowser -import logging -import gradio.processing_utils as gr_processing_utils -logging.getLogger('numba').setLevel(logging.WARNING) -limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces - -audio_postprocess_ori = gr.Audio.postprocess -def audio_postprocess(self, y): - data = audio_postprocess_ori(self, y) - if data is None: - return None - return gr_processing_utils.encode_url_or_file_to_base64(data["name"]) -gr.Audio.postprocess = audio_postprocess - -def get_text(text, hps): - text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm, clean_text - -def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale): - start = time.perf_counter() - if not len(text): - return "输入文本不能为空!", None, None - text = text.replace('\n', ' ').replace('\r', '').replace(" ", "") - if len(text) > 100 and limitation: - return f"输入文字过长!{len(text)}>100", None, None - if language == 0: - text = f"[ZH]{text}[ZH]" - elif language == 1: - text = f"[JA]{text}[JA]" - else: - text = f"{text}" - stn_tst, clean_text = get_text(text, hps_ms) - with no_grad(): - x_tst = stn_tst.unsqueeze(0).to(device) - x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device) - speaker_id = LongTensor([speaker_id]).to(device) - audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w, - length_scale=length_scale)[0][0, 0].data.cpu().float().numpy() - - return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s" - -def search_speaker(search_value): - for s in speakers: - if search_value == s: - return s - for s in speakers: - if search_value in s: - return s - -def change_lang(language): - if language == 0: - 
return 0.6, 0.668, 1.2 - else: - return 0.6, 0.668, 1.1 - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#tts-audio").querySelector("audio"); - let text = root.querySelector("#input-text").querySelector("textarea"); - if (audio == undefined) - return; - text = text.value; - if (text == undefined) - text = Math.floor(Math.random()*100000000); - audio = audio.src; - let oA = document.createElement("a"); - oA.download = text.substr(0, 20)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cpu') - parser.add_argument('--api', action="store_true", default=False) - parser.add_argument("--share", action="store_true", default=False, help="share gradio app") - parser.add_argument("--colab", action="store_true", default=False, help="share gradio app") - args = parser.parse_args() - device = torch.device(args.device) - - hps_ms = utils.get_hparams_from_file(r'./model/config.json') - net_g_ms = SynthesizerTrn( - len(hps_ms.symbols), - hps_ms.data.filter_length // 2 + 1, - hps_ms.train.segment_size // hps_ms.data.hop_length, - n_speakers=hps_ms.data.n_speakers, - **hps_ms.model) - _ = net_g_ms.eval().to(device) - speakers = hps_ms.speakers - model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None) - - with gr.Blocks() as app: - gr.Markdown( - "#
 VITS online text-to-speech demo\n"
-            "# Commercial use of this model in any project is strictly forbidden; violators bear all consequences\n"
-            "Voices mainly include Uma Musume Pretty Derby, Genshin Impact (Chinese), Genshin Impact (Japanese) and Honkai Impact 3rd"
-            ''
-            ''
-        )
-
-        with gr.Tabs():
-            with gr.TabItem("vits"):
-                with gr.Row():
-                    with gr.Column():
-                        input_text = gr.Textbox(label="Text (100 words limitation) " if limitation else "Text", lines=5, value="今天晚上吃啥好呢。", elem_id=f"input-text")
-                        lang = gr.Dropdown(label="Language", choices=["Chinese", "Japanese", "Mixed Chinese & Japanese (wrap Chinese in [ZH][ZH] and Japanese in [JA][JA])"],
-                                           type="index", value="Chinese")
-                        btn = gr.Button(value="Submit")
-                        with gr.Row():
-                            search = gr.Textbox(label="Search Speaker", lines=1)
-                            btn2 = gr.Button(value="Search")
-                            sid = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[228])
-                        with gr.Row():
-                            ns = gr.Slider(label="noise_scale (controls variation in emotion)", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
-                            nsw = gr.Slider(label="noise_scale_w (controls phoneme duration)", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
-                            ls = gr.Slider(label="length_scale (controls overall speaking speed)", minimum=0.1, maximum=2.0, step=0.1, value=1.2, interactive=True)
-                    with gr.Column():
-                        o1 = gr.Textbox(label="Output Message")
-                        o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio")
-                        o3 = gr.Textbox(label="Extra Info")
-                        download = gr.Button("Download Audio")
-                        btn.click(vits, inputs=[input_text, lang, sid, ns, nsw, ls], outputs=[o1, o2, o3])
-                        download.click(None, [], [], _js=download_audio_js.format())
-                        btn2.click(search_speaker, inputs=[search], outputs=[sid])
-                        lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
-            with gr.TabItem("Speaker list"):
-                gr.Radio(label="Speaker", choices=speakers, interactive=False, type="index")
-    if args.colab:
-        webbrowser.open("http://127.0.0.1:7860")
-    app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)
diff --git a/spaces/zhang-wei-jian/docker/node_modules/koa-compose/Readme.md b/spaces/zhang-wei-jian/docker/node_modules/koa-compose/Readme.md
deleted file mode 100644
index 8fa6a392405df7c229bbe727cadef7e16767ab4b..0000000000000000000000000000000000000000
--- a/spaces/zhang-wei-jian/docker/node_modules/koa-compose/Readme.md
+++ /dev/null
@@ -1,40 +0,0 @@
-
-# koa-compose
-
-[![NPM version][npm-image]][npm-url]
-[![Build status][travis-image]][travis-url]
-[![Test coverage][codecov-image]][codecov-url]
-[![Dependency Status][david-image]][david-url]
-[![License][license-image]][license-url]
-[![Downloads][downloads-image]][downloads-url]
-
-  Compose middleware.
-
-## Installation
-
-```sh
-$ npm install koa-compose
-```
-
-## API
-
-### compose([a, b, c, ...])
-
-  Compose the given middleware and return middleware.
- -## License - - MIT - -[npm-image]: https://img.shields.io/npm/v/koa-compose.svg?style=flat-square -[npm-url]: https://npmjs.org/package/koa-compose -[travis-image]: https://img.shields.io/travis/koajs/compose/next.svg?style=flat-square -[travis-url]: https://travis-ci.org/koajs/compose -[codecov-image]: https://img.shields.io/codecov/c/github/koajs/compose/next.svg?style=flat-square -[codecov-url]: https://codecov.io/github/koajs/compose -[david-image]: http://img.shields.io/david/koajs/compose.svg?style=flat-square -[david-url]: https://david-dm.org/koajs/compose -[license-image]: http://img.shields.io/npm/l/koa-compose.svg?style=flat-square -[license-url]: LICENSE -[downloads-image]: http://img.shields.io/npm/dm/koa-compose.svg?style=flat-square -[downloads-url]: https://npmjs.org/package/koa-compose diff --git a/spaces/zhigangjiang/3D-Room-Layout-Estimation_LGT-Net/dataset/pano_s2d3d_mix_dataset.py b/spaces/zhigangjiang/3D-Room-Layout-Estimation_LGT-Net/dataset/pano_s2d3d_mix_dataset.py deleted file mode 100644 index d8f8444b20f89b1c1b1ad274c7c7d0274ef5aa2f..0000000000000000000000000000000000000000 --- a/spaces/zhigangjiang/3D-Room-Layout-Estimation_LGT-Net/dataset/pano_s2d3d_mix_dataset.py +++ /dev/null @@ -1,91 +0,0 @@ -""" -@date: 2021/6/16 -@description: -""" - -import os - -from dataset.pano_s2d3d_dataset import PanoS2D3DDataset -from utils.logger import get_logger - - -class PanoS2D3DMixDataset(PanoS2D3DDataset): - def __init__(self, root_dir, mode, shape=None, max_wall_num=0, aug=None, camera_height=1.6, logger=None, - split_list=None, patch_num=256, keys=None, for_test_index=None, subset=None): - assert subset == 's2d3d' or subset == 'pano', 'error subset' - super().__init__(root_dir, None, shape, max_wall_num, aug, camera_height, logger, - split_list, patch_num, keys, None, subset) - if logger is None: - logger = get_logger() - self.mode = mode - if mode == 'train': - if subset == 'pano': - s2d3d_train_data = PanoS2D3DDataset(root_dir, 'train', shape, max_wall_num, aug, camera_height, logger, - split_list, patch_num, keys, None, 's2d3d').data - s2d3d_val_data = PanoS2D3DDataset(root_dir, 'val', shape, max_wall_num, aug, camera_height, logger, - split_list, patch_num, keys, None, 's2d3d').data - s2d3d_test_data = PanoS2D3DDataset(root_dir, 'test', shape, max_wall_num, aug, camera_height, logger, - split_list, patch_num, keys, None, 's2d3d').data - s2d3d_all_data = s2d3d_train_data + s2d3d_val_data + s2d3d_test_data - - pano_train_data = PanoS2D3DDataset(root_dir, 'train', shape, max_wall_num, aug, camera_height, logger, - split_list, patch_num, keys, None, 'pano').data - self.data = s2d3d_all_data + pano_train_data - elif subset == 's2d3d': - pano_train_data = PanoS2D3DDataset(root_dir, 'train', shape, max_wall_num, aug, camera_height, logger, - split_list, patch_num, keys, None, 'pano').data - pano_val_data = PanoS2D3DDataset(root_dir, 'val', shape, max_wall_num, aug, camera_height, logger, - split_list, patch_num, keys, None, 'pano').data - pano_test_data = PanoS2D3DDataset(root_dir, 'test', shape, max_wall_num, aug, camera_height, logger, - split_list, patch_num, keys, None, 'pano').data - pano_all_data = pano_train_data + pano_val_data + pano_test_data - - s2d3d_train_data = PanoS2D3DDataset(root_dir, 'train', shape, max_wall_num, aug, camera_height, logger, - split_list, patch_num, keys, None, 's2d3d').data - self.data = pano_all_data + s2d3d_train_data - else: - self.data = PanoS2D3DDataset(root_dir, mode, shape, max_wall_num, aug, camera_height, logger, - split_list, 
patch_num, keys, None, subset).data - - if for_test_index is not None: - self.data = self.data[:for_test_index] - logger.info(f"Build dataset mode: {self.mode} valid: {len(self.data)}") - - -if __name__ == '__main__': - import numpy as np - from PIL import Image - - from tqdm import tqdm - from visualization.boundary import draw_boundaries - from visualization.floorplan import draw_floorplan - from utils.boundary import depth2boundaries - from utils.conversion import uv2xyz - - modes = ['test', 'val', 'train'] - for i in range(1): - for mode in modes: - print(mode) - mp3d_dataset = PanoS2D3DMixDataset(root_dir='../src/dataset/pano_s2d3d', mode=mode, aug={ - # 'STRETCH': True, - # 'ROTATE': True, - # 'FLIP': True, - # 'GAMMA': True - }, subset='pano') - continue - save_dir = f'../src/dataset/pano_s2d3d/visualization1/{mode}' - if not os.path.isdir(save_dir): - os.makedirs(save_dir) - - bar = tqdm(mp3d_dataset, ncols=100) - for data in bar: - bar.set_description(f"Processing {data['id']}") - boundary_list = depth2boundaries(data['ratio'], data['depth'], step=None) - pano_img = draw_boundaries(data['image'].transpose(1, 2, 0), boundary_list=boundary_list, show=False) - Image.fromarray((pano_img * 255).astype(np.uint8)).save( - os.path.join(save_dir, f"{data['id']}_boundary.png")) - - floorplan = draw_floorplan(uv2xyz(boundary_list[0])[..., ::2], show=False, - marker_color=None, center_color=0.8, show_radius=None) - Image.fromarray((floorplan.squeeze() * 255).astype(np.uint8)).save( - os.path.join(save_dir, f"{data['id']}_floorplan.png")) diff --git a/spaces/zhoujiaxin/zhoujiaxinchatgpt/src/components/ui/sheet.tsx b/spaces/zhoujiaxin/zhoujiaxinchatgpt/src/components/ui/sheet.tsx deleted file mode 100644 index c9f5ce0f81a91067bb013e988a07eb1e6bf6953b..0000000000000000000000000000000000000000 --- a/spaces/zhoujiaxin/zhoujiaxinchatgpt/src/components/ui/sheet.tsx +++ /dev/null @@ -1,122 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SheetPrimitive from '@radix-ui/react-dialog' - -import { cn } from '@/lib/utils' -import { IconClose } from '@/components/ui/icons' - -const Sheet = SheetPrimitive.Root - -const SheetTrigger = SheetPrimitive.Trigger - -const SheetClose = SheetPrimitive.Close - -const SheetPortal = ({ - className, - children, - ...props -}: SheetPrimitive.DialogPortalProps) => ( - - {children} - -) -SheetPortal.displayName = SheetPrimitive.Portal.displayName - -const SheetOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - -)) -SheetOverlay.displayName = SheetPrimitive.Overlay.displayName - -const SheetContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - {children} - - - Close - - - -)) -SheetContent.displayName = SheetPrimitive.Content.displayName - -const SheetHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
      -) -SheetHeader.displayName = 'SheetHeader' - -const SheetFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
      -) -SheetFooter.displayName = 'SheetFooter' - -const SheetTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SheetTitle.displayName = SheetPrimitive.Title.displayName - -const SheetDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SheetDescription.displayName = SheetPrimitive.Description.displayName - -export { - Sheet, - SheetTrigger, - SheetClose, - SheetContent, - SheetHeader, - SheetFooter, - SheetTitle, - SheetDescription -} diff --git a/spaces/ziguo/Real-ESRGAN/realesrgan/data/realesrgan_paired_dataset.py b/spaces/ziguo/Real-ESRGAN/realesrgan/data/realesrgan_paired_dataset.py deleted file mode 100644 index 386c8d72496245dae8df033c2ebbd76b41ff45f1..0000000000000000000000000000000000000000 --- a/spaces/ziguo/Real-ESRGAN/realesrgan/data/realesrgan_paired_dataset.py +++ /dev/null @@ -1,108 +0,0 @@ -import os -from basicsr.data.data_util import paired_paths_from_folder, paired_paths_from_lmdb -from basicsr.data.transforms import augment, paired_random_crop -from basicsr.utils import FileClient, imfrombytes, img2tensor -from basicsr.utils.registry import DATASET_REGISTRY -from torch.utils import data as data -from torchvision.transforms.functional import normalize - - -@DATASET_REGISTRY.register() -class RealESRGANPairedDataset(data.Dataset): - """Paired image dataset for image restoration. - - Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc) and GT image pairs. - - There are three modes: - 1. 'lmdb': Use lmdb files. - If opt['io_backend'] == lmdb. - 2. 'meta_info': Use meta information file to generate paths. - If opt['io_backend'] != lmdb and opt['meta_info'] is not None. - 3. 'folder': Scan folders to generate paths. - The rest. - - Args: - opt (dict): Config for train datasets. It contains the following keys: - dataroot_gt (str): Data root path for gt. - dataroot_lq (str): Data root path for lq. - meta_info (str): Path for meta information file. - io_backend (dict): IO backend type and other kwarg. - filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. - Default: '{}'. - gt_size (int): Cropped patched size for gt patches. - use_hflip (bool): Use horizontal flips. - use_rot (bool): Use rotation (use vertical flip and transposing h - and w for implementation). - - scale (bool): Scale, which will be added automatically. - phase (str): 'train' or 'val'. 
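-
-        Example (hypothetical paths and values, shown only to make the keys
-        above concrete; 'scale' and 'phase' are normally added by the
-        training pipeline, as noted above):
-
-            opt = {
-                'dataroot_gt': 'datasets/example/GT',
-                'dataroot_lq': 'datasets/example/LQ',
-                'io_backend': {'type': 'disk'},
-                'filename_tmpl': '{}',
-                'gt_size': 256,
-                'use_hflip': True,
-                'use_rot': True,
-                'scale': 4,
-                'phase': 'train',
-            }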
- """ - - def __init__(self, opt): - super(RealESRGANPairedDataset, self).__init__() - self.opt = opt - self.file_client = None - self.io_backend_opt = opt['io_backend'] - # mean and std for normalizing the input images - self.mean = opt['mean'] if 'mean' in opt else None - self.std = opt['std'] if 'std' in opt else None - - self.gt_folder, self.lq_folder = opt['dataroot_gt'], opt['dataroot_lq'] - self.filename_tmpl = opt['filename_tmpl'] if 'filename_tmpl' in opt else '{}' - - # file client (lmdb io backend) - if self.io_backend_opt['type'] == 'lmdb': - self.io_backend_opt['db_paths'] = [self.lq_folder, self.gt_folder] - self.io_backend_opt['client_keys'] = ['lq', 'gt'] - self.paths = paired_paths_from_lmdb([self.lq_folder, self.gt_folder], ['lq', 'gt']) - elif 'meta_info' in self.opt and self.opt['meta_info'] is not None: - # disk backend with meta_info - # Each line in the meta_info describes the relative path to an image - with open(self.opt['meta_info']) as fin: - paths = [line.strip() for line in fin] - self.paths = [] - for path in paths: - gt_path, lq_path = path.split(', ') - gt_path = os.path.join(self.gt_folder, gt_path) - lq_path = os.path.join(self.lq_folder, lq_path) - self.paths.append(dict([('gt_path', gt_path), ('lq_path', lq_path)])) - else: - # disk backend - # it will scan the whole folder to get meta info - # it will be time-consuming for folders with too many files. It is recommended using an extra meta txt file - self.paths = paired_paths_from_folder([self.lq_folder, self.gt_folder], ['lq', 'gt'], self.filename_tmpl) - - def __getitem__(self, index): - if self.file_client is None: - self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) - - scale = self.opt['scale'] - - # Load gt and lq images. Dimension order: HWC; channel order: BGR; - # image range: [0, 1], float32. 
- gt_path = self.paths[index]['gt_path'] - img_bytes = self.file_client.get(gt_path, 'gt') - img_gt = imfrombytes(img_bytes, float32=True) - lq_path = self.paths[index]['lq_path'] - img_bytes = self.file_client.get(lq_path, 'lq') - img_lq = imfrombytes(img_bytes, float32=True) - - # augmentation for training - if self.opt['phase'] == 'train': - gt_size = self.opt['gt_size'] - # random crop - img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size, scale, gt_path) - # flip, rotation - img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_hflip'], self.opt['use_rot']) - - # BGR to RGB, HWC to CHW, numpy to tensor - img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True) - # normalize - if self.mean is not None or self.std is not None: - normalize(img_lq, self.mean, self.std, inplace=True) - normalize(img_gt, self.mean, self.std, inplace=True) - - return {'lq': img_lq, 'gt': img_gt, 'lq_path': lq_path, 'gt_path': gt_path} - - def __len__(self): - return len(self.paths) diff --git a/spaces/zoheb/yolos_demo/README.md b/spaces/zoheb/yolos_demo/README.md deleted file mode 100644 index 3e3961c2112db24ae47b04702ccd3d8b606be5e8..0000000000000000000000000000000000000000 --- a/spaces/zoheb/yolos_demo/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: YOLOS Demo (Balloons) -emoji: 🎈 -colorFrom: purple -colorTo: red -sdk: streamlit -sdk_version: 1.10.0 -python_version: 3.9.13 -app_file: app.py -models: zoheb/yolos-small-balloon -pinned: true -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/zomehwh/sovits-xiaoke/vdecoder/hifigan/models.py b/spaces/zomehwh/sovits-xiaoke/vdecoder/hifigan/models.py deleted file mode 100644 index bdc3fa2c3447f360472d94c2fad9bd74993f6410..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/sovits-xiaoke/vdecoder/hifigan/models.py +++ /dev/null @@ -1,500 +0,0 @@ -import os -import json -from .env import AttrDict -import numpy as np -import torch -import torch.nn.functional as F -import torch.nn as nn -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from .utils import init_weights, get_padding - -LRELU_SLOPE = 0.1 - - -def load_model(model_path, device='cuda'): - config_file = os.path.join(os.path.split(model_path)[0], 'config.json') - with open(config_file) as f: - data = f.read() - - global h - json_config = json.loads(data) - h = AttrDict(json_config) - - generator = Generator(h).to(device) - - cp_dict = torch.load(model_path) - generator.load_state_dict(cp_dict['generator']) - generator.eval() - generator.remove_weight_norm() - del cp_dict - return generator, h - - -class ResBlock1(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.h = h - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, 
kernel_size, 1, dilation=1,
-                               padding=get_padding(kernel_size, 1))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                               padding=get_padding(kernel_size, 1)))
-        ])
-        self.convs2.apply(init_weights)
-
-    def forward(self, x):
-        for c1, c2 in zip(self.convs1, self.convs2):
-            xt = F.leaky_relu(x, LRELU_SLOPE)
-            xt = c1(xt)
-            xt = F.leaky_relu(xt, LRELU_SLOPE)
-            xt = c2(xt)
-            x = xt + x
-        return x
-
-    def remove_weight_norm(self):
-        for l in self.convs1:
-            remove_weight_norm(l)
-        for l in self.convs2:
-            remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
-    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
-        super(ResBlock2, self).__init__()
-        self.h = h
-        self.convs = nn.ModuleList([
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                               padding=get_padding(kernel_size, dilation[0]))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                               padding=get_padding(kernel_size, dilation[1])))
-        ])
-        self.convs.apply(init_weights)
-
-    def forward(self, x):
-        for c in self.convs:
-            xt = F.leaky_relu(x, LRELU_SLOPE)
-            xt = c(xt)
-            x = xt + x
-        return x
-
-    def remove_weight_norm(self):
-        for l in self.convs:
-            remove_weight_norm(l)
-
-
-class SineGen(torch.nn.Module):
-    """ Definition of the sine generator
-    SineGen(samp_rate, harmonic_num=0,
-            sine_amp=0.1, noise_std=0.003,
-            voiced_threshold=0,
-            flag_for_pulse=False)
-    samp_rate: sampling rate in Hz
-    harmonic_num: number of harmonic overtones (default 0)
-    sine_amp: amplitude of the sine waveform (default 0.1)
-    noise_std: std of Gaussian noise (default 0.003)
-    voiced_threshold: F0 threshold for U/V classification (default 0)
-    flag_for_pulse: this SineGen is used inside PulseGen (default False)
-    Note: when flag_for_pulse is True, the first time step of a voiced
-    segment is always sin(np.pi) or cos(0)
-    """
-
-    def __init__(self, samp_rate, harmonic_num=0,
-                 sine_amp=0.1, noise_std=0.003,
-                 voiced_threshold=0,
-                 flag_for_pulse=False):
-        super(SineGen, self).__init__()
-        self.sine_amp = sine_amp
-        self.noise_std = noise_std
-        self.harmonic_num = harmonic_num
-        self.dim = self.harmonic_num + 1
-        self.sampling_rate = samp_rate
-        self.voiced_threshold = voiced_threshold
-        self.flag_for_pulse = flag_for_pulse
-
-    def _f02uv(self, f0):
-        # generate the unvoiced/voiced (U/V) signal
-        uv = (f0 > self.voiced_threshold).type(torch.float32)
-        return uv
-
-    def _f02sine(self, f0_values):
-        """ f0_values: (batchsize, length, dim)
-        where dim indicates fundamental tone and overtones
-        """
-        # convert to F0 in rad. The integer part n can be ignored
-        # because 2 * np.pi * n doesn't affect phase
-        rad_values = (f0_values / self.sampling_rate) % 1
-
-        # initial phase noise (no noise for fundamental component)
-        rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2],
-                              device=f0_values.device)
-        rand_ini[:, 0] = 0
-        rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
-
-        # instantaneous phase: sine[t] = sin(2*pi * \sum_{i=1}^{t} rad)
-        if not self.flag_for_pulse:
-            # for the normal case
-
-            # To prevent torch.cumsum numerical overflow,
-            # it is necessary to add -1 whenever \sum_{k=1}^{n} rad_value_k > 1.
-            # Buffer tmp_over_one_idx indicates the time step to add -1.
-            # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi
-            tmp_over_one = torch.cumsum(rad_values, 1) % 1
-            tmp_over_one_idx = (torch.diff(tmp_over_one, dim=1)) < 0
-            cumsum_shift = torch.zeros_like(rad_values)
-            cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
-
-            sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1)
-                              * 2 * np.pi)
-        else:
-            # If necessary, make sure that the first time step of every
-            # voiced segment is sin(pi) or cos(0)
-            # This is used for pulse-train generation
-
-            # identify the last time step in unvoiced segments
-            uv = self._f02uv(f0_values)
-            uv_1 = torch.roll(uv, shifts=-1, dims=1)
-            uv_1[:, -1, :] = 1
-            u_loc = (uv < 1) * (uv_1 > 0)
-
-            # get the instantaneous phase
-            tmp_cumsum = torch.cumsum(rad_values, dim=1)
-            # different batches need to be processed differently
-            for idx in range(f0_values.shape[0]):
-                temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
-                temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
-                # stores the accumulation of i.phase within
-                # each voiced segment
-                tmp_cumsum[idx, :, :] = 0
-                tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
-
-            # rad_values - tmp_cumsum: remove the accumulation of i.phase
-            # within the previous voiced segment.
-            i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
-
-            # get the sines
-            sines = torch.cos(i_phase * 2 * np.pi)
-        return sines
-
-    def forward(self, f0):
-        """ sine_tensor, uv = forward(f0)
-        input F0: tensor(batchsize=1, length, dim=1)
-        f0 for unvoiced steps should be 0
-        output sine_tensor: tensor(batchsize=1, length, dim)
-        output uv: tensor(batchsize=1, length, 1)
-        """
-        with torch.no_grad():
-            f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim,
-                                 device=f0.device)
-            # fundamental component
-            fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device))
-
-            # generate sine waveforms
-            sine_waves = self._f02sine(fn) * self.sine_amp
-
-            # generate uv signal
-            # uv = torch.ones(f0.shape)
-            # uv = uv * (f0 > self.voiced_threshold)
-            uv = self._f02uv(f0)
-
-            # noise: for unvoiced should be similar to sine_amp
-            # std = self.sine_amp/3 -> max value ~ self.sine_amp
-            # . 
for voiced regions is self.noise_std - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - - # first: set the unvoiced part to 0 by uv - # then: additive noise - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """ SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - - # to produce sine waveforms - self.l_sin_gen = SineGen(sampling_rate, harmonic_num, - sine_amp, add_noise_std, voiced_threshod) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x): - """ - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - """ - # source for harmonic branch - sine_wavs, uv, _ = self.l_sin_gen(x) - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - - # source for noise branch, in the same shape as uv - noise = torch.randn_like(uv) * self.sine_amp / 3 - return sine_merge, noise, uv - - -class Generator(torch.nn.Module): - def __init__(self, h): - super(Generator, self).__init__() - self.h = h - - self.num_kernels = len(h["resblock_kernel_sizes"]) - self.num_upsamples = len(h["upsample_rates"]) - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h["upsample_rates"])) - self.m_source = SourceModuleHnNSF( - sampling_rate=h["sampling_rate"], - harmonic_num=8) - self.noise_convs = nn.ModuleList() - self.conv_pre = weight_norm(Conv1d(h["inter_channels"], h["upsample_initial_channel"], 7, 1, padding=3)) - resblock = ResBlock1 if h["resblock"] == '1' else ResBlock2 - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(h["upsample_rates"], h["upsample_kernel_sizes"])): - c_cur = h["upsample_initial_channel"] // (2 ** (i + 1)) - self.ups.append(weight_norm( - ConvTranspose1d(h["upsample_initial_channel"] // (2 ** i), h["upsample_initial_channel"] // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - if i + 1 < len(h["upsample_rates"]): # - stride_f0 = np.prod(h["upsample_rates"][i + 1:]) - self.noise_convs.append(Conv1d( - 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2)) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = h["upsample_initial_channel"] // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(h["resblock_kernel_sizes"], h["resblock_dilation_sizes"])): - self.resblocks.append(resblock(h, ch, k, d)) - - self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) - 
self.ups.apply(init_weights) - self.conv_post.apply(init_weights) - self.cond = nn.Conv1d(h['gin_channels'], h['upsample_initial_channel'], 1) - - def forward(self, x, f0, g=None): - # print(1,x.shape,f0.shape,f0[:, None].shape) - f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t - # print(2,f0.shape) - har_source, noi_source, uv = self.m_source(f0) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - x = x + self.cond(g) - # print(124,x.shape,har_source.shape) - for i in range(self.num_upsamples): - x = F.leaky_relu(x, LRELU_SLOPE) - # print(3,x.shape) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - # print(4,x_source.shape,har_source.shape,x.shape) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, periods=None): - super(MultiPeriodDiscriminator, self).__init__() - self.periods = periods if periods is not None else [2, 3, 5, 7, 11] - self.discriminators = nn.ModuleList() - for period in self.periods: - self.discriminators.append(DiscriminatorP(period)) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 128, 15, 1, padding=7)), - norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), - norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), - norm_f(Conv1d(256, 512, 
41, 4, groups=16, padding=20)), - norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiScaleDiscriminator(torch.nn.Module): - def __init__(self): - super(MultiScaleDiscriminator, self).__init__() - self.discriminators = nn.ModuleList([ - DiscriminatorS(use_spectral_norm=True), - DiscriminatorS(), - DiscriminatorS(), - ]) - self.meanpools = nn.ModuleList([ - AvgPool1d(4, 2, padding=2), - AvgPool1d(4, 2, padding=2) - ]) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - if i != 0: - y = self.meanpools[i - 1](y) - y_hat = self.meanpools[i - 1](y_hat) - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - r_loss = torch.mean((1 - dr) ** 2) - g_loss = torch.mean(dg ** 2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses
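
The three helpers above (`feature_loss`, `discriminator_loss`, `generator_loss`) are the standard HiFi-GAN adversarial losses. As an illustration of how they combine with the `Generator`, `MultiPeriodDiscriminator` and `MultiScaleDiscriminator` defined in this file, here is a minimal training-step sketch. It is not the repository's training code: the optimizers, the `mel_spectrogram` function and the L1-mel weight of 45 (the value used in the HiFi-GAN paper) are assumptions.

```python
import torch.nn.functional as F

def train_step(generator, mpd, msd, optim_g, optim_d, x, f0, g, y, mel_spectrogram):
    # x: acoustic features, f0: pitch contour, g: speaker embedding, y: reference waveform
    y_hat = generator(x, f0, g=g)

    # discriminator step: score real audio against detached generated audio
    optim_d.zero_grad()
    y_df_r, y_df_g, _, _ = mpd(y, y_hat.detach())
    y_ds_r, y_ds_g, _, _ = msd(y, y_hat.detach())
    loss_d = discriminator_loss(y_df_r, y_df_g)[0] + discriminator_loss(y_ds_r, y_ds_g)[0]
    loss_d.backward()
    optim_d.step()

    # generator step: adversarial + feature-matching + mel reconstruction losses
    optim_g.zero_grad()
    y_df_r, y_df_g, fmap_f_r, fmap_f_g = mpd(y, y_hat)
    y_ds_r, y_ds_g, fmap_s_r, fmap_s_g = msd(y, y_hat)
    loss_fm = feature_loss(fmap_f_r, fmap_f_g) + feature_loss(fmap_s_r, fmap_s_g)
    loss_adv = generator_loss(y_df_g)[0] + generator_loss(y_ds_g)[0]
    loss_mel = F.l1_loss(mel_spectrogram(y), mel_spectrogram(y_hat)) * 45  # weight from the HiFi-GAN paper
    (loss_adv + loss_fm + loss_mel).backward()
    optim_g.step()
```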