diff --git a/spaces/101-5/gpt4free/g4f/.v1/SECURITY.md b/spaces/101-5/gpt4free/g4f/.v1/SECURITY.md
deleted file mode 100644
index cbc69677a0ec6b0192f1bd61f3eccb7723f8827b..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/SECURITY.md
+++ /dev/null
@@ -1,4 +0,0 @@
-## Reporting a Vulnerability
-
-Please report (suspected) security vulnerabilities to https://t.me/xtekky. You will receive a response within 48 hours. If the issue is confirmed, we will release a patch as soon as possible, depending on complexity, but historically within a few days.
diff --git a/spaces/101-5/gpt4free/models_for_langchain/model.py b/spaces/101-5/gpt4free/models_for_langchain/model.py
deleted file mode 100644
index 0fdd170f92f9d03e4065cda5d49a896dfe4cfc94..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/models_for_langchain/model.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from typing import Any, List, Mapping, Optional
-from g4f.Provider import (
- Ails,
- You,
- Bing,
- Yqcloud,
- Theb,
- Aichat,
- Bard,
- Vercel,
- Forefront,
- Lockchat,
- Liaobots,
- H2o,
- ChatgptLogin,
- DeepAi,
- GetGpt
-)
-import g4f
-from langchain.callbacks.manager import CallbackManagerForLLMRun
-from langchain.llms.base import LLM
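-# Maps a provider name string to its g4f Provider class so a provider can be selected by name.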
-provider_dict = {
- 'Ails': Ails,
- 'You': You,
- 'Bing': Bing,
- 'Yqcloud': Yqcloud,
- 'Theb': Theb,
- 'Aichat': Aichat,
- 'Bard': Bard,
- 'Vercel': Vercel,
- 'Forefront': Forefront,
- 'Lockchat': Lockchat,
- 'Liaobots': Liaobots,
- 'H2o': H2o,
- 'ChatgptLogin': ChatgptLogin,
- 'DeepAi': DeepAi,
- 'GetGpt': GetGpt
-}
-
-class CustomLLM(LLM):
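- """LangChain-compatible LLM wrapper that forwards prompts to a free g4f provider."""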
- model_name: str="gpt-3.5-turbo"
- provider_name: str="GetGpt"
- @property
- def _llm_type(self) -> str:
- return "custom"
-
- def _call(
- self,
- prompt: str,
- stop: Optional[List[str]] = None,
- run_manager: Optional[CallbackManagerForLLMRun] = None,
- ) -> str:
- if stop is not None:
- raise ValueError("stop kwargs are not permitted.")
- bot_msg = g4f.ChatCompletion.create(model=self.model_name,
- provider=provider_dict[self.provider_name],
- messages=[{"role": "user",
- "content": prompt}],
- stream=False)
- return bot_msg
-
- @property
- def _identifying_params(self) -> Mapping[str, Any]:
- """Get the identifying parameters."""
- return {"model_name": self.model_name, "provider_name": self.provider_name}
\ No newline at end of file
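The deleted `CustomLLM` wrapper above can be exercised with only a few lines of LangChain-style code. The snippet below is a hypothetical usage sketch rather than code from the repository: it assumes the legacy `langchain` LLM call interface and the `g4f` providers imported above are installed, and that the module is importable as `models_for_langchain.model` from the project root.

```python
# Hypothetical usage sketch -- assumes the legacy langchain LLM interface and g4f are installed.
from models_for_langchain.model import CustomLLM  # import path depends on your package root

# model_name and provider_name are the fields declared on the class.
llm = CustomLLM(model_name="gpt-3.5-turbo", provider_name="GetGpt")

# In the legacy interface, calling the LLM runs _call(), which forwards the prompt
# to g4f.ChatCompletion.create using the provider looked up in provider_dict.
reply = llm("Explain in one sentence what a custom LangChain LLM wrapper does.")
print(reply)
```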
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 The Best Software Emulator for DirectX 9.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 The Best Software Emulator for DirectX 9.md
deleted file mode 100644
index 2413b17ebd6162b4074cf73180f6de0692f82f44..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 The Best Software Emulator for DirectX 9.md
+++ /dev/null
@@ -1,122 +0,0 @@
-
-
If you are a gamer who loves playing high-end games on your PC, but you don't have a powerful graphics card, you might have encountered some problems with running some games smoothly. You might have experienced lagging, stuttering, crashing, or even not being able to launch some games at all. This can be very frustrating and disappointing, especially if you have spent a lot of money on buying those games.
-Fortunately, there is a solution that can help you overcome these issues and enjoy your favorite games without any hassle. It is called Swift Shader DX9 SM3, and it is software that can emulate the DirectX 9 features on your CPU, allowing you to run games that require DirectX 9 support even if your graphics card does not support it. In this article, we will explain what Swift Shader DX9 SM3 is, why you need it, how to download and install it, and how to use it for gaming.
- Swift Shader DX9 SM3 is software developed by TransGaming Technologies, a company that specializes in creating cross-platform gaming solutions. It is part of their SwiftShader product line, which also includes SwiftShader DX8 and SwiftShader OpenGL.
- SwiftShader is a high-performance CPU-based implementation of various graphics APIs, such as DirectX and OpenGL. It can emulate the features and functions of these APIs on any CPU, regardless of its capabilities or architecture. This means that it can run games that require these APIs even if your graphics card does not support them or if you don't have a graphics card at all.
- SwiftShader DX9 SM3 is specifically designed to emulate the DirectX 9 features on your CPU. It supports all the DirectX 9 features, such as pixel shaders, vertex shaders, texture filtering, alpha blending, fogging, lighting, etc. It also supports Shader Model 3.0, which is required by some games that use advanced graphics effects.
- The main benefit of using Swift Shader DX9 SM3 for gaming is that it allows you to run games that require DirectX 9 support even if your graphics card does not support it or if you don't have a graphics card at all. This means that you can play games that would otherwise be impossible or very difficult to play on your PC.
- Some examples of games that require DirectX 9 support are GTA IV, FIFA 14, Call of Duty 4: Modern Warfare, Assassin's Creed, Bioshock, Crysis, etc. These are some of the most popular and acclaimed games in the history of gaming, and they offer amazing gameplay and graphics that you don't want to miss out on.
- By using Swift Shader DX9 SM3, you can enjoy these games without having to upgrade your hardware or buy a new PC. You can also save money on buying expensive graphics cards that might become obsolete soon. You can also avoid compatibility issues that might arise from using different graphics cards or drivers.
-While using Swift Shader DX9 SM3 for gaming has many benefits, it also has some drawbacks that you should be aware of before using it. The main drawback is that it can affect the performance and quality of your games.
- Since Swift Shader DX9 SM3 emulates the DirectX 9 features on your CPU, it consumes a lot of CPU resources and power. This means that it can slow down your PC and reduce its battery life if you are using a laptop. It can also cause overheating issues if your CPU is not well ventilated or cooled.
- Moreover, since Swift Shader DX9 SM3 emulates the DirectX 9 features on your CPU, it cannot match the quality and accuracy of a real graphics card. This means that it can cause some graphical glitches, artifacts, or distortions in your games. It can also lower the resolution, frame rate, or detail level of your games.
- Therefore, using Swift Shader DX9 SM3 for gaming is not recommended if you have a high-end PC or if you are very particular about the performance and quality of your games. It is only suitable for low-end PCs or casual gamers who just want to play some games without spending too much money or effort.
- If you want to download Swift Shader DX9 SM3 from Google Drive, you can follow these steps:
- Before installing any file from unknown sources, it is always advisable to verify its authenticity and safety. This can prevent potential malware infections or data breaches on your PC. To verify the authenticity and safety of the file named "SwiftShader_DX9_SM3 Build_3383(x86).rar", you can do these things:
-
-Check the file name and extension. The file name should be exactly "SwiftShader_DX9_SM3 Build_3383(x86).rar" and the extension should be ".rar". If it is different or has an extra extension like ".exe" or ".zip", do not open it.
-Check the file size. The file size should be about 1.17 MB. If it is too large or too small, do not open it.
-Check the file source. The file source should be Google Drive. If it is from another website or platform, do not open it.
-Check the file content. You can use a tool like WinRAR or 7-Zip to open the file and see its content. The file should contain only one folder named "SwiftShader_DX9_SM3 Build_3383(x86)". If it contains other files or folders, do not open it.
-How to extract and copy the file to the game folder
- After verifying the authenticity and safety of the file named "SwiftShader_DX9_SM3 Build_3383(x86).rar", you can extract and copy it to the game folder. To do this, you can follow these steps:
-
-Open the file with a tool like WinRAR or 7-Zip. You will see a folder named "SwiftShader_DX9_SM3 Build_3383(x86)" inside the file.
-Extract the folder to a location of your choice on your PC. You can do this by right-clicking on the folder and choosing "Extract to SwiftShader_DX9_SM3 Build_3383(x86)\" or by dragging and dropping the folder to your desired location.
-Locate the game folder of the game that you want to play with Swift Shader DX9 SM3. The game folder is usually located in the "Program Files" or "Program Files (x86)" directory on your PC. For example, if you want to play GTA IV, the game folder might be "C:\Program Files (x86)\Rockstar Games\Grand Theft Auto IV".
-Copy the folder named "SwiftShader_DX9_SM3 Build_3383(x86)" to the game folder. You can do this by right-clicking on the folder and choosing "Copy" or by pressing Ctrl+C on your keyboard, and then right-clicking on the game folder and choosing "Paste" or by pressing Ctrl+V on your keyboard.
-Rename the file named "d3d9.dll" inside the folder "SwiftShader_DX9_SM3 Build_3383(x86)" to something else. You can do this by right-clicking on the file and choosing "Rename" or by pressing F2 on your keyboard, and then typing a new name for the file. For example, you can rename it to "d3d9_swiftshader.dll". This is to avoid conflicts with other files that might have the same name.
-
- The steps to download Swift Shader DX9 SM3 from Clifton Road Car Sales
- If you want to download Swift Shader DX9 SM3 from Clifton Road Car Sales, you can follow these steps:
-
-Go to this link: https://www.cliftonroadcarsales.co.uk/forum/general-discussions/swiftshader-dx9-sm3-build-3383-zip . This is a website that contains a forum post that has a download link for the file named "SwiftShader DX9 SM3 Build 3383.zip".
-Scroll down to the bottom of the page and find the download link. The download link is a blue button that says "Download Now". It is located under a banner that says "Download SwiftShader DX9 SM3 Build 3383.zip".
-Click on the download link. This will open a new tab or window that will redirect you to another website.
-
- How to deal with potential pop-ups and ads
- Before downloading the file from Clifton Road Car Sales, you should be aware that you might encounter some pop-ups and ads on the website. These are not part of the file download process and they might be annoying or harmful. To deal with them, you can do these things:
-
-Use an ad blocker. An ad blocker is a program or browser extension that can block or remove ads from websites. This can make your browsing experience faster and safer. Some examples of ad blockers are uBlock Origin, AdBlock Plus, or AdGuard.
-Use a pop-up blocker. A pop-up blocker is a program or browser extension that can block or close pop-up windows that might appear on websites. These are usually unwanted or malicious windows that might contain viruses or malware. Some examples of pop-up blockers are Pop-up Blocker Pro, Pop up Blocker for Chrome, or Poper Blocker.
-Use common sense. If you see any suspicious or irrelevant windows, tabs, messages, or buttons on the website, do not click on them or follow their instructions. They might be scams or phishing attempts that might try to steal your personal or financial information.
-
- How to unzip and copy the file to the game folder
- After dealing with the pop-ups and ads, you can download and unzip the file named "SwiftShader DX9 SM3 Build 3383.zip" from the website and copy it to the game folder. To do this, you can follow these steps:
-
-Wait for the download to finish. The file size is about 1.17 MB.
-Open the file with a tool like WinRAR or 7-Zip. You will see a folder named "SwiftShader_DX9_SM3 Build_3383(x86)" inside the file.
-Extract the folder to a location of your choice on your PC. You can do this by right-clicking on the folder and choosing "Extract to SwiftShader_DX9_SM3 Build_3383(x86)\" or by dragging and dropping the folder to your desired location.
-Follow the same steps as above to copy and rename the file to the game folder.
-
- How to use Swift Shader DX9 SM3 for gaming?
- The settings and options available for Swift Shader DX9 SM3
- After installing Swift Shader DX9 SM3 to the game folder, you can use it for gaming. To do this, you need to configure some settings and options for Swift Shader DX9 SM3. These settings and options are located in a file named "SwiftShader.ini" inside the folder "SwiftShader_DX9_SM3 Build_3383(x86)". You can open this file with a text editor like Notepad or WordPad.
- The file contains several sections that correspond to different aspects of Swift Shader DX9 SM3, such as SystemInfo, Renderer, PixelShader, VertexShader, etc. Each section has several parameters that you can modify to change the behavior and performance of Swift Shader DX9 SM3. For example, you can change the value of "NumThreads" under SystemInfo to change the number of CPU threads that Swift Shader DX9 SM3 uses.
- You can find a detailed explanation of each section and parameter in this link: https://github.com/google/swiftshader/blob/master/docs/SwiftShader.ini.md . This is a documentation page from the official GitHub repository of SwiftShader, which is the source code of Swift Shader DX9 SM3 and other versions.
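To make the layout of the file more concrete, here is a rough illustrative sketch of how such a SwiftShader.ini could look, using only the section and parameter names mentioned in this article; the exact names, default values, and comment syntax in the file shipped with your build may differ.

```ini
; Illustrative sketch only - section and parameter names are the ones discussed in this article.
; The SwiftShader.ini bundled with your build may use different names or defaults.
[SystemInfo]
NumThreads=4          ; number of CPU threads SwiftShader is allowed to use

[Renderer]
EnableVSync=0         ; 1 = sync to the monitor refresh rate, 0 = uncapped frame rate
MaxAnisotropy=4       ; texture filtering level: 1, 2, 4, 8 or 16

[PixelShader]
PixelShaderModel=3    ; shader model to emulate: 2 or 3
```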
- How to adjust the performance and quality of Swift Shader DX9 SM3
- The most important settings and options that you need to adjust for Swift Shader DX9 SM3 are those that affect its performance and quality. These are mainly located in the sections Renderer, PixelShader, and VertexShader. By changing these settings and options, you can optimize Swift Shader DX9 SM3 for your PC and game.
- The general rule of thumb is that higher values mean higher quality but lower performance, and lower values mean lower quality but higher performance. Therefore, you need to find a balance between quality and performance that suits your preferences and needs. You can also experiment with different values and see how they affect your game.
- Here are some examples of settings and options that you can adjust for performance and quality:
-
-"EnableVSync" under Renderer: This parameter controls whether Swift Shader DX9 SM3 synchronizes its frame rate with your monitor's refresh rate. If you set it to 1, it will enable VSync, which can prevent screen tearing but also limit your frame rate. If you set it to 0, it will disable VSync, which can increase your frame rate but also cause screen tearing.
-"MaxAnisotropy" under Renderer: This parameter controls the level of anisotropic filtering that Swift Shader DX9 SM3 applies to textures. Anisotropic filtering is a technique that improves the quality and sharpness of textures at oblique angles. The higher the value, the better the texture quality but also the more CPU resources required. The valid values are 1, 2, 4, 8, or 16.
-"PixelShaderModel" under PixelShader: This parameter controls the pixel shader model that Swift Shader DX9 SM3 emulates. Pixel shaders are programs that determine how pixels are rendered on the screen. The higher the model number, the more advanced and complex pixel shaders that Swift Shader DX9 SM3 can emulate but also the more CPU resources required. The valid values are 2 or 3.
-Because the shader model determines how complex the effects are that SwiftShader emulates, choosing a lower shader model can reduce its workload and improve its performance.
-
- These are just some tips that might help. You can also experiment with different settings and options and see how they affect your game.
- Can I use SwiftShader with other graphics enhancers or mods?
- Yes, you can use SwiftShader with other graphics enhancers or mods that are compatible with your game and PC. However, you should be careful and cautious when doing so, as some graphics enhancers or mods might conflict or interfere with SwiftShader or cause other problems. You should always backup your game files and settings before installing or using any graphics enhancers or mods.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Csi.column.v8.4.0 Patch.rar.md b/spaces/1gistliPinn/ChatGPT4/Examples/Csi.column.v8.4.0 Patch.rar.md
deleted file mode 100644
index 87f3c818ee5b7ba25e7de6800e21733365b6744e..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Csi.column.v8.4.0 Patch.rar.md
+++ /dev/null
@@ -1,6 +0,0 @@
-csi.column.v8.4.0 patch.rar DOWNLOAD » https://imgfil.com/2uy25y
-
-... bz1637248 libvma [8.7.5-1] - Rebase to upstream v8.7.5 release - Resolves: ... Rebase to latest upstream release v4.11.0-5 - mstconfig support for prio_tag and ... usage column - Related: #1300852 gnome-weather [3.26.0-1] - Update to ... error [3.1.2-13] - Fix CVE-2019-18408: RAR use-after-free This update is available ... 4d29de3e1b
-
-
-
diff --git a/spaces/1phancelerku/anime-remove-background/Bus Simulator Ultimate APK MOD Unlimited Money and More Features for Bus Lovers.md b/spaces/1phancelerku/anime-remove-background/Bus Simulator Ultimate APK MOD Unlimited Money and More Features for Bus Lovers.md
deleted file mode 100644
index 34de936ec9dc6ef0e054aad35cfe03a1548b0784..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Bus Simulator Ultimate APK MOD Unlimited Money and More Features for Bus Lovers.md
+++ /dev/null
@@ -1,81 +0,0 @@
-
-Bus Simulator Ultimate Dinero Infinito APK: How to Get Unlimited Money in the Best Bus Simulator Game
-Do you love driving buses and exploring different cities? Do you want to experience the thrill of running your own bus company and competing with other players? Do you wish you had unlimited money to buy any bus, upgrade, or item you want in the game? If you answered yes to any of these questions, then you need to try Bus Simulator Ultimate Dinero Infinito APK.
-Bus Simulator Ultimate is one of the most popular and realistic bus simulator games on Android. It lets you drive various types of buses across different countries and continents, pick up and drop off passengers, customize your bus interior and exterior, and manage your own bus company. It also has a multiplayer mode where you can race with other players online and rank up in the global leaderboard.
-However, as fun as it is, Bus Simulator Ultimate also requires a lot of money to unlock all the features and items in the game. You need to earn money by completing missions, driving carefully, and satisfying your passengers. But this can take a long time and be quite tedious. That's why many players look for ways to get unlimited money in the game without spending real money.
-That's where Bus Simulator Ultimate Dinero Infinito APK comes in. This is a modified version of the original game that gives you unlimited money and resources. You can download and install it on your Android device for free and enjoy all the benefits of having unlimited money in Bus Simulator Ultimate. In this article, we will tell you more about Bus Simulator Ultimate Dinero Infinito APK, its features, how to use it, and where to download it.
- Features of Bus Simulator Ultimate Dinero Infinito APK
-Bus Simulator Ultimate Dinero Infinito APK has all the features of the original game plus some extra ones that make it more enjoyable and convenient. Here are some of the main features of Bus Simulator Ultimate Dinero Infinito APK:
- Realistic bus driving experience
-One of the best things about Bus Simulator Ultimate is that it gives you a realistic and immersive bus driving experience. You can choose from different types of buses, such as city buses, school buses, double-decker buses, and more. You can also customize your bus with various skins, accessories, and interior designs. You can even change the license plate, horn sound, and driver name of your bus.
-But that's not all. You can also drive your bus across different routes and cities around the world, such as Germany, Turkey, Italy, France, Spain, USA, and more. You can see the landmarks, scenery, and culture of each country as you drive. You can also pick up and drop off passengers at different bus stops and stations. You have to follow the traffic rules, signals, and signs, as well as the speed limit and the time schedule. You also have to deal with different weather conditions, such as rain, snow, fog, and night.
-And if that's not enough, you can also enjoy the realistic sound effects of the engine, brakes, horns, passengers, and traffic. You can also listen to radio stations from different countries while driving. You can even use the voice navigation system to guide you along the way. All these features make Bus Simulator Ultimate a very realistic and fun bus simulator game.
- Multiplayer mode and online ranking
-Another great feature of Bus Simulator Ultimate is that it has a multiplayer mode where you can compete with other players online. You can join or create your own bus races and challenge other drivers to see who is the fastest and the best. You can also chat with other players and make friends or rivals.
-But that's not all. You can also join or create your own bus company and cooperate with other drivers. You can share your routes, buses, and profits with your teammates. You can also hire or fire drivers as you wish. You can also compete with other bus companies and see who has the most passengers, revenue, and reputation.
-And if that's not enough, you can also earn rewards and rank up in the global leaderboard. You can see your position and stats compared to other players around the world. You can also unlock achievements and trophies as you play. All these features make Bus Simulator Ultimate a very competitive and social bus simulator game.
- Unlimited money and resources
-The best feature of Bus Simulator Ultimate Dinero Infinito APK is that it gives you unlimited money and resources in the game. This means that you can buy any bus, upgrade, or item you want without worrying about the cost. You can also unlock all the achievements and trophies in the game without any effort.
-But that's not all. You can also enjoy the game without ads or limitations. You don't have to watch annoying ads or wait for energy or lives to refill. You can play the game as much as you want without any interruption or restriction.
-And if that's not enough, you can also use the money to support your favorite bus company or driver in the multiplayer mode. You can donate money to your teammates or friends to help them grow their business or improve their performance. You can also use the money to sabotage your rivals or enemies to make them lose their customers or reputation. All these features make Bus Simulator Ultimate Dinero Infinito APK a very enjoyable and convenient bus simulator game.
- How to use Bus Simulator Ultimate Dinero Infinito APK
-If you are interested in using Bus Simulator Ultimate Dinero Infinito APK, here are some simple steps on how to download and install it on your Android device:
- Download and install the APK file from a trusted source
-The first step is to download the APK file from a trusted source. There are many websites that offer Bus Simulator Ultimate Dinero Infinito APK for free download, but not all of them are safe or reliable. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information.
-Therefore, you need to be careful when choosing where to download Bus Simulator Ultimate Dinero Infinito APK from. You need to check the compatibility and security of the file before downloading it. You need to make sure that it is compatible with your device model and Android version. You also need to scan it with an antivirus program or a malware detector to ensure that it is free from any harmful software.
-One of the best sources to download Bus Simulator Ultimate Dinero Infinito APK from is [text]. This website provides safe and secure downloads of various APK files for Android games and apps. It also provides detailed information about each file, such as its size, version, developer, rating, reviews, screenshots, and more. You can easily find Bus Simulator Ultimate Dinero Infinito APK by searching for it on the website or clicking on this link: [Bus Simulator Ultimate Dinero Infinito APK].
- Enable unknown sources in your device settings to allow installation
-The next step is to enable unknown sources in your device settings to allow installation of Bus Simulator Ultimate Dinero Infinito APK. This is because the file is not from the official Google Play Store and your device may block it by default.
-To enable unknown sources, you need to go to your device settings and look for the security or privacy option. There, you will find a toggle or checkbox that says "allow installation of apps from unknown sources" or something similar. You need to turn it on or check it to enable it.
-If you are not sure how to do this, you can follow this guide: [How to enable unknown sources on Android]. This guide will show you how to enable unknown sources on different Android versions and devices.
- Follow the instructions on the screen to complete the installation
-The final step is to follow the instructions on the screen to complete the installation of Bus Simulator Ultimate Dinero Infinito APK. This is very easy and straightforward. You just need to locate the file in your device and tap on it to open it. Then, you will see a pop-up window that asks you if you want to install the app. You need to tap on "install" or "yes" to confirm.
-The installation process will take a few seconds or minutes depending on your device and internet speed. Once it is done, you will see a message that says "app installed" or "installation successful". You can then tap on "open" or "done" to launch the game or exit the window.
- Launch the game and enjoy unlimited money
-The last step is to launch the game and enjoy unlimited money in Bus Simulator Ultimate. This is the most fun and exciting part. You can start the game normally and log in with your account or create a new one. You can then check your balance and see how much money you have. You will be amazed by the amount of money you have in the game.
-You can then spend your money as you wish and have fun. You can buy any bus, upgrade, or item you want without worrying about the cost. You can also unlock all the achievements and trophies in the game without any effort. You can also enjoy the game without ads or limitations. You can play the game as much as you want without any interruption or restriction.
-You can also use the money to support your favorite bus company or driver in the multiplayer mode. You can donate money to your teammates or friends to help them grow their business or improve their performance. You can also use the money to sabotage your rivals or enemies to make them lose their customers or reputation.
-All these features make Bus Simulator Ultimate Dinero Infinito APK a very enjoyable and convenient bus simulator game. You can experience the thrill of driving buses and exploring different cities with unlimited money and resources.
- Conclusion
-In conclusion, Bus Simulator Ultimate Dinero Infinito APK is a modified version of the original game that gives you unlimited money and resources. It has all the features of the original game plus some extra ones that make it more enjoyable and convenient. It lets you drive various types of buses across different countries and continents, pick up and drop off passengers, customize your bus interior and exterior, and manage your own bus company. It also has a multiplayer mode where you can race with other players online and rank up in the global leaderboard.
-But the best feature of Bus Simulator Ultimate Dinero Infinito APK is that it gives you unlimited money and resources in the game. You can buy any bus, upgrade, or item you want without worrying about the cost. You can also unlock all the achievements and trophies in the game without any effort. You can also enjoy the game without ads or limitations. You can play the game as much as you want without any interruption or restriction.
-If you are interested in using Bus Simulator Ultimate Dinero Infinito APK, you just need to download and install it on your Android device from a trusted source. Then, you need to enable unknown sources in your device settings to allow installation. After that, you need to follow the instructions on the screen to complete the installation. Finally, you need to launch the game and enjoy unlimited money.
-Bus Simulator Ultimate Dinero Infinito APK is a great way to have fun and experience the realistic and immersive bus driving experience with unlimited money and resources. It is one of the best bus simulator games on Android that you should not miss. So, what are you waiting for? Download Bus Simulator Ultimate Dinero Infinito APK now and start driving!
- FAQs
-Is Bus Simulator Ultimate Dinero Infinito APK safe to use?
-Yes, Bus Simulator Ultimate Dinero Infinito APK is safe to use if you download it from a trusted source like [text]. However, you should always check the compatibility and security of the file before downloading it. You should also scan it with an antivirus program or a malware detector to ensure that it is free from any harmful software.
- Will I get banned for using Bus Simulator Ultimate Dinero Infinito APK?
-No, you will not get banned for using Bus Simulator Ultimate Dinero Infinito APK if you use it wisely and responsibly. However, you should not abuse or exploit the unlimited money feature in the multiplayer mode or online ranking. You should also not brag or boast about your money or items in the chat or social media. This may annoy other players or attract unwanted attention from the game developers or moderators.
- Can I play online with Bus Simulator Ultimate Dinero Infinito APK?
-Yes, you can play online with Bus Simulator Ultimate Dinero Infinito APK if you have a stable internet connection and a valid account. You can join or create your own bus races and challenge other drivers to see who is the fastest and the best. You can also join or create your own bus company and cooperate with other drivers. You can also earn rewards and rank up in the global leaderboard.
- How can I update Bus Simulator Ultimate Dinero Infinito APK?
-You can update Bus Simulator Ultimate Dinero Infinito APK by downloading and installing the latest version of the file from [text]. However, you should always backup your data before updating to avoid losing your progress or settings. You should also check if there are any changes or new features in the new version before updating. You should also make sure that the new version is compatible with your device and Android version.
- Where can I download Bus Simulator Ultimate Dinero Infinito APK?
-You can download Bus Simulator Ultimate Dinero Infinito APK from [text]. This website provides safe and secure downloads of various APK files for Android games and apps. It also provides detailed information about each file, such as its size, version, developer, rating, reviews, screenshots, and more. You can easily find Bus Simulator Ultimate Dinero Infinito APK by searching for it on the website or clicking on this link: [Bus Simulator Ultimate Dinero Infinito APK].
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download 39 40 Meye Yie.md b/spaces/1phancelerku/anime-remove-background/Download 39 40 Meye Yie.md
deleted file mode 100644
index a01833fef77e90c5d5859a773804812349786c3f..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download 39 40 Meye Yie.md
+++ /dev/null
@@ -1,70 +0,0 @@
-
-Download 39/40 Meye Yie: A New Song by King Plaxma
-If you are looking for a new song to add to your playlist, you might want to check out Meye Yie by King Plaxma featuring 39/40. This is a new song that was released in March 2023 and has been gaining popularity among music lovers. In this article, we will tell you what Meye Yie is, who King Plaxma and 39/40 are, how to download Meye Yie, and why you should listen to it.
- Introduction
-Meye Yie is a Ghanaian phrase that means "I will prosper" or "I will succeed". It is a song that expresses the hope and determination of the artists to achieve their goals and dreams. The song is a fusion of afrobeat, dancehall, and hip hop genres, creating a unique and appealing sound. The song was produced by Kobby Berry, a renowned Ghanaian producer who has worked with many other artists.
- What is Meye Yie?
-Meye Yie is a song that was released by King Plaxma, a talented artist from Ghana, on March 15, 2023. The song features 39/40, a duo of young rappers who are also from Ghana. The song is part of King Plaxma's upcoming album, which is expected to be released later this year. The song has been well received by fans and critics alike, who have praised its catchy chorus, smooth flow, and motivational message.
- Who is King Plaxma?
-King Plaxma is a Ghanaian singer, songwriter, and rapper who started his music career in 2019. He is known for his versatile style and his ability to blend different genres of music. He has released several singles and collaborations with other artists, such as Tulenkey, Omah Lay, Davido, and more. Some of his popular songs include Breaks, Upgraded, I See You, Pretty Girl, and I Don't Fear Dem. He is also the founder of Plaxma Nation, a music label that supports upcoming artists.
- Who are 39/40?
-39/40 are a Ghanaian rap duo who are making waves in the music industry. They are composed of O'Kenneth and Reggie, two young rappers who are part of the Asakaa Boys collective. They are known for their streetwise lyrics, energetic delivery, and hard-hitting beats. They have collaborated with other artists such as Jay Bahd, Black Sherif, Gyakie, Fameye, and more. Some of their popular songs include This Song, Easy, Mercy, Mama Yie, and Meye Guy.
- How to download Meye Yie?
-If you want to download Meye Yie to your device or computer, there are several ways you can do it. Here are some of the options you can choose from:
- Download from Zacknation.net
-Zacknation.net is a Ghanaian website that offers free mp3 downloads of various songs from different genres and artists. You can download Meye Yie from Zacknation.net by following these steps:
-
-Go to [Zacknation.net] on your browser.
-Type "Meye Yie" in the search box and click on the magnifying glass icon.
-Scroll down until you find the post titled "Download: King Plaxma Ft 39/40 – Meye Yie Mp3 (New Song)".
-Click on the link that says "Meye Yie By King Plaxma Ft 39/40 – Meye Yie.mp3" to start the download.
-
- Download from YouTube
-You can also download Meye Yie from YouTube, where you can watch the official video of the song. The video was uploaded by King Plaxma on his YouTube channel on March 16, 2023. It has over 1 million views and thousands of likes and comments. You can download Meye Yie from YouTube by following these steps:
-
-Go to [YouTube.com] on your browser.
-Type "Meye Yie King Plaxma" in the search box and click on the magnifying glass icon.
-Click on the video titled "King Plaxma - Meye Yie ft. 39/40 (Official Video)".
-Copy the URL of the video from the address bar.
-Go to a YouTube to mp3 converter website, such as [ytmp3.cc] or [y2mate.com].
-Paste the URL of the video in the box and click on "Convert" or "Start".
-Choose the format and quality of the mp3 file and click on "Download" or "Save".
-Wait for the download to finish and enjoy the song.
-
- Download from other sources
-If you prefer to download Meye Yie from other sources, you can also find it on various music streaming platforms and websites. Some of the options you can choose from are:
-
-[Spotify]: You can listen to Meye Yie on Spotify, a popular music streaming service that offers millions of songs and podcasts. You can also download Meye Yie to your device if you have a Spotify Premium account, which costs $9.99 per month.
-[Apple Music]: You can listen to Meye Yie on Apple Music, a music streaming service that is integrated with iTunes and iCloud. You can also download Meye Yie to your device if you have an Apple Music subscription, which costs $9.99 per month.
-[Audiomack]: You can listen to Meye Yie on Audiomack, a music streaming and discovery platform that features emerging artists and genres. You can also download Meye Yie to your device for free if you have an Audiomack account, which is also free.
-[SoundCloud]: You can listen to Meye Yie on SoundCloud, a music sharing and social networking platform that allows users to upload and stream their own songs. You can also download Meye Yie to your device for free if you have a SoundCloud account, which is also free.
-
- Why you should listen to Meye Yie?
-Meye Yie is not just a song, but a message of hope and inspiration. Here are some of the reasons why you should listen to Meye Yie:
-
- It is a cool-tempo production
-Meye Yie is a song that has a cool-tempo production, meaning that it has a moderate pace and a relaxed vibe. The song has a smooth beat that is easy to groove to, and a melody that is pleasant to listen to. The song is suitable for any mood and occasion, whether you want to chill out, dance, or work out.
- It features talented artists
-Meye Yie is a song that features talented artists who showcase their skills and creativity. King Plaxma delivers a captivating performance with his vocals and rap verses, while 39/40 add their flair and energy with their rap lines. The artists complement each other well and create a harmonious collaboration.
- It has a catchy chorus and lyrics
-Meye Yie is a song that has a catchy chorus and lyrics that will stick in your head. The chorus repeats the phrase "Meye yie" several times, creating a memorable hook that will make you sing along. The lyrics are also meaningful and motivational, as they express the desire to prosper and overcome challenges. The song will inspire you to pursue your dreams and never give up.
- Conclusion
-Meye Yie is a song that you should not miss out on. It is a song that combines afrobeat, dancehall, and hip hop genres, creating a unique and appealing sound. It is a song that features King Plaxma and 39/40, two talented artists who deliver an amazing performance. It is a song that has a catchy chorus and lyrics that will inspire you to prosper and succeed. It is a song that you can download from various sources, such as Zacknation.net, YouTube, Spotify, Apple Music, Audiomack, and SoundCloud. If you are looking for a new song to add to your playlist, you should definitely download Meye Yie by King Plaxma featuring 39/40.
- FAQs
-Here are some of the frequently asked questions about Meye Yie:
-
-Q: What does Meye Yie mean?
-A: Meye Yie is a Ghanaian phrase that means "I will prosper" or "I will succeed".
-Q: Who produced Meye Yie?
-A: Meye Yie was produced by Kobby Berry, a renowned Ghanaian producer who has worked with many other artists.
-Q: When was Meye Yie released?
-A: Meye Yie was released by King Plaxma on March 15, 2023.
-Q: How long is Meye Yie?
-A: Meye Yie is 3 minutes and 45 seconds long.
-Q: Where can I watch the video of Meye Yie?
-A: You can watch the video of Meye Yie on YouTube, where it was uploaded by King Plaxma on his YouTube channel on March 16, 2023.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Mario Kart Tour Mod Apk and Enjoy Unlimited Coins and Rubies.md b/spaces/1phancelerku/anime-remove-background/Download Mario Kart Tour Mod Apk and Enjoy Unlimited Coins and Rubies.md
deleted file mode 100644
index ec325dd8bc8abee831554958e755229a43da0a24..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Mario Kart Tour Mod Apk and Enjoy Unlimited Coins and Rubies.md
+++ /dev/null
@@ -1,89 +0,0 @@
-
-Mario Kart Tour Mod Apk (Unlimited Money and Gems)
-Mario Kart Tour is a mobile game in the Mario Kart series that lets you race around courses inspired by real-world cities and classic Mario Kart tracks. The game is free to play, but it has a monetization system that involves microtransactions, gacha mechanics, and a Gold Pass subscription. If you want to enjoy the game without spending money or time, you might be interested in Mario Kart Tour mod apk (unlimited money and gems).
-This is a modified version of the game that gives you unlimited rubies and coins, which are the premium currencies in the game. You can use them to fire off the pipe and get random characters, karts, and gliders. You can also use them to buy items from the shop or play coin rush mode. With unlimited rubies and coins, you can unlock all the content in the game without waiting or grinding.
-Features of Mario Kart Tour Mod Apk (Unlimited Money and Gems)
-Here are some of the features of Mario Kart Tour mod apk (unlimited money and gems) that make it different from the original game:
-
-Unlimited rubies and coins : You can get as many rubies and coins as you want by using a simple hack. You don't need to watch ads or complete surveys to get them. You can use them to unlock all the drivers, karts, and gliders in the game.
-X2 points and coin bonus : You can get double the points and coins for every race you complete. This means you can level up faster and earn more rewards. You can also increase your online rank and compete with other players.
-Frenzy mode activated for any item : You can activate frenzy mode for any item you get from an item box. Frenzy mode gives you an unlimited supply of a certain item and makes you invincible for a short time. You can use this to cause chaos on the track and gain an advantage over your opponents.
-All tours and cups unlocked : You can access all the tours and cups in the game without collecting grand stars or completing challenges. You can play any course you want at any time. You can also enjoy the courses based on real-world cities that rotate every two weeks.
-No ads or subscription required : You can play Mario Kart Tour mod apk (unlimited money and gems) without seeing any ads or paying for a Gold Pass subscription. You can enjoy all the features of the game without any interruptions or limitations.
-
-How to Download and Install Mario Kart Tour Mod Apk (Unlimited Money and Gems)
-If you want to download and install Mario Kart Tour mod apk (unlimited money and gems), here are the steps you need to follow:
-
-Go to [Google Play](https://play.google.com/store/apps/details?id=com.nintendo.zaka) or the [App Store](https://apps.apple.com/us/app/mario-kart-tour/id1293634699) and download the original Mario Kart Tour game on your device.
-Go to [apkprox.com](https://apkprox.com/mario-kart-tour-mod-apk/) or another trusted source and download the Mario Kart Tour mod apk file on your device.
-Enable unknown sources on your device settings to allow installation of third-party apps.
-Locate the downloaded mod apk file on your device storage and tap on it to install it.
-Launch the game and enjoy unlimited money and gems.
-
-FAQs
- Here are some of the frequently asked questions and answers about Mario Kart Tour mod apk (unlimited money and gems):
-
-
-Question
-Answer
-
-
-Is Mario Kart Tour mod apk (unlimited money and gems) safe to use?
-Yes, Mario Kart Tour mod apk (unlimited money and gems) is safe to use as long as you download it from a trusted source and scan it for viruses before installing it. However, you should be aware that using a modded version of the game may violate the terms of service of Nintendo and result in a ban or suspension of your account.
-
-
-Does Mario Kart Tour mod apk (unlimited money and gems) work online?
-Yes, Mario Kart Tour mod apk (unlimited money and gems) works online and offline. You can play the game with other players online or solo offline. However, you should be careful not to use the modded features too obviously or excessively, as this may alert other players or Nintendo and get you reported or banned.
-
-
-Can I update Mario Kart Tour mod apk (unlimited money and gems) to the latest version?
-Yes, you can update Mario Kart Tour mod apk (unlimited money and gems) to the latest version by downloading the new mod apk file from the same source and installing it over the existing one. However, you should always backup your game data before updating, as some updates may cause compatibility issues or data loss.
-
-
-Can I use Mario Kart Tour mod apk (unlimited money and gems) on iOS devices?
-No, Mario Kart Tour mod apk (unlimited money and gems) is only available for Android devices. If you want to use a modded version of the game on iOS devices, you will need to jailbreak your device and use a different method, such as Cydia or iFile. However, this may void your warranty or damage your device, so proceed at your own risk.
-
-
-What are the alternatives to Mario Kart Tour mod apk (unlimited money and gems)?
-If you don't want to use Mario Kart Tour mod apk (unlimited money and gems), you can try some of the alternatives, such as:
-
-Mario Kart Tour cheats: These are codes or commands that you can enter in the game to get extra rubies, coins, items, or other benefits. However, these cheats may not work on all devices or versions of the game, and they may also get you banned or suspended.
-Mario Kart Tour hacks: These are tools or programs that you can use to modify the game files or data to get unlimited money and gems or other features. However, these hacks may require root access or special permissions on your device, and they may also be detected or blocked by Nintendo.
-Mario Kart Tour tips and tricks: These are strategies or techniques that you can use to play the game better and earn more rubies, coins, items, or points. These tips and tricks are legal and ethical, and they do not require any modification or hacking of the game.
-
-
-
- I hope this article has helped you learn more about Mario Kart Tour mod apk (unlimited money and gems) and how to download and install it on your device. If you have any questions or feedback, please leave a comment below. Thank you for reading!
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Mod Incredibox and Explore the Galaxy the Ocean and the Nightmare.md b/spaces/1phancelerku/anime-remove-background/Download Mod Incredibox and Explore the Galaxy the Ocean and the Nightmare.md
deleted file mode 100644
index c72fbe9de21f293bf903ddf787563dbcf2d54ee3..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Mod Incredibox and Explore the Galaxy the Ocean and the Nightmare.md
+++ /dev/null
@@ -1,112 +0,0 @@
-
-How to Download Mod Incredibox: A Guide for Music Lovers
- If you love music and creativity, you might have heard of Incredibox, a music app that lets you create your own music with the help of a merry crew of beatboxers. But did you know that you can also download mod Incredibox versions that are made by fans and offer different themes, sounds, and stories? In this article, we will show you what Incredibox is, what mods are, how to download and install them, and how to play and enjoy them.
- What is Incredibox and why you should try it
- Incredibox is a fun, interactive music app
- Incredibox is a music app that was created in 2009 by a French company called So Far So Good. It is part game, part tool, and part audio-visual experience, and it has quickly become a hit with people of all ages. More than 70 million players worldwide have already enjoyed it.
- You can create your own music with different styles and atmospheres
- Incredibox allows you to create your own music with the help of a merry crew of beatboxers. You can choose your musical style among 8 impressive atmospheres, such as Alpha, Little Miss, Jeevan, Brazil, Alive, The Love, Sunrise, and Voxel. Each atmosphere has its own sounds, graphics, animation, and interactivity. You can drag and drop icons onto the avatars to make them sing and start to compose your own mix. You can also find the right sound combos to unlock animated choruses that will enhance your tune.
- You can share your mix and join the top 50 chart
- Once your composition sounds great, you can save it and get a link to your mix. You can easily share it with anybody so they can listen and even vote for it. If you share your mix a lot and get enough votes from other users, you may go down in Incredibox history by joining the top 50 chart. You can also browse other users' mixes and get inspired by their creativity.
- What are mods and how they can enhance your experience
- Mods are fan-made versions or modifications of the game
- Mods are fan-made versions or modifications of the game that change some aspects of it, such as the look, the sounds, or the gameplay. Mods are not official versions of Incredibox, but they are made by fans who love the game and want to add their own touch to it. Mods are usually free to download and play, but they may not work on all devices or browsers.
- You can find different themes, sounds, and stories in mods
- Mods offer different themes, sounds, and stories that are not available in the original version of Incredibox. For example, some mods have a spooky theme for Halloween, a Christmas theme for the holidays, or a galaxy theme for sci -fi adventure. Some mods have different sounds and instruments that are not in the original game, such as guitars, pianos, or drums. Some mods have different stories and characters that are inspired by movies, books, or games, such as Harry Potter, Star Wars, or Minecraft. You can discover the stories and secrets behind each mod by playing them and finding the right sound combos.
- Some of the most popular mods are Evadare, Galaxy, and The Bells
- Some of the most popular mods for Incredibox are Evadare, Galaxy, and The Bells. Evadare is a mod that has a dark and mysterious theme, with sounds that are inspired by horror movies and games. Galaxy is a mod that has a futuristic and space theme, with sounds that are inspired by sci-fi movies and games. The Bells is a mod that has a festive and cheerful theme, with sounds that are inspired by Christmas songs and bells. You can find these mods and more on websites like Incredirem or Google Sites.
- How to download and install mods for Incredibox
- You need to have the app or the web version of Incredibox first
- To download and install mods for Incredibox, you need to have the app or the web version of Incredibox first. You can download the app from the App Store or Google Play for $3.99, or you can play the web version for free on the official website. The app version has more features and updates than the web version, but both versions are compatible with most mods.
- You can find mods on websites like Incredirem or Google Sites
- You can find mods for Incredibox on websites like Incredirem or Google Sites. These websites have a collection of mods that are made by fans and are free to download and play. You can browse the mods by categories, ratings, or popularity, and you can see screenshots and videos of each mod before downloading them. You can also leave feedback and comments for the mod creators and other players.
- You can download the mod files and replace them in the app folder or use a browser extension
- To download and install mods for Incredibox, you need to download the mod files from the websites and replace them in the app folder or use a browser extension. If you have the app version of Incredibox, you need to locate the app folder on your device and replace the original files with the mod files. You may need to use a file manager app or connect your device to a computer to do this. If you have the web version of Incredibox, you need to use a browser extension like Tampermonkey or Greasemonkey to run the mod files on your browser. You may need to enable some permissions or settings to do this.
- How to play and enjoy mods for Incredibox
- You can drag and drop icons onto the avatars to make them sing
- To play mods for Incredibox, you can drag and drop icons onto the avatars to make them sing. Each icon represents a different sound or instrument that will add to your mix. You can experiment with different combinations and see what sounds good together. You can also mute or solo each avatar by clicking on them.
- You can find the right sound combos to unlock animated choruses
- To enjoy mods for Incredibox, you can find the right sound combos to unlock animated choruses. Each mod has its own set of sound combos that will trigger an animated chorus that will enhance your mix. You can see hints of the sound combos on the top right corner of the screen, or you can try to find them by yourself. Some sound combos may also reveal hidden secrets or stories behind each mod.
- You can discover the stories and secrets behind each mod
- To appreciate mods for Incredibox, you can discover the stories and secrets behind each mod. Each mod has its own theme, atmosphere, and story that is expressed through its sounds, graphics, animation, and interactivity. You can try to understand what each mod is about by playing it and finding its sound combos. You can also read more about each mod on its website or in its description.
- Conclusion and FAQs
- In conclusion, Incredibox is a fun, interactive music app that lets you create your own music with different styles and atmospheres. You can also download mod Incredibox versions that are made by fans and offer different themes, sounds, and stories. To install a mod, you first need the app or the web version of Incredibox; you can then find mods on websites like Incredirem or Google Sites, download the mod files, and either replace the files in the app folder or run them with a browser extension. To play and enjoy mods, drag and drop icons onto the avatars to make them sing, find the right sound combos to unlock animated choruses, and discover the stories and secrets behind each mod. We hope this guide has helped you learn how to download mod Incredibox and have fun with it. If you have any questions, check the FAQs below or contact us for more information.
- FAQs
-
-| Question | Answer |
-| --- | --- |
-| Is Incredibox safe to download and play? | Yes, Incredibox is safe to download and play. It does not contain any viruses, malware, or inappropriate content. However, you should always be careful when downloading files from unknown sources and scan them for any potential threats. |
-| Is Incredibox free to play? | Incredibox is free to play on the web version, but you need to pay $3.99 to download the app version. The web version has fewer features and updates than the app version, but both versions are compatible with most mods. Mods are usually free to download and play, but some mod creators may ask for donations or support. |
-| How can I create my own mod for Incredibox? | To create your own mod, you need some skills in coding, sound design, and graphic design. You also need access to the original files of Incredibox so you can modify them according to your vision. You can use tools like Audacity, Photoshop, or Unity to create your own sounds, graphics, and animation, and host your mod on websites like Incredirem or Google Sites to share it with other players. |
-| How can I support the developers of Incredibox? | Buy the app version of the game, leave a positive review or rating on the app store or website, follow the developers on social media, or donate to their Patreon page. You can also spread the word about Incredibox and invite your friends and family to play it. |
-| What are some other music apps that are similar to Incredibox? | Groovepad, Beat Snap, Music Maker Jam, BandLab, and GarageBand. These apps allow you to create your own music with different genres, instruments, loops, effects, and features, and you can share your music with other users and listen to their creations. |
-
-
-
\ No newline at end of file
diff --git a/spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/__init__.py b/spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/__init__.py
deleted file mode 100644
index 3e7f6a1ef940f2d20830d98336c34cbbc600d905..0000000000000000000000000000000000000000
--- a/spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from .core_wrapper import CoreWrapper, load_runtime_lib
-from .make_synthesis_engines import make_synthesis_engines
-from .synthesis_engine import SynthesisEngine
-from .synthesis_engine_base import SynthesisEngineBase
-
-__all__ = [
- "CoreWrapper",
- "load_runtime_lib",
- "make_synthesis_engines",
- "SynthesisEngine",
- "SynthesisEngineBase",
-]
diff --git a/spaces/801artistry/RVC801/Fixes/tensor-launch.py b/spaces/801artistry/RVC801/Fixes/tensor-launch.py
deleted file mode 100644
index cd4ec997fb4b1338d7f29912987865899281b083..0000000000000000000000000000000000000000
--- a/spaces/801artistry/RVC801/Fixes/tensor-launch.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import threading
-import time
-from tensorboard import program
-import os
-
-log_path = "logs"
-
-if __name__ == "__main__":
- tb = program.TensorBoard()
- tb.configure(argv=[None, '--logdir', log_path])
- url = tb.launch()
- print(f'Tensorboard can be accessed at: {url}')
-
- while True:
- time.sleep(600) # Keep the main thread running
\ No newline at end of file
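
The deleted launcher above imports `threading` and `os` but never uses them; it simply blocks the main thread after starting TensorBoard. As a minimal sketch (not part of the original file), the same `tensorboard.program` API could be driven from a daemon thread so the caller keeps its main thread free; the `logs` directory below is the same default the script hard-codes.

```python
# Sketch only: launch TensorBoard from a daemon thread using the same
# tensorboard.program API the deleted script relies on.
import threading
import time
from tensorboard import program

def launch_tensorboard(logdir: str) -> None:
    tb = program.TensorBoard()
    tb.configure(argv=[None, '--logdir', logdir])
    url = tb.launch()
    print(f'Tensorboard can be accessed at: {url}')

if __name__ == '__main__':
    threading.Thread(target=launch_tensorboard, args=('logs',), daemon=True).start()
    while True:
        time.sleep(600)  # keep the main process alive, as in the original script
```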
diff --git a/spaces/AIConsultant/MusicGen/audiocraft/optim/dadam.py b/spaces/AIConsultant/MusicGen/audiocraft/optim/dadam.py
deleted file mode 100644
index a84402f744867610180b9576b2ee3302501fd035..0000000000000000000000000000000000000000
--- a/spaces/AIConsultant/MusicGen/audiocraft/optim/dadam.py
+++ /dev/null
@@ -1,252 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-from typing import TYPE_CHECKING, Any
-
-import torch
-import torch.optim
-import torch.distributed as dist
-
-if TYPE_CHECKING:
- from torch.optim.optimizer import _params_t
-else:
- _params_t = Any
-
-
-logger = logging.getLogger(__name__)
-
-
-def to_real(x):
- if torch.is_complex(x):
- return x.real
- else:
- return x
-
-
-class DAdaptAdam(torch.optim.Optimizer):
- """Adam with D-Adaptation automatic step-sizes.
- Leave LR set to 1 unless you encounter instability.
-
- Args:
- params (iterable):
- Iterable of parameters to optimize or dicts defining parameter groups.
- lr (float):
- Learning rate adjustment parameter. Increases or decreases the D-adapted learning rate.
- betas (tuple[float, float], optional): coefficients used for computing
- running averages of gradient and its square (default: (0.9, 0.999))
- eps (float):
- Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-8).
- weight_decay (float):
- Weight decay, i.e. a L2 penalty (default: 0).
- log_every (int):
- Log using print every k steps, default 0 (no logging).
- decouple (boolean):
- Use AdamW style decoupled weight decay
- d0 (float):
- Initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
- growth_rate (float):
- prevent the D estimate from growing faster than this multiplicative rate.
- Default is inf, for unrestricted. Values like 1.02 give a kind of learning
- rate warmup effect.
- fsdp_in_use (bool):
- If you're using sharded parameters, this should be set to True. The optimizer
- will attempt to auto-detect this, but if you're using an implementation other
- than PyTorch's builtin version, the auto-detection won't work.
- """
- def __init__(self, params, lr=1.0,
- betas=(0.9, 0.999),
- eps=1e-8,
- weight_decay=0,
- log_every=0,
- decouple=True,
- d0=1e-6,
- growth_rate=float('inf')):
- if not 0.0 < d0:
- raise ValueError("Invalid d0 value: {}".format(d0))
- if not 0.0 < lr:
- raise ValueError("Invalid learning rate: {}".format(lr))
- if not 0.0 < eps:
- raise ValueError("Invalid epsilon value: {}".format(eps))
- if not 0.0 <= betas[0] < 1.0:
- raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
- if not 0.0 <= betas[1] < 1.0:
- raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
-
- if decouple:
- logger.info("Using decoupled weight decay")
-
- from .fsdp import is_fsdp_used
- fsdp_in_use = is_fsdp_used()
- defaults = dict(lr=lr, betas=betas, eps=eps,
- weight_decay=weight_decay,
- d=d0,
- k=0,
- gsq_weighted=0.0,
- log_every=log_every,
- decouple=decouple,
- growth_rate=growth_rate,
- fsdp_in_use=fsdp_in_use)
-
- super().__init__(params, defaults)
-
- @property
- def supports_memory_efficient_fp16(self):
- return False
-
- @property
- def supports_flat_params(self):
- return True
-
- def step(self, closure=None):
- """Performs a single optimization step.
-
- Args:
- closure (callable, optional): A closure that reevaluates the model
- and returns the loss.
- """
- loss = None
- if closure is not None:
- loss = closure()
-
- g_sq = 0.0
- sksq_weighted = 0.0
- sk_l1 = 0.0
-
- lr = max(group['lr'] for group in self.param_groups)
-
- group = self.param_groups[0]
- gsq_weighted = group['gsq_weighted']
- d = group['d']
- dlr = d*lr
-
- growth_rate = group['growth_rate']
- decouple = group['decouple']
- fsdp_in_use = group['fsdp_in_use']
- log_every = group['log_every']
-
- beta1, beta2 = group['betas']
-
- for group in self.param_groups:
- group_lr = group['lr']
- decay = group['weight_decay']
- k = group['k']
- eps = group['eps']
-
- if group_lr not in [lr, 0.0]:
- raise RuntimeError("Setting different lr values in different parameter "
- "groups is only supported for values of 0")
-
- for p in group['params']:
- if p.grad is None:
- continue
- if hasattr(p, "_fsdp_flattened"):
- fsdp_in_use = True
- grad = p.grad.data
-
- # Apply weight decay (coupled variant)
- if decay != 0 and not decouple:
- grad.add_(p.data, alpha=decay)
-
- state = self.state[p]
-
- # State initialization
- if 'step' not in state:
- state['step'] = 0
- state['s'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
- # Exponential moving average of gradient values
- state['exp_avg'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
- # Exponential moving average of squared gradient values
- state['exp_avg_sq'] = torch.zeros_like(
- to_real(p.data), memory_format=torch.preserve_format).detach()
-
- exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
-
- grad_grad = to_real(grad * grad.conj())
-
- # Adam EMA updates
- if group_lr > 0:
- exp_avg.mul_(beta1).add_(grad, alpha=dlr*(1-beta1))
- exp_avg_sq.mul_(beta2).add_(grad_grad, alpha=1-beta2)
-
- denom = exp_avg_sq.sqrt().add_(eps)
-
- g_sq += grad_grad.div_(denom).sum().item()
-
- s = state['s']
- s.mul_(beta2).add_(grad, alpha=dlr*(1-beta2))
- sksq_weighted += to_real(s * s.conj()).div_(denom).sum().item()
- sk_l1 += s.abs().sum().item()
-
- ######
-
- gsq_weighted = beta2*gsq_weighted + g_sq*(dlr**2)*(1-beta2)
- d_hat = d
-
- # if we have not made any progress, return
- # if any gradients are available, we will have sk_l1 > 0 (unless \|g\|=0)
- if sk_l1 == 0:
- return loss
-
- if lr > 0.0:
- if fsdp_in_use:
- dist_tensor = torch.zeros(3, device='cuda')
- dist_tensor[0] = sksq_weighted
- dist_tensor[1] = gsq_weighted
- dist_tensor[2] = sk_l1
- dist.all_reduce(dist_tensor, op=dist.ReduceOp.SUM)
- global_sksq_weighted = dist_tensor[0]
- global_gsq_weighted = dist_tensor[1]
- global_sk_l1 = dist_tensor[2]
- else:
- global_sksq_weighted = sksq_weighted
- global_gsq_weighted = gsq_weighted
- global_sk_l1 = sk_l1
-
- d_hat = (global_sksq_weighted/(1-beta2) - global_gsq_weighted)/global_sk_l1
- d = max(d, min(d_hat, d*growth_rate))
-
- if log_every > 0 and k % log_every == 0:
- logger.info(
- f"(k={k}) dlr: {dlr:1.1e} d_hat: {d_hat:1.1e}, d: {d:1.8}. "
- f"sksq_weighted={global_sksq_weighted:1.1e} gsq_weighted={global_gsq_weighted:1.1e} "
- f"sk_l1={global_sk_l1:1.1e}{' (FSDP)' if fsdp_in_use else ''}")
-
- for group in self.param_groups:
- group['gsq_weighted'] = gsq_weighted
- group['d'] = d
-
- group_lr = group['lr']
- decay = group['weight_decay']
- k = group['k']
- eps = group['eps']
-
- for p in group['params']:
- if p.grad is None:
- continue
- grad = p.grad.data
-
- state = self.state[p]
-
- exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
-
- state['step'] += 1
-
- denom = exp_avg_sq.sqrt().add_(eps)
- denom = denom.type(p.type())
-
- # Apply weight decay (decoupled variant)
- if decay != 0 and decouple and group_lr > 0:
- p.data.add_(p.data, alpha=-decay * dlr)
-
- # Take step
- p.data.addcdiv_(exp_avg, denom, value=-1)
-
- group['k'] = k + 1
-
- return loss
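
The docstring above says to leave `lr` at 1 and let D-Adaptation pick the effective step size. Below is a minimal usage sketch, assuming the deleted module is still importable as `audiocraft.optim.dadam` (the path the diff shows); the toy model and data are illustrative only.

```python
# Hedged usage sketch for DAdaptAdam; assumes the deleted module is importable.
import torch
import torch.nn.functional as F
from audiocraft.optim.dadam import DAdaptAdam

model = torch.nn.Linear(16, 1)
# Per the docstring, keep lr at 1.0 unless training becomes unstable.
opt = DAdaptAdam(model.parameters(), lr=1.0, betas=(0.9, 0.999), log_every=100)

x, y = torch.randn(64, 16), torch.randn(64, 1)
for _ in range(10):
    opt.zero_grad()
    loss = F.mse_loss(model(x), y)
    loss.backward()
    opt.step()  # the adapted step size D grows from d0 as steps accumulate
```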
diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/fastspeech/pe.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/fastspeech/pe.py
deleted file mode 100644
index d9fa5098b378bb4ed10f97f05a9ff725d1d2239c..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/fastspeech/pe.py
+++ /dev/null
@@ -1,149 +0,0 @@
-from modules.commons.common_layers import *
-from utils.hparams import hparams
-from modules.fastspeech.tts_modules import PitchPredictor
-from utils.pitch_utils import denorm_f0
-
-
-class Prenet(nn.Module):
- def __init__(self, in_dim=80, out_dim=256, kernel=5, n_layers=3, strides=None):
- super(Prenet, self).__init__()
- padding = kernel // 2
- self.layers = []
- self.strides = strides if strides is not None else [1] * n_layers
- for l in range(n_layers):
- self.layers.append(nn.Sequential(
- nn.Conv1d(in_dim, out_dim, kernel_size=kernel, padding=padding, stride=self.strides[l]),
- nn.ReLU(),
- nn.BatchNorm1d(out_dim)
- ))
- in_dim = out_dim
- self.layers = nn.ModuleList(self.layers)
- self.out_proj = nn.Linear(out_dim, out_dim)
-
- def forward(self, x):
- """
-
- :param x: [B, T, 80]
- :return: [L, B, T, H], [B, T, H]
- """
- padding_mask = x.abs().sum(-1).eq(0).data # [B, T]
- nonpadding_mask_TB = 1 - padding_mask.float()[:, None, :] # [B, 1, T]
- x = x.transpose(1, 2)
- hiddens = []
- for i, l in enumerate(self.layers):
- nonpadding_mask_TB = nonpadding_mask_TB[:, :, ::self.strides[i]]
- x = l(x) * nonpadding_mask_TB
- hiddens.append(x)
- hiddens = torch.stack(hiddens, 0) # [L, B, H, T]
- hiddens = hiddens.transpose(2, 3) # [L, B, T, H]
- x = self.out_proj(x.transpose(1, 2)) # [B, T, H]
- x = x * nonpadding_mask_TB.transpose(1, 2)
- return hiddens, x
-
-
-class ConvBlock(nn.Module):
- def __init__(self, idim=80, n_chans=256, kernel_size=3, stride=1, norm='gn', dropout=0):
- super().__init__()
- self.conv = ConvNorm(idim, n_chans, kernel_size, stride=stride)
- self.norm = norm
- if self.norm == 'bn':
- self.norm = nn.BatchNorm1d(n_chans)
- elif self.norm == 'in':
- self.norm = nn.InstanceNorm1d(n_chans, affine=True)
- elif self.norm == 'gn':
- self.norm = nn.GroupNorm(n_chans // 16, n_chans)
- elif self.norm == 'ln':
- self.norm = LayerNorm(n_chans // 16, n_chans)
- elif self.norm == 'wn':
- self.conv = torch.nn.utils.weight_norm(self.conv.conv)
- self.dropout = nn.Dropout(dropout)
- self.relu = nn.ReLU()
-
- def forward(self, x):
- """
-
- :param x: [B, C, T]
- :return: [B, C, T]
- """
- x = self.conv(x)
- if not isinstance(self.norm, str):
- if self.norm == 'none':
- pass
- elif self.norm == 'ln':
- x = self.norm(x.transpose(1, 2)).transpose(1, 2)
- else:
- x = self.norm(x)
- x = self.relu(x)
- x = self.dropout(x)
- return x
-
-
-class ConvStacks(nn.Module):
- def __init__(self, idim=80, n_layers=5, n_chans=256, odim=32, kernel_size=5, norm='gn',
- dropout=0, strides=None, res=True):
- super().__init__()
- self.conv = torch.nn.ModuleList()
- self.kernel_size = kernel_size
- self.res = res
- self.in_proj = Linear(idim, n_chans)
- if strides is None:
- strides = [1] * n_layers
- else:
- assert len(strides) == n_layers
- for idx in range(n_layers):
- self.conv.append(ConvBlock(
- n_chans, n_chans, kernel_size, stride=strides[idx], norm=norm, dropout=dropout))
- self.out_proj = Linear(n_chans, odim)
-
- def forward(self, x, return_hiddens=False):
- """
-
- :param x: [B, T, H]
- :return: [B, T, H]
- """
- x = self.in_proj(x)
- x = x.transpose(1, -1) # (B, idim, Tmax)
- hiddens = []
- for f in self.conv:
- x_ = f(x)
- x = x + x_ if self.res else x_ # (B, C, Tmax)
- hiddens.append(x)
- x = x.transpose(1, -1)
- x = self.out_proj(x) # (B, Tmax, H)
- if return_hiddens:
- hiddens = torch.stack(hiddens, 1) # [B, L, C, T]
- return x, hiddens
- return x
-
-
-class PitchExtractor(nn.Module):
- def __init__(self, n_mel_bins=80, conv_layers=2):
- super().__init__()
- self.hidden_size = hparams['hidden_size']
- self.predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size
- self.conv_layers = conv_layers
-
- self.mel_prenet = Prenet(n_mel_bins, self.hidden_size, strides=[1, 1, 1])
- if self.conv_layers > 0:
- self.mel_encoder = ConvStacks(
- idim=self.hidden_size, n_chans=self.hidden_size, odim=self.hidden_size, n_layers=self.conv_layers)
- self.pitch_predictor = PitchPredictor(
- self.hidden_size, n_chans=self.predictor_hidden,
- n_layers=5, dropout_rate=0.1, odim=2,
- padding=hparams['ffn_padding'], kernel_size=hparams['predictor_kernel'])
-
- def forward(self, mel_input=None):
- ret = {}
- mel_hidden = self.mel_prenet(mel_input)[1]
- if self.conv_layers > 0:
- mel_hidden = self.mel_encoder(mel_hidden)
-
- ret['pitch_pred'] = pitch_pred = self.pitch_predictor(mel_hidden)
-
- pitch_padding = mel_input.abs().sum(-1) == 0
- use_uv = hparams['pitch_type'] == 'frame' and hparams['use_uv']
-
- ret['f0_denorm_pred'] = denorm_f0(
- pitch_pred[:, :, 0], (pitch_pred[:, :, 1] > 0) if use_uv else None,
- hparams, pitch_padding=pitch_padding)
- return ret
\ No newline at end of file
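
The shape comments in `Prenet.forward` and `PitchExtractor.forward` rely on one convention: padded mel frames are all-zero, so padding can be recovered by summing absolute values over the mel bins. A small self-contained sketch of that convention (dummy tensors only, no hparams required):

```python
# Sketch of the padding convention used by Prenet/PitchExtractor above:
# all-zero mel frames are treated as padding.
import torch

mel = torch.zeros(2, 100, 80)            # [B, T, 80]
mel[0, :60] = torch.randn(60, 80)        # item 0: 60 valid frames, 40 padded
mel[1, :80] = torch.randn(80, 80)        # item 1: 80 valid frames, 20 padded

padding_mask = mel.abs().sum(-1).eq(0)   # [B, T], True on padded frames
nonpadding = 1 - padding_mask.float()[:, None, :]  # [B, 1, T], multiplies conv outputs
print(padding_mask.sum(-1))              # tensor([40, 20])
```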
diff --git a/spaces/Abhilashvj/planogram-compliance/utils/callbacks.py b/spaces/Abhilashvj/planogram-compliance/utils/callbacks.py
deleted file mode 100644
index a6572d1a1f9beea58989b31bda82916fec1f64c0..0000000000000000000000000000000000000000
--- a/spaces/Abhilashvj/planogram-compliance/utils/callbacks.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Callback utils
-"""
-
-import threading
-
-
-class Callbacks:
- """
- Handles all registered callbacks for YOLOv5 Hooks
- """
-
- def __init__(self):
- # Define the available callbacks
- self._callbacks = {
- "on_pretrain_routine_start": [],
- "on_pretrain_routine_end": [],
- "on_train_start": [],
- "on_train_epoch_start": [],
- "on_train_batch_start": [],
- "optimizer_step": [],
- "on_before_zero_grad": [],
- "on_train_batch_end": [],
- "on_train_epoch_end": [],
- "on_val_start": [],
- "on_val_batch_start": [],
- "on_val_image_end": [],
- "on_val_batch_end": [],
- "on_val_end": [],
- "on_fit_epoch_end": [], # fit = train + val
- "on_model_save": [],
- "on_train_end": [],
- "on_params_update": [],
- "teardown": [],
- }
- self.stop_training = False # set True to interrupt training
-
- def register_action(self, hook, name="", callback=None):
- """
- Register a new action to a callback hook
-
- Args:
- hook: The callback hook name to register the action to
- name: The name of the action for later reference
- callback: The callback to fire
- """
- assert (
- hook in self._callbacks
- ), f"hook '{hook}' not found in callbacks {self._callbacks}"
- assert callable(callback), f"callback '{callback}' is not callable"
- self._callbacks[hook].append({"name": name, "callback": callback})
-
- def get_registered_actions(self, hook=None):
- """
- Returns all the registered actions by callback hook
-
- Args:
- hook: The name of the hook to check, defaults to all
- """
- return self._callbacks[hook] if hook else self._callbacks
-
- def run(self, hook, *args, thread=False, **kwargs):
- """
- Loop through the registered actions and fire all callbacks, on the main thread or in a daemon thread
-
- Args:
- hook: The name of the hook whose callbacks should be fired
- args: Arguments to receive from YOLOv5
- thread: (boolean) Run callbacks in daemon thread
- kwargs: Keyword Arguments to receive from YOLOv5
- """
-
- assert (
- hook in self._callbacks
- ), f"hook '{hook}' not found in callbacks {self._callbacks}"
- for logger in self._callbacks[hook]:
- if thread:
- threading.Thread(
- target=logger["callback"],
- args=args,
- kwargs=kwargs,
- daemon=True,
- ).start()
- else:
- logger["callback"](*args, **kwargs)
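
A short usage sketch for the `Callbacks` class above (a sketch, not YOLOv5's actual training loop): register an action on a known hook, then fire it. Positional and keyword arguments passed to `run` are forwarded to every registered callback.

```python
# Minimal usage sketch for the Callbacks class defined above.
callbacks = Callbacks()

def log_epoch(epoch, loss):
    print(f"epoch {epoch}: loss={loss:.3f}")

callbacks.register_action("on_train_epoch_end", name="log_epoch", callback=log_epoch)

# Fires log_epoch(1, loss=0.25) on the main thread.
callbacks.run("on_train_epoch_end", 1, loss=0.25)

# With thread=True each callback runs in a daemon thread instead.
callbacks.run("on_train_epoch_end", 2, loss=0.21, thread=True)
```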
diff --git a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/prisoner_dilemma.py b/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/prisoner_dilemma.py
deleted file mode 100644
index 0053404bbccee085b70a900a40365d565705e8a7..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/prisoner_dilemma.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import asyncio
-import logging
-from typing import Any, Dict, List
-
-# from agentverse.agents.agent import Agent
-from agentverse.agents.simulation_agent.conversation import BaseAgent
-
-# from agentverse.environments.simulation_env.rules.base import Rule
-from agentverse.environments.simulation_env.rules.base import SimulationRule as Rule
-from agentverse.message import Message
-
-from .. import env_registry as EnvironmentRegistry
-from .basic import BasicEnvironment
-
-
-@EnvironmentRegistry.register("prisoner_dilemma")
-class PrisonerDilemmaEnvironment(BasicEnvironment):
- """
- An environment for prisoner dilemma.
- """
-
- async def step(self) -> List[Message]:
- """Run one step of the environment"""
-
- # Get the next agent index
- agent_ids = self.rule.get_next_agent_idx(self)
-
- # Generate current environment description
- env_descriptions = self.rule.get_env_description(self)
-
- # Generate the next message
- messages = await asyncio.gather(
- *[self.agents[i].astep(self, env_descriptions[i]) for i in agent_ids]
- )
-
- # Some rules will select certain messages from all the messages
- selected_messages = self.rule.select_message(self, messages)
- self.last_messages = selected_messages
- self.print_messages(selected_messages)
-
- # Update the memory of the agents
- self.rule.update_memory(self)
-
- # Update the set of visible agents for each agent
- self.rule.update_visible_agents(self)
-
- self.cnt_turn += 1
-
- return selected_messages
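
The core of `step()` above is the fan-out: every selected agent is queried concurrently with `asyncio.gather`. A self-contained sketch of that pattern with dummy agents standing in for `BaseAgent` (the names and the sleep are illustrative only):

```python
# Sketch of the asyncio.gather fan-out used in step(); DummyAgent is a stand-in.
import asyncio

class DummyAgent:
    def __init__(self, name):
        self.name = name

    async def astep(self, env_description):
        await asyncio.sleep(0.01)  # stands in for a slow LLM call
        return f"{self.name} responded to: {env_description}"

async def run_round():
    agents = [DummyAgent("prisoner_1"), DummyAgent("prisoner_2"), DummyAgent("police")]
    descriptions = ["interrogation, round 1"] * len(agents)
    # All agents are queried concurrently, mirroring the gather call in step().
    return await asyncio.gather(*[a.astep(d) for a, d in zip(agents, descriptions)])

print(asyncio.run(run_round()))
```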
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/bbcodetext/BBCodeText.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/bbcodetext/BBCodeText.d.ts
deleted file mode 100644
index f4364757e1811b0080605bba57631ccde17047b5..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/bbcodetext/BBCodeText.d.ts
+++ /dev/null
@@ -1,2 +0,0 @@
-import BBCodeText from '../../../plugins/bbcodetext';
-export default BBCodeText;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/LayoutChildren.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/LayoutChildren.js
deleted file mode 100644
index 619a6c3b40b4270e97ebdfe2aed413852c15d256..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/LayoutChildren.js
+++ /dev/null
@@ -1,29 +0,0 @@
-import ResizeGameObject from '../../../../plugins/utils/size/ResizeGameObject.js';
-
-var LayoutChildren = function () {
- // LayoutChildren child
- var child = this.child;
- var childWidth, childHeight;
- if (!child.rexSizer.hidden) {
- // Set size
- if (this.scrollMode === 0) {
- childWidth = this.width;
- } else {
- childHeight = this.height;
- }
- if (child.isRexSizer) {
- child.runLayout(this, childWidth, childHeight);
- } else {
- ResizeGameObject(child, childWidth, childHeight);
- }
-
- // Update local state
- this.resetChildPosition();
- // Layout children-mask
- this.layoutChildrenMask();
- // Re-mask children
- this.maskChildren();
- }
-}
-
-export default LayoutChildren;
\ No newline at end of file
diff --git a/spaces/Akmyradov/TurkmenTTSweSTT/vits/__init__.py b/spaces/Akmyradov/TurkmenTTSweSTT/vits/__init__.py
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/spaces/Akmyradov/TurkmenTTSweSTT/vits/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/spaces/AlexWang/lama/bin/gen_debug_mask_dataset.py b/spaces/AlexWang/lama/bin/gen_debug_mask_dataset.py
deleted file mode 100644
index 738f76875c82aa412063bb5bff15e69c46f20362..0000000000000000000000000000000000000000
--- a/spaces/AlexWang/lama/bin/gen_debug_mask_dataset.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python3
-
-import glob
-import os
-
-import PIL.Image as Image
-import cv2
-import numpy as np
-import tqdm
-import shutil
-
-
-from saicinpainting.evaluation.utils import load_yaml
-
-
-def generate_masks_for_img(infile, outmask_pattern, mask_size=200, step=0.5):
- inimg = Image.open(infile)
- width, height = inimg.size
- step_abs = int(mask_size * step)
-
- mask = np.zeros((height, width), dtype='uint8')
- mask_i = 0
-
- for start_vertical in range(0, height - step_abs, step_abs):
- for start_horizontal in range(0, width - step_abs, step_abs):
- mask[start_vertical:start_vertical + mask_size, start_horizontal:start_horizontal + mask_size] = 255
-
- cv2.imwrite(outmask_pattern.format(mask_i), mask)
-
- mask[start_vertical:start_vertical + mask_size, start_horizontal:start_horizontal + mask_size] = 0
- mask_i += 1
-
-
-def main(args):
- if not args.indir.endswith('/'):
- args.indir += '/'
- if not args.outdir.endswith('/'):
- args.outdir += '/'
-
- config = load_yaml(args.config)
-
- in_files = list(glob.glob(os.path.join(args.indir, '**', f'*{config.img_ext}'), recursive=True))
- for infile in tqdm.tqdm(in_files):
- outimg = args.outdir + infile[len(args.indir):]
- outmask_pattern = outimg[:-len(config.img_ext)] + '_mask{:04d}.png'
-
- os.makedirs(os.path.dirname(outimg), exist_ok=True)
- shutil.copy2(infile, outimg)
-
- generate_masks_for_img(infile, outmask_pattern, **config.gen_kwargs)
-
-
-if __name__ == '__main__':
- import argparse
-
- aparser = argparse.ArgumentParser()
- aparser.add_argument('config', type=str, help='Path to config for dataset generation')
- aparser.add_argument('indir', type=str, help='Path to folder with images')
- aparser.add_argument('outdir', type=str, help='Path to folder to store aligned images and masks to')
-
- main(aparser.parse_args())
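
The mask sweep in `generate_masks_for_img` paints one `mask_size x mask_size` square at a time, saves it, clears it, and advances by `mask_size * step` pixels. A small sketch of the positions that sweep visits for the default parameters (the image size below is an illustrative assumption):

```python
# Sketch of the sliding-window sweep performed by generate_masks_for_img above.
mask_size, step = 200, 0.5          # defaults from the function signature
width, height = 512, 384            # illustrative image size
step_abs = int(mask_size * step)    # 100 px stride between masks

positions = [
    (top, left)
    for top in range(0, height - step_abs, step_abs)
    for left in range(0, width - step_abs, step_abs)
]
print(len(positions), positions[:3])  # 15 masks; (0, 0), (0, 100), (0, 200), ...
```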
diff --git a/spaces/AlexWang/lama/models/ade20k/__init__.py b/spaces/AlexWang/lama/models/ade20k/__init__.py
deleted file mode 100644
index 773cfc4664eef45a4f6fe05bd3fe2aa2143fdb5c..0000000000000000000000000000000000000000
--- a/spaces/AlexWang/lama/models/ade20k/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .base import *
\ No newline at end of file
diff --git a/spaces/Aloento/9Nine-PITS/pqmf.py b/spaces/Aloento/9Nine-PITS/pqmf.py
deleted file mode 100644
index f6dd0439a22d80a149b855ff9b99a41c53005290..0000000000000000000000000000000000000000
--- a/spaces/Aloento/9Nine-PITS/pqmf.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2020 Tomoki Hayashi
-# MIT License (https://opensource.org/licenses/MIT)
-
-"""Pseudo QMF modules."""
-'''
-Copied from https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/layers/pqmf.py
-'''
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-
-from scipy.signal.windows import kaiser
-
-
-def design_prototype_filter(taps=62, cutoff_ratio=0.142, beta=9.0):
- """Design prototype filter for PQMF.
- This method is based on `A Kaiser window approach for the design of prototype
- filters of cosine modulated filterbanks`_.
- Args:
- taps (int): The number of filter taps.
- cutoff_ratio (float): Cut-off frequency ratio.
- beta (float): Beta coefficient for kaiser window.
- Returns:
- ndarray: Impulse response of prototype filter (taps + 1,).
- .. _`A Kaiser window approach for the design of prototype filters of cosine modulated filterbanks`:
- https://ieeexplore.ieee.org/abstract/document/681427
- """
- # check the arguments are valid
- assert taps % 2 == 0, "The number of taps must be an even number."
- assert 0.0 < cutoff_ratio < 1.0, "Cutoff ratio must be > 0.0 and < 1.0."
-
- # make initial filter
- omega_c = np.pi * cutoff_ratio
- with np.errstate(invalid="ignore"):
- h_i = np.sin(omega_c * (np.arange(taps + 1) - 0.5 * taps)) / (
- np.pi * (np.arange(taps + 1) - 0.5 * taps)
- )
- h_i[taps // 2] = np.cos(0) * cutoff_ratio # fix nan due to indeterminate form
-
- # apply kaiser window
- w = kaiser(taps + 1, beta)
- h = h_i * w
-
- return h
-
-
-class PQMF(torch.nn.Module):
- """PQMF module.
- This module is based on `Near-perfect-reconstruction pseudo-QMF banks`_.
- .. _`Near-perfect-reconstruction pseudo-QMF banks`:
- https://ieeexplore.ieee.org/document/258122
- """
-
- def __init__(self, subbands=4, taps=62, cutoff_ratio=0.142, beta=9.0):
- """Initialize PQMF module.
- The cutoff_ratio and beta parameters are optimized for #subbands = 4.
- See discussion in https://github.com/kan-bayashi/ParallelWaveGAN/issues/195.
- Args:
- subbands (int): The number of subbands.
- taps (int): The number of filter taps.
- cutoff_ratio (float): Cut-off frequency ratio.
- beta (float): Beta coefficient for kaiser window.
- """
- super(PQMF, self).__init__()
-
- # build analysis & synthesis filter coefficients
- h_proto = design_prototype_filter(taps, cutoff_ratio, beta)
- h_analysis = np.zeros((subbands, len(h_proto)))
- h_synthesis = np.zeros((subbands, len(h_proto)))
- for k in range(subbands):
- h_analysis[k] = (
- 2
- * h_proto
- * np.cos(
- (2 * k + 1)
- * (np.pi / (2 * subbands))
- * (np.arange(taps + 1) - (taps / 2))
- + (-1) ** k * np.pi / 4
- )
- )
- h_synthesis[k] = (
- 2
- * h_proto
- * np.cos(
- (2 * k + 1)
- * (np.pi / (2 * subbands))
- * (np.arange(taps + 1) - (taps / 2))
- - (-1) ** k * np.pi / 4
- )
- )
-
- # convert to tensor
- analysis_filter = torch.Tensor(h_analysis).float().unsqueeze(1)
- synthesis_filter = torch.Tensor(h_synthesis).float().unsqueeze(0)
-
- # register coefficients as buffer
- self.register_buffer("analysis_filter", analysis_filter)
- self.register_buffer("synthesis_filter", synthesis_filter)
-
- # filter for downsampling & upsampling
- updown_filter = torch.zeros((subbands, subbands, subbands)).float()
- for k in range(subbands):
- updown_filter[k, k, 0] = 1.0
- self.register_buffer("updown_filter", updown_filter)
- self.subbands = subbands
-
- # keep padding info
- self.pad_fn = torch.nn.ConstantPad1d(taps // 2, 0.0)
-
- def analysis(self, x):
- """Analysis with PQMF.
- Args:
- x (Tensor): Input tensor (B, 1, T).
- Returns:
- Tensor: Output tensor (B, subbands, T // subbands).
- """
- x = F.conv1d(self.pad_fn(x), self.analysis_filter)
- return F.conv1d(x, self.updown_filter, stride=self.subbands)
-
- def synthesis(self, x):
- """Synthesis with PQMF.
- Args:
- x (Tensor): Input tensor (B, subbands, T // subbands).
- Returns:
- Tensor: Output tensor (B, 1, T).
- """
- # NOTE(kan-bayashi): Power will be decreased, so multiply by # subbands here.
- # Not sure this is the correct way, it is better to check again.
- # TODO(kan-bayashi): Understand the reconstruction procedure
- x = F.conv_transpose1d(
- x, self.updown_filter * self.subbands, stride=self.subbands
- )
- return F.conv1d(self.pad_fn(x), self.synthesis_filter)
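
A usage sketch for the PQMF bank above, assuming the deleted module is importable as `pqmf`; the shapes follow the `analysis`/`synthesis` docstrings, and the input length is chosen as a multiple of the subband count so the shapes divide evenly.

```python
# Hedged usage sketch for the PQMF class above.
import torch
from pqmf import PQMF  # assumes the deleted module is on the import path

pqmf = PQMF(subbands=4)
x = torch.randn(1, 1, 16000)        # (B, 1, T) waveform
subbands = pqmf.analysis(x)         # (B, 4, T // 4) subband signals
recon = pqmf.synthesis(subbands)    # (B, 1, T) near-perfect reconstruction
print(subbands.shape, recon.shape)
```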
diff --git a/spaces/Ameaou/academic-chatgpt3.1/request_llm/bridge_chatglm.py b/spaces/Ameaou/academic-chatgpt3.1/request_llm/bridge_chatglm.py
deleted file mode 100644
index 7af283562ce3539de9ac1a44ba45f9266308defa..0000000000000000000000000000000000000000
--- a/spaces/Ameaou/academic-chatgpt3.1/request_llm/bridge_chatglm.py
+++ /dev/null
@@ -1,140 +0,0 @@
-
-from transformers import AutoModel, AutoTokenizer
-import time
-import importlib
-from toolbox import update_ui, get_conf
-from multiprocessing import Process, Pipe
-
-load_message = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
-
-#################################################################################
-class GetGLMHandle(Process):
- def __init__(self):
- super().__init__(daemon=True)
- self.parent, self.child = Pipe()
- self.chatglm_model = None
- self.chatglm_tokenizer = None
- self.info = ""
- self.success = True
- self.check_dependency()
- self.start()
-
- def check_dependency(self):
- try:
- import sentencepiece
- self.info = "依赖检测通过"
- self.success = True
- except:
- self.info = "缺少ChatGLM的依赖,如果要使用ChatGLM,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_chatglm.txt`安装ChatGLM的依赖。"
- self.success = False
-
- def ready(self):
- return self.chatglm_model is not None
-
- def run(self):
- # On the first run, load the model parameters
- retry = 0
- while True:
- try:
- if self.chatglm_model is None:
- self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
- device, = get_conf('LOCAL_MODEL_DEVICE')
- if device=='cpu':
- self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
- else:
- self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
- self.chatglm_model = self.chatglm_model.eval()
- break
- else:
- break
- except:
- retry += 1
- if retry > 3:
- self.child.send('[Local Message] Call ChatGLM fail 不能正常加载ChatGLM的参数。')
- raise RuntimeError("不能正常加载ChatGLM的参数!")
-
- # Enter the task-waiting loop
- while True:
- kwargs = self.child.recv()
- try:
- for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs):
- self.child.send(response)
- except:
- self.child.send('[Local Message] Call ChatGLM fail.')
- self.child.send('[Finish]')
-
- def stream_chat(self, **kwargs):
- self.parent.send(kwargs)
- while True:
- res = self.parent.recv()
- if res != '[Finish]':
- yield res
- else:
- break
- return
-
-global glm_handle
-glm_handle = None
-#################################################################################
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
- """
- Multi-threaded entry point.
- See request_llm/bridge_all.py for the function's documentation.
- """
- global glm_handle
- if glm_handle is None:
- glm_handle = GetGLMHandle()
- observe_window[0] = load_message + "\n\n" + glm_handle.info
- if not glm_handle.success:
- error = glm_handle.info
- glm_handle = None
- raise RuntimeError(error)
-
- # ChatGLM has no sys_prompt argument, so fold the system prompt into the history
- history_feedin = []
- for i in range(len(history)//2):
- history_feedin.append(["What can I do?", sys_prompt] )
- history_feedin.append([history[2*i], history[2*i+1]] )
-
- watch_dog_patience = 5 # watchdog patience, in seconds; 5 is enough
- response = ""
- for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
- observe_window[0] = response
- if len(observe_window) >= 2:
- if (time.time()-observe_window[1]) > watch_dog_patience:
- raise RuntimeError("程序终止。")
- return response
-
-
-
-def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
- """
- Single-threaded entry point.
- See request_llm/bridge_all.py for the function's documentation.
- """
- chatbot.append((inputs, ""))
-
- global glm_handle
- if glm_handle is None:
- glm_handle = GetGLMHandle()
- chatbot[-1] = (inputs, load_message + "\n\n" + glm_handle.info)
- yield from update_ui(chatbot=chatbot, history=[])
- if not glm_handle.success:
- glm_handle = None
- return
-
- if additional_fn is not None:
- import core_functional
- importlib.reload(core_functional) # hot-reload the prompt definitions
- core_functional = core_functional.get_core_functions()
- if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # apply the preprocessing function, if any
- inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
-
- history_feedin = []
- for i in range(len(history)//2):
- history_feedin.append(["What can I do?", system_prompt] )
- history_feedin.append([history[2*i], history[2*i+1]] )
-
- for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
- chatbot[-1] = (inputs, response)
- yield from update_ui(chatbot=chatbot, history=history)
\ No newline at end of file
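
`GetGLMHandle` above keeps ChatGLM in a separate process and streams partial responses back over a `Pipe`, terminating each reply with a `'[Finish]'` sentinel. Below is a stripped-down sketch of that process-plus-pipe streaming pattern, with a dummy generator standing in for the model; it illustrates the pattern only, not the actual bridge.

```python
# Sketch of the Process + Pipe streaming pattern used by GetGLMHandle above.
from multiprocessing import Process, Pipe

class StreamWorker(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()
        self.start()

    def run(self):  # executes in the child process
        while True:
            kwargs = self.child.recv()
            for chunk in ["Hel", "Hello", "Hello, world"]:  # fake partial responses
                self.child.send(chunk)
            self.child.send('[Finish]')  # same sentinel the bridge uses

    def stream_chat(self, **kwargs):  # executes in the parent process
        self.parent.send(kwargs)
        while True:
            res = self.parent.recv()
            if res == '[Finish]':
                break
            yield res

if __name__ == '__main__':
    worker = StreamWorker()
    for partial in worker.stream_chat(query="hi"):
        print(partial)
```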
diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/network.py b/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/network.py
deleted file mode 100644
index ff0c169eabdc579041dac0650fbc6da956646594..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/network.py
+++ /dev/null
@@ -1,781 +0,0 @@
-# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Helper for managing networks."""
-
-import types
-import inspect
-import re
-import uuid
-import sys
-import copy
-import numpy as np
-import tensorflow as tf
-
-from collections import OrderedDict
-from typing import Any, List, Tuple, Union, Callable
-
-from . import tfutil
-from .. import util
-
-from .tfutil import TfExpression, TfExpressionEx
-
-# pylint: disable=protected-access
-# pylint: disable=attribute-defined-outside-init
-# pylint: disable=too-many-public-methods
-
-_import_handlers = [] # Custom import handlers for dealing with legacy data in pickle import.
-_import_module_src = dict() # Source code for temporary modules created during pickle import.
-
-
-def import_handler(handler_func):
- """Function decorator for declaring custom import handlers."""
- _import_handlers.append(handler_func)
- return handler_func
-
-
-class Network:
- """Generic network abstraction.
-
- Acts as a convenience wrapper for a parameterized network construction
- function, providing several utility methods and convenient access to
- the inputs/outputs/weights.
-
- Network objects can be safely pickled and unpickled for long-term
- archival purposes. The pickling works reliably as long as the underlying
- network construction function is defined in a standalone Python module
- that has no side effects or application-specific imports.
-
- Args:
- name: Network name. Used to select TensorFlow name and variable scopes. Defaults to build func name if None.
- func_name: Fully qualified name of the underlying network construction function, or a top-level function object.
- static_kwargs: Keyword arguments to be passed in to the network construction function.
- """
-
- def __init__(self, name: str = None, func_name: Any = None, **static_kwargs):
- # Locate the user-specified build function.
- assert isinstance(func_name, str) or util.is_top_level_function(func_name)
- if util.is_top_level_function(func_name):
- func_name = util.get_top_level_function_name(func_name)
- module, func_name = util.get_module_from_obj_name(func_name)
- func = util.get_obj_from_module(module, func_name)
-
- # Dig up source code for the module containing the build function.
- module_src = _import_module_src.get(module, None)
- if module_src is None:
- module_src = inspect.getsource(module)
-
- # Initialize fields.
- self._init_fields(name=(name or func_name), static_kwargs=static_kwargs, build_func=func, build_func_name=func_name, build_module_src=module_src)
-
- def _init_fields(self, name: str, static_kwargs: dict, build_func: Callable, build_func_name: str, build_module_src: str) -> None:
- tfutil.assert_tf_initialized()
- assert isinstance(name, str)
- assert len(name) >= 1
- assert re.fullmatch(r"[A-Za-z0-9_.\\-]*", name)
- assert isinstance(static_kwargs, dict)
- assert util.is_pickleable(static_kwargs)
- assert callable(build_func)
- assert isinstance(build_func_name, str)
- assert isinstance(build_module_src, str)
-
- # Choose TensorFlow name scope.
- with tf.name_scope(None):
- scope = tf.get_default_graph().unique_name(name, mark_as_used=True)
-
- # Query current TensorFlow device.
- with tfutil.absolute_name_scope(scope), tf.control_dependencies(None):
- device = tf.no_op(name="_QueryDevice").device
-
- # Immutable state.
- self._name = name
- self._scope = scope
- self._device = device
- self._static_kwargs = util.EasyDict(copy.deepcopy(static_kwargs))
- self._build_func = build_func
- self._build_func_name = build_func_name
- self._build_module_src = build_module_src
-
- # State before _init_graph().
- self._var_inits = dict() # var_name => initial_value, set to None by _init_graph()
- self._all_inits_known = False # Do we know for sure that _var_inits covers all the variables?
- self._components = None # subnet_name => Network, None if the components are not known yet
-
- # Initialized by _init_graph().
- self._input_templates = None
- self._output_templates = None
- self._own_vars = None
-
- # Cached values initialized by the respective methods.
- self._input_shapes = None
- self._output_shapes = None
- self._input_names = None
- self._output_names = None
- self._vars = None
- self._trainables = None
- self._var_global_to_local = None
- self._run_cache = dict()
-
- def _init_graph(self) -> None:
- assert self._var_inits is not None
- assert self._input_templates is None
- assert self._output_templates is None
- assert self._own_vars is None
-
- # Initialize components.
- if self._components is None:
- self._components = util.EasyDict()
-
- # Choose build func kwargs.
- build_kwargs = dict(self.static_kwargs)
- build_kwargs["is_template_graph"] = True
- build_kwargs["components"] = self._components
-
- # Override scope and device, and ignore surrounding control dependencies.
- with tfutil.absolute_variable_scope(self.scope, reuse=False), tfutil.absolute_name_scope(self.scope), tf.device(self.device), tf.control_dependencies(None):
- assert tf.get_variable_scope().name == self.scope
- assert tf.get_default_graph().get_name_scope() == self.scope
-
- # Create input templates.
- self._input_templates = []
- for param in inspect.signature(self._build_func).parameters.values():
- if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
- self._input_templates.append(tf.placeholder(tf.float32, name=param.name))
-
- # Call build func.
- out_expr = self._build_func(*self._input_templates, **build_kwargs)
-
- # Collect output templates and variables.
- assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
- self._output_templates = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
- self._own_vars = OrderedDict((var.name[len(self.scope) + 1:].split(":")[0], var) for var in tf.global_variables(self.scope + "/"))
-
- # Check for errors.
- if len(self._input_templates) == 0:
- raise ValueError("Network build func did not list any inputs.")
- if len(self._output_templates) == 0:
- raise ValueError("Network build func did not return any outputs.")
- if any(not tfutil.is_tf_expression(t) for t in self._output_templates):
- raise ValueError("Network outputs must be TensorFlow expressions.")
- if any(t.shape.ndims is None for t in self._input_templates):
- raise ValueError("Network input shapes not defined. Please call x.set_shape() for each input.")
- if any(t.shape.ndims is None for t in self._output_templates):
- raise ValueError("Network output shapes not defined. Please call x.set_shape() where applicable.")
- if any(not isinstance(comp, Network) for comp in self._components.values()):
- raise ValueError("Components of a Network must be Networks themselves.")
- if len(self._components) != len(set(comp.name for comp in self._components.values())):
- raise ValueError("Components of a Network must have unique names.")
-
- # Initialize variables.
- if len(self._var_inits):
- tfutil.set_vars({self._get_vars()[name]: value for name, value in self._var_inits.items() if name in self._get_vars()})
- remaining_inits = [var.initializer for name, var in self._own_vars.items() if name not in self._var_inits]
- if self._all_inits_known:
- assert len(remaining_inits) == 0
- else:
- tfutil.run(remaining_inits)
- self._var_inits = None
-
- @property
- def name(self):
- """User-specified name string."""
- return self._name
-
- @property
- def scope(self):
- """Unique TensorFlow scope containing template graph and variables, derived from the user-specified name."""
- return self._scope
-
- @property
- def device(self):
- """Name of the TensorFlow device that the weights of this network reside on. Determined by the current device at construction time."""
- return self._device
-
- @property
- def static_kwargs(self):
- """EasyDict of arguments passed to the user-supplied build func."""
- return copy.deepcopy(self._static_kwargs)
-
- @property
- def components(self):
- """EasyDict of sub-networks created by the build func."""
- return copy.copy(self._get_components())
-
- def _get_components(self):
- if self._components is None:
- self._init_graph()
- assert self._components is not None
- return self._components
-
- @property
- def input_shapes(self):
- """List of input tensor shapes, including minibatch dimension."""
- if self._input_shapes is None:
- self._input_shapes = [t.shape.as_list() for t in self.input_templates]
- return copy.deepcopy(self._input_shapes)
-
- @property
- def output_shapes(self):
- """List of output tensor shapes, including minibatch dimension."""
- if self._output_shapes is None:
- self._output_shapes = [t.shape.as_list() for t in self.output_templates]
- return copy.deepcopy(self._output_shapes)
-
- @property
- def input_shape(self):
- """Short-hand for input_shapes[0]."""
- return self.input_shapes[0]
-
- @property
- def output_shape(self):
- """Short-hand for output_shapes[0]."""
- return self.output_shapes[0]
-
- @property
- def num_inputs(self):
- """Number of input tensors."""
- return len(self.input_shapes)
-
- @property
- def num_outputs(self):
- """Number of output tensors."""
- return len(self.output_shapes)
-
- @property
- def input_names(self):
- """Name string for each input."""
- if self._input_names is None:
- self._input_names = [t.name.split("/")[-1].split(":")[0] for t in self.input_templates]
- return copy.copy(self._input_names)
-
- @property
- def output_names(self):
- """Name string for each output."""
- if self._output_names is None:
- self._output_names = [t.name.split("/")[-1].split(":")[0] for t in self.output_templates]
- return copy.copy(self._output_names)
-
- @property
- def input_templates(self):
- """Input placeholders in the template graph."""
- if self._input_templates is None:
- self._init_graph()
- assert self._input_templates is not None
- return copy.copy(self._input_templates)
-
- @property
- def output_templates(self):
- """Output tensors in the template graph."""
- if self._output_templates is None:
- self._init_graph()
- assert self._output_templates is not None
- return copy.copy(self._output_templates)
-
- @property
- def own_vars(self):
- """Variables defined by this network (local_name => var), excluding sub-networks."""
- return copy.copy(self._get_own_vars())
-
- def _get_own_vars(self):
- if self._own_vars is None:
- self._init_graph()
- assert self._own_vars is not None
- return self._own_vars
-
- @property
- def vars(self):
- """All variables (local_name => var)."""
- return copy.copy(self._get_vars())
-
- def _get_vars(self):
- if self._vars is None:
- self._vars = OrderedDict(self._get_own_vars())
- for comp in self._get_components().values():
- self._vars.update((comp.name + "/" + name, var) for name, var in comp._get_vars().items())
- return self._vars
-
- @property
- def trainables(self):
- """All trainable variables (local_name => var)."""
- return copy.copy(self._get_trainables())
-
- def _get_trainables(self):
- if self._trainables is None:
- self._trainables = OrderedDict((name, var) for name, var in self.vars.items() if var.trainable)
- return self._trainables
-
- @property
- def var_global_to_local(self):
- """Mapping from variable global names to local names."""
- return copy.copy(self._get_var_global_to_local())
-
- def _get_var_global_to_local(self):
- if self._var_global_to_local is None:
- self._var_global_to_local = OrderedDict((var.name.split(":")[0], name) for name, var in self.vars.items())
- return self._var_global_to_local
-
- def reset_own_vars(self) -> None:
- """Re-initialize all variables of this network, excluding sub-networks."""
- if self._var_inits is None or self._components is None:
- tfutil.run([var.initializer for var in self._get_own_vars().values()])
- else:
- self._var_inits.clear()
- self._all_inits_known = False
-
- def reset_vars(self) -> None:
- """Re-initialize all variables of this network, including sub-networks."""
- if self._var_inits is None:
- tfutil.run([var.initializer for var in self._get_vars().values()])
- else:
- self._var_inits.clear()
- self._all_inits_known = False
- if self._components is not None:
- for comp in self._components.values():
- comp.reset_vars()
-
- def reset_trainables(self) -> None:
- """Re-initialize all trainable variables of this network, including sub-networks."""
- tfutil.run([var.initializer for var in self._get_trainables().values()])
-
- def get_output_for(self, *in_expr: TfExpression, return_as_list: bool = False, **dynamic_kwargs) -> Union[TfExpression, List[TfExpression]]:
- """Construct TensorFlow expression(s) for the output(s) of this network, given the input expression(s).
- The graph is placed on the current TensorFlow device."""
- assert len(in_expr) == self.num_inputs
- assert not all(expr is None for expr in in_expr)
- self._get_vars() # ensure that all variables have been created
-
- # Choose build func kwargs.
- build_kwargs = dict(self.static_kwargs)
- build_kwargs.update(dynamic_kwargs)
- build_kwargs["is_template_graph"] = False
- build_kwargs["components"] = self._components
-
- # Build TensorFlow graph to evaluate the network.
- with tfutil.absolute_variable_scope(self.scope, reuse=True), tf.name_scope(self.name):
- assert tf.get_variable_scope().name == self.scope
- valid_inputs = [expr for expr in in_expr if expr is not None]
- final_inputs = []
- for expr, name, shape in zip(in_expr, self.input_names, self.input_shapes):
- if expr is not None:
- expr = tf.identity(expr, name=name)
- else:
- expr = tf.zeros([tf.shape(valid_inputs[0])[0]] + shape[1:], name=name)
- final_inputs.append(expr)
- out_expr = self._build_func(*final_inputs, **build_kwargs)
-
- # Propagate input shapes back to the user-specified expressions.
- for expr, final in zip(in_expr, final_inputs):
- if isinstance(expr, tf.Tensor):
- expr.set_shape(final.shape)
-
- # Express outputs in the desired format.
- assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
- if return_as_list:
- out_expr = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
- return out_expr
-
- def get_var_local_name(self, var_or_global_name: Union[TfExpression, str]) -> str:
- """Get the local name of a given variable, without any surrounding name scopes."""
- assert tfutil.is_tf_expression(var_or_global_name) or isinstance(var_or_global_name, str)
- global_name = var_or_global_name if isinstance(var_or_global_name, str) else var_or_global_name.name
- return self._get_var_global_to_local()[global_name]
-
- def find_var(self, var_or_local_name: Union[TfExpression, str]) -> TfExpression:
- """Find variable by local or global name."""
- assert tfutil.is_tf_expression(var_or_local_name) or isinstance(var_or_local_name, str)
- return self._get_vars()[var_or_local_name] if isinstance(var_or_local_name, str) else var_or_local_name
-
- def get_var(self, var_or_local_name: Union[TfExpression, str]) -> np.ndarray:
- """Get the value of a given variable as NumPy array.
- Note: This method is very inefficient -- prefer to use tflib.run(list_of_vars) whenever possible."""
- return self.find_var(var_or_local_name).eval()
-
- def set_var(self, var_or_local_name: Union[TfExpression, str], new_value: Union[int, float, np.ndarray]) -> None:
- """Set the value of a given variable based on the given NumPy array.
- Note: This method is very inefficient -- prefer to use tflib.set_vars() whenever possible."""
- tfutil.set_vars({self.find_var(var_or_local_name): new_value})
-
- def __getstate__(self) -> dict:
- """Pickle export."""
- state = dict()
- state["version"] = 5
- state["name"] = self.name
- state["static_kwargs"] = dict(self.static_kwargs)
- state["components"] = dict(self.components)
- state["build_module_src"] = self._build_module_src
- state["build_func_name"] = self._build_func_name
- state["variables"] = list(zip(self._get_own_vars().keys(), tfutil.run(list(self._get_own_vars().values()))))
- state["input_shapes"] = self.input_shapes
- state["output_shapes"] = self.output_shapes
- state["input_names"] = self.input_names
- state["output_names"] = self.output_names
- return state
-
- def __setstate__(self, state: dict) -> None:
- """Pickle import."""
-
- # Execute custom import handlers.
- for handler in _import_handlers:
- state = handler(state)
-
- # Get basic fields.
- assert state["version"] in [2, 3, 4, 5]
- name = state["name"]
- static_kwargs = state["static_kwargs"]
- build_module_src = state["build_module_src"]
- build_func_name = state["build_func_name"]
-
- # Create temporary module from the imported source code.
- module_name = "_tflib_network_import_" + uuid.uuid4().hex
- module = types.ModuleType(module_name)
- sys.modules[module_name] = module
- _import_module_src[module] = build_module_src
- exec(build_module_src, module.__dict__) # pylint: disable=exec-used
- build_func = util.get_obj_from_module(module, build_func_name)
-
- # Initialize fields.
- self._init_fields(name=name, static_kwargs=static_kwargs, build_func=build_func, build_func_name=build_func_name, build_module_src=build_module_src)
- self._var_inits.update(copy.deepcopy(state["variables"]))
- self._all_inits_known = True
- self._components = util.EasyDict(state.get("components", {}))
- self._input_shapes = copy.deepcopy(state.get("input_shapes", None))
- self._output_shapes = copy.deepcopy(state.get("output_shapes", None))
- self._input_names = copy.deepcopy(state.get("input_names", None))
- self._output_names = copy.deepcopy(state.get("output_names", None))
-
- def clone(self, name: str = None, **new_static_kwargs) -> "Network":
- """Create a clone of this network with its own copy of the variables."""
- static_kwargs = dict(self.static_kwargs)
- static_kwargs.update(new_static_kwargs)
- net = object.__new__(Network)
- net._init_fields(name=(name or self.name), static_kwargs=static_kwargs, build_func=self._build_func, build_func_name=self._build_func_name, build_module_src=self._build_module_src)
- net.copy_vars_from(self)
- return net
-
- def copy_own_vars_from(self, src_net: "Network") -> None:
- """Copy the values of all variables from the given network, excluding sub-networks."""
-
- # Source has unknown variables or unknown components => init now.
- if (src_net._var_inits is not None and not src_net._all_inits_known) or src_net._components is None:
- src_net._get_vars()
-
- # Both networks are inited => copy directly.
- if src_net._var_inits is None and self._var_inits is None:
- names = [name for name in self._get_own_vars().keys() if name in src_net._get_own_vars()]
- tfutil.set_vars(tfutil.run({self._get_vars()[name]: src_net._get_vars()[name] for name in names}))
- return
-
- # Read from source.
- if src_net._var_inits is None:
- value_dict = tfutil.run(src_net._get_own_vars())
- else:
- value_dict = src_net._var_inits
-
- # Write to destination.
- if self._var_inits is None:
- tfutil.set_vars({self._get_vars()[name]: value for name, value in value_dict.items() if name in self._get_vars()})
- else:
- self._var_inits.update(value_dict)
-
- def copy_vars_from(self, src_net: "Network") -> None:
- """Copy the values of all variables from the given network, including sub-networks."""
-
- # Source has unknown variables or unknown components => init now.
- if (src_net._var_inits is not None and not src_net._all_inits_known) or src_net._components is None:
- src_net._get_vars()
-
- # Source is inited, but destination components have not been created yet => set as initial values.
- if src_net._var_inits is None and self._components is None:
- self._var_inits.update(tfutil.run(src_net._get_vars()))
- return
-
- # Destination has unknown components => init now.
- if self._components is None:
- self._get_vars()
-
- # Both networks are inited => copy directly.
- if src_net._var_inits is None and self._var_inits is None:
- names = [name for name in self._get_vars().keys() if name in src_net._get_vars()]
- tfutil.set_vars(tfutil.run({self._get_vars()[name]: src_net._get_vars()[name] for name in names}))
- return
-
- # Copy recursively, component by component.
- self.copy_own_vars_from(src_net)
- for name, src_comp in src_net._components.items():
- if name in self._components:
- self._components[name].copy_vars_from(src_comp)
-
- def copy_trainables_from(self, src_net: "Network") -> None:
- """Copy the values of all trainable variables from the given network, including sub-networks."""
- names = [name for name in self._get_trainables().keys() if name in src_net._get_trainables()]
- tfutil.set_vars(tfutil.run({self._get_vars()[name]: src_net._get_vars()[name] for name in names}))
-
- def convert(self, new_func_name: str, new_name: str = None, **new_static_kwargs) -> "Network":
- """Create new network with the given parameters, and copy all variables from this network."""
- if new_name is None:
- new_name = self.name
- static_kwargs = dict(self.static_kwargs)
- static_kwargs.update(new_static_kwargs)
- net = Network(name=new_name, func_name=new_func_name, **static_kwargs)
- net.copy_vars_from(self)
- return net
-
- def setup_as_moving_average_of(self, src_net: "Network", beta: TfExpressionEx = 0.99, beta_nontrainable: TfExpressionEx = 0.0) -> tf.Operation:
- """Construct a TensorFlow op that updates the variables of this network
- to be slightly closer to those of the given network."""
- with tfutil.absolute_name_scope(self.scope + "/_MovingAvg"):
- ops = []
- for name, var in self._get_vars().items():
- if name in src_net._get_vars():
- cur_beta = beta if var.trainable else beta_nontrainable
- new_value = tfutil.lerp(src_net._get_vars()[name], var, cur_beta)
- ops.append(var.assign(new_value))
- return tf.group(*ops)
-
- def run(self,
- *in_arrays: Tuple[Union[np.ndarray, None], ...],
- input_transform: dict = None,
- output_transform: dict = None,
- return_as_list: bool = False,
- print_progress: bool = False,
- minibatch_size: int = None,
- num_gpus: int = 1,
- assume_frozen: bool = False,
- **dynamic_kwargs) -> Union[np.ndarray, Tuple[np.ndarray, ...], List[np.ndarray]]:
- """Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s).
-
- Args:
- input_transform: A dict specifying a custom transformation to be applied to the input tensor(s) before evaluating the network.
- The dict must contain a 'func' field that points to a top-level function. The function is called with the input
- TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
- output_transform: A dict specifying a custom transformation to be applied to the output tensor(s) after evaluating the network.
- The dict must contain a 'func' field that points to a top-level function. The function is called with the output
- TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
- return_as_list: True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
- print_progress: Print progress to the console? Useful for very large input arrays.
- minibatch_size: Maximum minibatch size to use, None = disable batching.
- num_gpus: Number of GPUs to use.
-            assume_frozen:      Improve multi-GPU performance by assuming that the trainable parameters will remain unchanged between calls.
- dynamic_kwargs: Additional keyword arguments to be passed into the network build function.
- """
- assert len(in_arrays) == self.num_inputs
- assert not all(arr is None for arr in in_arrays)
- assert input_transform is None or util.is_top_level_function(input_transform["func"])
- assert output_transform is None or util.is_top_level_function(output_transform["func"])
- output_transform, dynamic_kwargs = _handle_legacy_output_transforms(output_transform, dynamic_kwargs)
- num_items = in_arrays[0].shape[0]
- if minibatch_size is None:
- minibatch_size = num_items
-
- # Construct unique hash key from all arguments that affect the TensorFlow graph.
- key = dict(input_transform=input_transform, output_transform=output_transform, num_gpus=num_gpus, assume_frozen=assume_frozen, dynamic_kwargs=dynamic_kwargs)
- def unwind_key(obj):
- if isinstance(obj, dict):
- return [(key, unwind_key(value)) for key, value in sorted(obj.items())]
- if callable(obj):
- return util.get_top_level_function_name(obj)
- return obj
- key = repr(unwind_key(key))
-
- # Build graph.
- if key not in self._run_cache:
- with tfutil.absolute_name_scope(self.scope + "/_Run"), tf.control_dependencies(None):
- with tf.device("/cpu:0"):
- in_expr = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
- in_split = list(zip(*[tf.split(x, num_gpus) for x in in_expr]))
-
- out_split = []
- for gpu in range(num_gpus):
- with tf.device(self.device if num_gpus == 1 else "/gpu:%d" % gpu):
- net_gpu = self.clone() if assume_frozen else self
- in_gpu = in_split[gpu]
-
- if input_transform is not None:
- in_kwargs = dict(input_transform)
- in_gpu = in_kwargs.pop("func")(*in_gpu, **in_kwargs)
- in_gpu = [in_gpu] if tfutil.is_tf_expression(in_gpu) else list(in_gpu)
-
- assert len(in_gpu) == self.num_inputs
- out_gpu = net_gpu.get_output_for(*in_gpu, return_as_list=True, **dynamic_kwargs)
-
- if output_transform is not None:
- out_kwargs = dict(output_transform)
- out_gpu = out_kwargs.pop("func")(*out_gpu, **out_kwargs)
- out_gpu = [out_gpu] if tfutil.is_tf_expression(out_gpu) else list(out_gpu)
-
- assert len(out_gpu) == self.num_outputs
- out_split.append(out_gpu)
-
- with tf.device("/cpu:0"):
- out_expr = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]
- self._run_cache[key] = in_expr, out_expr
-
- # Run minibatches.
- in_expr, out_expr = self._run_cache[key]
- out_arrays = [np.empty([num_items] + expr.shape.as_list()[1:], expr.dtype.name) for expr in out_expr]
-
- for mb_begin in range(0, num_items, minibatch_size):
- if print_progress:
- print("\r%d / %d" % (mb_begin, num_items), end="")
-
- mb_end = min(mb_begin + minibatch_size, num_items)
- mb_num = mb_end - mb_begin
- mb_in = [src[mb_begin : mb_end] if src is not None else np.zeros([mb_num] + shape[1:]) for src, shape in zip(in_arrays, self.input_shapes)]
- mb_out = tf.get_default_session().run(out_expr, dict(zip(in_expr, mb_in)))
-
- for dst, src in zip(out_arrays, mb_out):
- dst[mb_begin: mb_end] = src
-
- # Done.
- if print_progress:
- print("\r%d / %d" % (num_items, num_items))
-
- if not return_as_list:
- out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)
- return out_arrays
-
- def list_ops(self) -> List[TfExpression]:
- _ = self.output_templates # ensure that the template graph has been created
- include_prefix = self.scope + "/"
- exclude_prefix = include_prefix + "_"
- ops = tf.get_default_graph().get_operations()
- ops = [op for op in ops if op.name.startswith(include_prefix)]
- ops = [op for op in ops if not op.name.startswith(exclude_prefix)]
- return ops
-
- def list_layers(self) -> List[Tuple[str, TfExpression, List[TfExpression]]]:
- """Returns a list of (layer_name, output_expr, trainable_vars) tuples corresponding to
- individual layers of the network. Mainly intended to be used for reporting."""
- layers = []
-
- def recurse(scope, parent_ops, parent_vars, level):
- if len(parent_ops) == 0 and len(parent_vars) == 0:
- return
-
- # Ignore specific patterns.
- if any(p in scope for p in ["/Shape", "/strided_slice", "/Cast", "/concat", "/Assign"]):
- return
-
- # Filter ops and vars by scope.
- global_prefix = scope + "/"
- local_prefix = global_prefix[len(self.scope) + 1:]
- cur_ops = [op for op in parent_ops if op.name.startswith(global_prefix) or op.name == global_prefix[:-1]]
- cur_vars = [(name, var) for name, var in parent_vars if name.startswith(local_prefix) or name == local_prefix[:-1]]
- if not cur_ops and not cur_vars:
- return
-
- # Filter out all ops related to variables.
- for var in [op for op in cur_ops if op.type.startswith("Variable")]:
- var_prefix = var.name + "/"
- cur_ops = [op for op in cur_ops if not op.name.startswith(var_prefix)]
-
- # Scope does not contain ops as immediate children => recurse deeper.
- contains_direct_ops = any("/" not in op.name[len(global_prefix):] and op.type not in ["Identity", "Cast", "Transpose"] for op in cur_ops)
- if (level == 0 or not contains_direct_ops) and (len(cur_ops) != 0 or len(cur_vars) != 0):
- visited = set()
- for rel_name in [op.name[len(global_prefix):] for op in cur_ops] + [name[len(local_prefix):] for name, _var in cur_vars]:
- token = rel_name.split("/")[0]
- if token not in visited:
- recurse(global_prefix + token, cur_ops, cur_vars, level + 1)
- visited.add(token)
- return
-
- # Report layer.
- layer_name = scope[len(self.scope) + 1:]
- layer_output = cur_ops[-1].outputs[0] if cur_ops else cur_vars[-1][1]
- layer_trainables = [var for _name, var in cur_vars if var.trainable]
- layers.append((layer_name, layer_output, layer_trainables))
-
- recurse(self.scope, self.list_ops(), list(self._get_vars().items()), 0)
- return layers
-
- def print_layers(self, title: str = None, hide_layers_with_no_params: bool = False) -> None:
- """Print a summary table of the network structure."""
- rows = [[title if title is not None else self.name, "Params", "OutputShape", "WeightShape"]]
- rows += [["---"] * 4]
- total_params = 0
-
- for layer_name, layer_output, layer_trainables in self.list_layers():
- num_params = sum(int(np.prod(var.shape.as_list())) for var in layer_trainables)
- weights = [var for var in layer_trainables if var.name.endswith("/weight:0")]
- weights.sort(key=lambda x: len(x.name))
- if len(weights) == 0 and len(layer_trainables) == 1:
- weights = layer_trainables
- total_params += num_params
-
- if not hide_layers_with_no_params or num_params != 0:
- num_params_str = str(num_params) if num_params > 0 else "-"
- output_shape_str = str(layer_output.shape)
- weight_shape_str = str(weights[0].shape) if len(weights) >= 1 else "-"
- rows += [[layer_name, num_params_str, output_shape_str, weight_shape_str]]
-
- rows += [["---"] * 4]
- rows += [["Total", str(total_params), "", ""]]
-
- widths = [max(len(cell) for cell in column) for column in zip(*rows)]
- print()
- for row in rows:
- print(" ".join(cell + " " * (width - len(cell)) for cell, width in zip(row, widths)))
- print()
-
- def setup_weight_histograms(self, title: str = None) -> None:
- """Construct summary ops to include histograms of all trainable parameters in TensorBoard."""
- if title is None:
- title = self.name
-
- with tf.name_scope(None), tf.device(None), tf.control_dependencies(None):
- for local_name, var in self._get_trainables().items():
- if "/" in local_name:
- p = local_name.split("/")
- name = title + "_" + p[-1] + "/" + "_".join(p[:-1])
- else:
- name = title + "_toplevel/" + local_name
-
- tf.summary.histogram(name, var)
-
-#----------------------------------------------------------------------------
-# Backwards-compatible emulation of legacy output transformation in Network.run().
-
-_print_legacy_warning = True
-
-def _handle_legacy_output_transforms(output_transform, dynamic_kwargs):
- global _print_legacy_warning
- legacy_kwargs = ["out_mul", "out_add", "out_shrink", "out_dtype"]
- if not any(kwarg in dynamic_kwargs for kwarg in legacy_kwargs):
- return output_transform, dynamic_kwargs
-
- if _print_legacy_warning:
- _print_legacy_warning = False
- print()
- print("WARNING: Old-style output transformations in Network.run() are deprecated.")
- print("Consider using 'output_transform=dict(func=tflib.convert_images_to_uint8)'")
- print("instead of 'out_mul=127.5, out_add=127.5, out_dtype=np.uint8'.")
- print()
- assert output_transform is None
-
- new_kwargs = dict(dynamic_kwargs)
- new_transform = {kwarg: new_kwargs.pop(kwarg) for kwarg in legacy_kwargs if kwarg in dynamic_kwargs}
- new_transform["func"] = _legacy_output_transform_func
- return new_transform, new_kwargs
-
-def _legacy_output_transform_func(*expr, out_mul=1.0, out_add=0.0, out_shrink=1, out_dtype=None):
- if out_mul != 1.0:
- expr = [x * out_mul for x in expr]
-
- if out_add != 0.0:
- expr = [x + out_add for x in expr]
-
- if out_shrink > 1:
- ksize = [1, 1, out_shrink, out_shrink]
- expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") for x in expr]
-
- if out_dtype is not None:
- if tf.as_dtype(out_dtype).is_integer:
- expr = [tf.round(x) for x in expr]
- expr = [tf.saturate_cast(x, out_dtype) for x in expr]
- return expr
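
For reference, a hedged sketch of the two call styles that `_handle_legacy_output_transforms` reconciles; `Gs` stands for any loaded generator `Network` and `tflib` for `dnnlib.tflib`, both assumed here rather than defined in this file:

```python
import numpy as np

latents = np.random.randn(8, *Gs.input_shape[1:])

# Old style: the extra kwargs land in dynamic_kwargs and trigger the deprecation warning above.
images = Gs.run(latents, None, out_mul=127.5, out_add=127.5, out_dtype=np.uint8, minibatch_size=4)

# New style suggested by the warning text.
images = Gs.run(latents, None, output_transform=dict(func=tflib.convert_images_to_uint8), minibatch_size=4)
```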
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/dpm_sde.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/dpm_sde.md
deleted file mode 100644
index 33ec514cef649c66631914b2ef936ff6a3688844..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/dpm_sde.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-# DPM Stochastic Scheduler inspired by the Karras et al. paper
-
-## Overview
-
-Inspired by the stochastic sampler from [Karras et al.](https://arxiv.org/abs/2206.00364).
-The scheduler was ported from @crowsonkb's https://github.com/crowsonkb/k-diffusion library.
-
-All credit for making this scheduler work goes to [Katherine Crowson](https://github.com/crowsonkb/).
-
-## DPMSolverSDEScheduler
-[[autodoc]] DPMSolverSDEScheduler
\ No newline at end of file
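
As a hedged usage sketch (not part of the deleted doc page), this scheduler can typically be swapped into a pipeline via `from_config`; the model id below is only an example and `torchsde` must be installed:

```python
from diffusers import StableDiffusionPipeline, DPMSolverSDEScheduler

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]
```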
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/__init__.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py
deleted file mode 100644
index b0add92c398b62aa8fd2141f595cf0941f55d421..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py
+++ /dev/null
@@ -1,65 +0,0 @@
-_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py'
-model = dict(
- rpn_head=dict(
- _delete_=True,
- type='GARPNHead',
- in_channels=256,
- feat_channels=256,
- approx_anchor_generator=dict(
- type='AnchorGenerator',
- octave_base_scale=8,
- scales_per_octave=3,
- ratios=[0.5, 1.0, 2.0],
- strides=[4, 8, 16, 32, 64]),
- square_anchor_generator=dict(
- type='AnchorGenerator',
- ratios=[1.0],
- scales=[8],
- strides=[4, 8, 16, 32, 64]),
- anchor_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[.0, .0, .0, .0],
- target_stds=[0.07, 0.07, 0.14, 0.14]),
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[.0, .0, .0, .0],
- target_stds=[0.07, 0.07, 0.11, 0.11]),
- loc_filter_thr=0.01,
- loss_loc=dict(
- type='FocalLoss',
- use_sigmoid=True,
- gamma=2.0,
- alpha=0.25,
- loss_weight=1.0),
- loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
- loss_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
- roi_head=dict(
- bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
- # model training and testing settings
- train_cfg=dict(
- rpn=dict(
- ga_assigner=dict(
- type='ApproxMaxIoUAssigner',
- pos_iou_thr=0.7,
- neg_iou_thr=0.3,
- min_pos_iou=0.3,
- ignore_iof_thr=-1),
- ga_sampler=dict(
- type='RandomSampler',
- num=256,
- pos_fraction=0.5,
- neg_pos_ub=-1,
- add_gt_as_proposals=False),
- allowed_border=-1,
- center_ratio=0.2,
- ignore_ratio=0.5),
- rpn_proposal=dict(nms_post=1000, max_per_img=300),
- rcnn=dict(
- assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
- sampler=dict(type='RandomSampler', num=256))),
- test_cfg=dict(
- rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3)))
-optimizer_config = dict(
- _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
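
A minimal sketch of loading such a config for inspection, assuming mmcv 1.x and a working directory at the repository root so that the `_base_` file resolves:

```python
from mmcv import Config

cfg = Config.fromfile('configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py')
print(cfg.model.rpn_head.type)         # 'GARPNHead', merged over the Faster R-CNN base config
print(cfg.optimizer_config.grad_clip)  # {'max_norm': 35, 'norm_type': 2}
```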
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/scnet/scnet_r101_fpn_20e_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/scnet/scnet_r101_fpn_20e_coco.py
deleted file mode 100644
index cef0668ad8f1b767db0dc8deeb688d67005af1e4..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/scnet/scnet_r101_fpn_20e_coco.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './scnet_r50_fpn_20e_coco.py'
-model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
diff --git a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/losses.py b/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/losses.py
deleted file mode 100644
index 251e42e4f36a31bb5e1aeda874b3a45d722000a2..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/losses.py
+++ /dev/null
@@ -1,77 +0,0 @@
-"""
-Helpers for various likelihood-based losses. These are ported from the original
-Ho et al. diffusion models codebase:
-https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
-"""
-
-import numpy as np
-
-import torch as th
-
-
-def normal_kl(mean1, logvar1, mean2, logvar2):
- """
- Compute the KL divergence between two gaussians.
-
- Shapes are automatically broadcasted, so batches can be compared to
- scalars, among other use cases.
- """
- tensor = None
- for obj in (mean1, logvar1, mean2, logvar2):
- if isinstance(obj, th.Tensor):
- tensor = obj
- break
- assert tensor is not None, "at least one argument must be a Tensor"
-
- # Force variances to be Tensors. Broadcasting helps convert scalars to
- # Tensors, but it does not work for th.exp().
- logvar1, logvar2 = [
- x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
- for x in (logvar1, logvar2)
- ]
-
- return 0.5 * (
- -1.0
- + logvar2
- - logvar1
- + th.exp(logvar1 - logvar2)
- + ((mean1 - mean2) ** 2) * th.exp(-logvar2)
- )
-
-
-def approx_standard_normal_cdf(x):
- """
- A fast approximation of the cumulative distribution function of the
- standard normal.
- """
- return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))
-
-
-def discretized_gaussian_log_likelihood(x, *, means, log_scales):
- """
- Compute the log-likelihood of a Gaussian distribution discretizing to a
- given image.
-
-    :param x: the target images. It is assumed that these were uint8 values,
-              rescaled to the range [-1, 1].
- :param means: the Gaussian mean Tensor.
- :param log_scales: the Gaussian log stddev Tensor.
- :return: a tensor like x of log probabilities (in nats).
- """
- assert x.shape == means.shape == log_scales.shape
- centered_x = x - means
- inv_stdv = th.exp(-log_scales)
- plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
- cdf_plus = approx_standard_normal_cdf(plus_in)
- min_in = inv_stdv * (centered_x - 1.0 / 255.0)
- cdf_min = approx_standard_normal_cdf(min_in)
- log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
- log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
- cdf_delta = cdf_plus - cdf_min
- log_probs = th.where(
- x < -0.999,
- log_cdf_plus,
- th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
- )
- assert log_probs.shape == x.shape
- return log_probs
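
A small illustrative sketch of the helpers above; the numbers are arbitrary:

```python
import numpy as np
import torch as th

mean1, logvar1 = th.zeros(3), th.zeros(3)            # N(0, 1)
mean2 = th.tensor([0.5, 0.0, -1.0])
logvar2 = th.log(th.tensor([1.0, 2.0, 0.5]))

kl_nats = normal_kl(mean1, logvar1, mean2, logvar2)  # element-wise KL in nats
kl_bits = kl_nats / np.log(2.0)                      # commonly reported in bits
print(kl_bits)
```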
diff --git a/spaces/AnshuK23/Customer-review-analysis/README.md b/spaces/AnshuK23/Customer-review-analysis/README.md
deleted file mode 100644
index 9d74889a88d88efa42b477ab913b262cb32db553..0000000000000000000000000000000000000000
--- a/spaces/AnshuK23/Customer-review-analysis/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Customer Review Analysis
-emoji: 📚
-colorFrom: blue
-colorTo: green
-sdk: streamlit
-sdk_version: 1.17.0
-app_file: app.py
-pinned: false
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Anthony7906/MengHuiMXD_GPT/modules/presets.py b/spaces/Anthony7906/MengHuiMXD_GPT/modules/presets.py
deleted file mode 100644
index 969f122198a360f8c3eb126b156d056ab81d53e1..0000000000000000000000000000000000000000
--- a/spaces/Anthony7906/MengHuiMXD_GPT/modules/presets.py
+++ /dev/null
@@ -1,222 +0,0 @@
-# -*- coding:utf-8 -*-
-import os
-from pathlib import Path
-import gradio as gr
-from .webui_locale import I18nAuto
-
-i18n = I18nAuto() # internationalization
-
-CHATGLM_MODEL = None
-CHATGLM_TOKENIZER = None
-LLAMA_MODEL = None
-LLAMA_INFERENCER = None
-
-# ChatGPT settings
-INITIAL_SYSTEM_PROMPT = "You are a helpful assistant."
-API_HOST = "api.openai.com"
-COMPLETION_URL = "https://api.openai.com/v1/chat/completions"
-BALANCE_API_URL="https://api.openai.com/dashboard/billing/credit_grants"
-USAGE_API_URL="https://api.openai.com/dashboard/billing/usage"
-HISTORY_DIR = Path("history")
-HISTORY_DIR = "history"
-TEMPLATES_DIR = "templates"
-
-# Error messages
-STANDARD_ERROR_MSG = i18n("☹️发生了错误:") # standard prefix for error messages
-GENERAL_ERROR_MSG = i18n("获取对话时发生错误,请查看后台日志")
-ERROR_RETRIEVE_MSG = i18n("请检查网络连接,或者API-Key是否有效。")
-CONNECTION_TIMEOUT_MSG = i18n("连接超时,无法获取对话。") # connection timed out
-READ_TIMEOUT_MSG = i18n("读取超时,无法获取对话。") # read timed out
-PROXY_ERROR_MSG = i18n("代理错误,无法获取对话。") # proxy error
-SSL_ERROR_PROMPT = i18n("SSL错误,无法获取对话。") # SSL error
-NO_APIKEY_MSG = i18n("API key为空,请检查是否输入正确。") # API key shorter than 51 characters
-NO_INPUT_MSG = i18n("请输入对话内容。") # no conversation content entered
-BILLING_NOT_APPLICABLE_MSG = i18n("账单信息不适用") # billing info returned by locally run models
-
-TIMEOUT_STREAMING = 60 # timeout for streaming conversations
-TIMEOUT_ALL = 200 # timeout for non-streaming conversations
-ENABLE_STREAMING_OPTION = True # whether to show the checkbox that toggles real-time display of answers
-HIDE_MY_KEY = False # set this to True if you want to hide your API key in the UI
-CONCURRENT_COUNT = 100 # number of users allowed to use the app concurrently
-
-SIM_K = 5
-INDEX_QUERY_TEMPRATURE = 1.0
-
-CHUANHU_TITLE = i18n("川虎Chat 🚀")
-
-CHUANHU_DESCRIPTION = i18n("由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发 访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本")
-
-FOOTER = """{versions}
"""
-
-APPEARANCE_SWITCHER = """
-
-
"""+ i18n("切换亮暗色主题") + """
-
-
-
-
-
-"""
-
-SUMMARIZE_PROMPT = "你是谁?我们刚才聊了什么?" # prompt used when summarizing the conversation
-
-ONLINE_MODELS = [
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-0301",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "xmchat",
-]
-
-LOCAL_MODELS = [
- "chatglm-6b",
- "chatglm-6b-int4",
- "chatglm-6b-int4-qe",
- "llama-7b-hf",
- "llama-13b-hf",
- "llama-30b-hf",
- "llama-65b-hf"
-]
-
-if os.environ.get('HIDE_LOCAL_MODELS', 'false') == 'true':
- MODELS = ONLINE_MODELS
-else:
- MODELS = ONLINE_MODELS + LOCAL_MODELS
-
-DEFAULT_MODEL = 0
-
-os.makedirs("models", exist_ok=True)
-os.makedirs("lora", exist_ok=True)
-os.makedirs("history", exist_ok=True)
-for dir_name in os.listdir("models"):
- if os.path.isdir(os.path.join("models", dir_name)):
- if dir_name not in MODELS:
- MODELS.append(dir_name)
-
-MODEL_TOKEN_LIMIT = {
- "gpt-3.5-turbo": 4096,
- "gpt-3.5-turbo-0301": 4096,
- "gpt-4": 8192,
- "gpt-4-0314": 8192,
- "gpt-4-32k": 32768,
- "gpt-4-32k-0314": 32768
-}
-
-TOKEN_OFFSET = 1000 # subtracted from the model's token limit to get the soft limit; once the soft limit is reached, token usage is automatically reduced
-DEFAULT_TOKEN_LIMIT = 3000 # default token limit
-REDUCE_TOKEN_FACTOR = 0.5 # multiplied by the model's token limit to get the target token count; when reducing token usage, usage is cut to below this target
-
-REPLY_LANGUAGES = [
- "简体中文",
- "繁體中文",
- "English",
- "日本語",
- "Español",
- "Français",
- "Deutsch",
- "跟随问题语言(不稳定)"
-]
-
-
-WEBSEARCH_PTOMPT_TEMPLATE = """\
-Web search results:
-
-{web_results}
-Current date: {current_date}
-
-Instructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.
-Query: {query}
-Reply in {reply_language}
-"""
-
-PROMPT_TEMPLATE = """\
-Context information is below.
----------------------
-{context_str}
----------------------
-Current date: {current_date}.
-Using the provided context information, write a comprehensive reply to the given query.
-Make sure to cite results using [number] notation after the reference.
-If the provided context information refer to multiple subjects with the same name, write separate answers for each subject.
-Use prior knowledge only if the given context didn't provide enough information.
-Answer the question: {query_str}
-Reply in {reply_language}
-"""
-
-REFINE_TEMPLATE = """\
-The original question is as follows: {query_str}
-We have provided an existing answer: {existing_answer}
-We have the opportunity to refine the existing answer
-(only if needed) with some more context below.
-------------
-{context_msg}
-------------
-Given the new context, refine the original answer to better
-Reply in {reply_language}
-If the context isn't useful, return the original answer.
-"""
-
-ALREADY_CONVERTED_MARK = ""
-
-small_and_beautiful_theme = gr.themes.Soft(
- primary_hue=gr.themes.Color(
- c50="#02C160",
- c100="rgba(2, 193, 96, 0.2)",
- c200="#02C160",
- c300="rgba(2, 193, 96, 0.32)",
- c400="rgba(2, 193, 96, 0.32)",
- c500="rgba(2, 193, 96, 1.0)",
- c600="rgba(2, 193, 96, 1.0)",
- c700="rgba(2, 193, 96, 0.32)",
- c800="rgba(2, 193, 96, 0.32)",
- c900="#02C160",
- c950="#02C160",
- ),
- secondary_hue=gr.themes.Color(
- c50="#576b95",
- c100="#576b95",
- c200="#576b95",
- c300="#576b95",
- c400="#576b95",
- c500="#576b95",
- c600="#576b95",
- c700="#576b95",
- c800="#576b95",
- c900="#576b95",
- c950="#576b95",
- ),
- neutral_hue=gr.themes.Color(
- name="gray",
- c50="#f9fafb",
- c100="#f3f4f6",
- c200="#e5e7eb",
- c300="#d1d5db",
- c400="#B2B2B2",
- c500="#808080",
- c600="#636363",
- c700="#515151",
- c800="#393939",
- c900="#272727",
- c950="#171717",
- ),
- radius_size=gr.themes.sizes.radius_sm,
- ).set(
- button_primary_background_fill="#06AE56",
- button_primary_background_fill_dark="#06AE56",
- button_primary_background_fill_hover="#07C863",
- button_primary_border_color="#06AE56",
- button_primary_border_color_dark="#06AE56",
- button_primary_text_color="#FFFFFF",
- button_primary_text_color_dark="#FFFFFF",
- button_secondary_background_fill="#F2F2F2",
- button_secondary_background_fill_dark="#2B2B2B",
- button_secondary_text_color="#393939",
- button_secondary_text_color_dark="#FFFFFF",
- # background_fill_primary="#F7F7F7",
- # background_fill_primary_dark="#1F1F1F",
- block_title_text_color="*primary_500",
- block_title_background_fill="*primary_100",
- input_background_fill="#F6F6F6",
- )
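
A hedged sketch of how the theme and the token constants above might be wired up; `gr.Blocks` accepts a `Theme` object in the Gradio versions that provide `gr.themes`, and the arithmetic just spells out the soft-limit comments:

```python
import gradio as gr

with gr.Blocks(theme=small_and_beautiful_theme) as demo:
    gr.Markdown(CHUANHU_DESCRIPTION)

limit = MODEL_TOKEN_LIMIT.get("gpt-3.5-turbo", DEFAULT_TOKEN_LIMIT)
soft_limit = limit - TOKEN_OFFSET           # 4096 - 1000 = 3096
target = int(limit * REDUCE_TOKEN_FACTOR)   # 4096 * 0.5 = 2048
```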
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/metadata.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/metadata.py
deleted file mode 100644
index c66ac354deb035405fe0e4040dac539d28570257..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/metadata.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""Metadata generation logic for source distributions.
-"""
-
-import os
-
-from pip._vendor.pyproject_hooks import BuildBackendHookCaller
-
-from pip._internal.build_env import BuildEnvironment
-from pip._internal.exceptions import (
- InstallationSubprocessError,
- MetadataGenerationFailed,
-)
-from pip._internal.utils.subprocess import runner_with_spinner_message
-from pip._internal.utils.temp_dir import TempDirectory
-
-
-def generate_metadata(
- build_env: BuildEnvironment, backend: BuildBackendHookCaller, details: str
-) -> str:
- """Generate metadata using mechanisms described in PEP 517.
-
- Returns the generated metadata directory.
- """
- metadata_tmpdir = TempDirectory(kind="modern-metadata", globally_managed=True)
-
- metadata_dir = metadata_tmpdir.path
-
- with build_env:
- # Note that BuildBackendHookCaller implements a fallback for
- # prepare_metadata_for_build_wheel, so we don't have to
- # consider the possibility that this hook doesn't exist.
- runner = runner_with_spinner_message("Preparing metadata (pyproject.toml)")
- with backend.subprocess_runner(runner):
- try:
- distinfo_dir = backend.prepare_metadata_for_build_wheel(metadata_dir)
- except InstallationSubprocessError as error:
- raise MetadataGenerationFailed(package_details=details) from error
-
- return os.path.join(metadata_dir, distinfo_dir)
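
Outside of pip, the same PEP 517 hook can be driven directly with the standalone `pyproject_hooks` package; a hedged sketch, assuming a setuptools-based project in `./myproject` and that the build backend is importable in the hook subprocess:

```python
from pyproject_hooks import BuildBackendHookCaller

hooks = BuildBackendHookCaller("./myproject", "setuptools.build_meta")
distinfo_dir = hooks.prepare_metadata_for_build_wheel("/tmp/metadata")
print(distinfo_dir)  # e.g. "myproject-1.0.dist-info"
```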
diff --git a/spaces/BAAI/AltDiffusion-m9/README.md b/spaces/BAAI/AltDiffusion-m9/README.md
deleted file mode 100644
index 54aee197b8e4c4018731b5e02a59726ef7014ebe..0000000000000000000000000000000000000000
--- a/spaces/BAAI/AltDiffusion-m9/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: AltDiffusion M9
-emoji: 💓
-colorFrom: purple
-colorTo: gray
-sdk: gradio
-sdk_version: 3.10.1
-app_file: app.py
-pinned: false
-license: creativeml-openrail-m
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Benson/text-generation/Examples/Descarga Zktime.net Lite 2.0.3.md b/spaces/Benson/text-generation/Examples/Descarga Zktime.net Lite 2.0.3.md
deleted file mode 100644
index 6dbd37a5b9a910fe06bf7e54c7d66d42d9bf1486..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descarga Zktime.net Lite 2.0.3.md
+++ /dev/null
@@ -1,109 +0,0 @@
-
-ZKTime.Net Lite: time and attendance software for small and medium-sized businesses
-If you are looking for simple, reliable, and affordable time and attendance software for your small or medium-sized business, you may want to take a look at ZKTime.Net Lite. This is a lightweight Windows-based desktop application that works with ZKTeco standalone devices to help you manage your employees' time and attendance efficiently and accurately.
-What is ZKTime.Net Lite?
-ZKTime.Net Lite is software developed by ZKTeco, a leading global provider of biometric and RFID solutions. It is designed to provide stable communication with ZKTeco standalone devices over Ethernet/ Wi-Fi/ USB and to connect all devices in order to download transactions, synchronize employee information, calculate attendance records, and generate more than 15 types of reports. It also includes a payroll module that provides a corresponding payroll calculation function.
-download zktime.net lite 2.0.3 Download File ↔ https://bltlly.com/2v6KKs
-Features and benefits of ZKTime.Net Lite
-Some of the features and benefits of ZKTime.Net Lite are:
-
-It supports multiple languages, including English, Spanish, Portuguese, Arabic, French, German, Russian, Turkish, Thai, Vietnamese, Indonesian, and Chinese.
- It has a user-friendly interface that lets you easily configure the software settings, manage devices, and view data.
- It has a flexible attendance policy that lets you set up different shifts, schedules, holidays, overtime rules, leave types, and so on.
- It has a powerful reporting feature that lets you generate various reports, such as the daily attendance report, monthly attendance report, department attendance report, employee attendance report, late/early attendance report, absence report, overtime report, and so on.
-
- It has a data backup and restore feature that lets you back up your data regularly and restore it in case of data loss or corruption.
- It has an online update feature that lets you check for the latest version of the software and update it automatically.
- It has a technical support feature that lets you contact the ZKTeco support team directly from the software if you run into any problems or have any questions.
-
-How to download and install ZKTime.Net Lite 2.0.3
-To download and install ZKTime.Net Lite 2.0.3 on your computer, follow these steps:
-
-Go to the [ZKTeco]( 1 ) website and click the "Download" tab.
-Select "Smart Office" from the product category and then select "ZKBio Time.Net" from the product list.
-Click the "Download" button next to the file name "ZKBio Time.Net V2.0.3".
-Save the file to your computer and then run it as administrator.
-Follow the on-screen instructions to complete the installation process.
-Launch the software and enter the default username (admin) and password (123456) to log in.
-
- How to use ZKTime.Net Lite for time and attendance management
-Once you have installed and logged in to ZKTime.Net Lite, you can start using it for time and attendance management. These are some of the main functions you can perform with the software:
-How to connect ZKTeco devices to ZKTime.Net Lite
-To connect your ZKTeco devices to ZKTime.Net Lite, follow these steps:
-
-Click the "Device" tab in the main menu and then click the "Add Device" button.
-Select the device model, the communication type (Ethernet/ Wi-Fi/ USB), and the device name.
-Enter the device's IP address, port number, and password (if any).
- Click the "Test Connection" button to check whether the device is connected correctly.
-
-
-You can add multiple devices to the software and manage them from the device list. You can also edit, delete, or update device information from the device list.
-
-How to download transactions and synchronize employee information
-To download transactions and synchronize employee information from your devices to ZKTime.Net Lite, follow these steps:
-
-Select the devices you want to download from or synchronize in the device list.
-Click the "Download Transactions" button to download the attendance records from the devices to the software.
-Click the "Synchronize Employee Information" button to synchronize employee information (such as name, ID, fingerprint, face, etc.) from the devices to the software or vice versa.
-
-You can also set up a schedule for downloading or synchronizing data from the devices to the software automatically. To do this, click the "Schedule" tab in the main menu and then click the "Add Schedule" button. You can select the devices, the data type, the time interval, and the frequency for the schedule.
-How to calculate attendance records and generate reports
-To calculate attendance records and generate reports with ZKTime.Net Lite, follow these steps:
-
-Click the "Attendance" tab in the main menu and then click the "Calculate Attendance" button.
- Select the devices, the employees, and the date range for the calculation.
-Click the "OK" button to start the calculation process.
- Wait for the calculation to finish and then click the "Report" tab in the main menu.
- Select the report type, the devices, the employees, and the date range for the report.
-Click the "Generate Report" button to create the report.
-View the report on screen or export it to Excel or PDF format.
-
-
-How to use the payroll module
-To use the payroll module with ZKTime.Net Lite, follow these steps:
-
-Click the "Payroll" tab in the main menu and then click the "Payroll Settings" button.
- Configure the payroll parameters, such as the pay period, pay rate, deductions, bonuses, taxes, and so on.
-Click the "OK" button to save the payroll settings.
-Click the "Calculate Payroll" button to calculate salaries based on the attendance data.
- Select the devices, the employees, and the date range for the payroll calculation.
-Click the "OK" button to start the calculation process.
- View the payroll data on screen or export it to Excel or PDF format.
-
- How to troubleshoot common problems with ZKTime.Net Lite
-If you run into any problems or have any questions while using ZKTime.Net Lite, you can try some of these troubleshooting tips:
-How to update ZKTime.Net Lite to the latest version
-To update ZKTime.Net Lite to the latest version, follow these steps:
-
-Click the "Help" tab in the main menu and then click the "Online Update" button.
-The software will check whether updates are available and prompt you to download them.
-Click the "Download" button to download the updates and then click the "Install" button to install them.
-Restart the software and enjoy the new features and improvements.
-
- How to back up and restore data
-To back up and restore data with ZKTime.Net Lite, follow these steps:
-
-Click the "System" tab in the main menu and then click the "Data Backup" button.
- Select a location and a file name for your backup file and then click the "OK" button to start the backup process.
-
- Click the "OK" button to start the restore process and wait for it to finish.
-
- How to contact ZKTeco support
-If you need technical support or have any comments or suggestions for ZKTime.Net Lite, you can contact the ZKTeco support team directly from the software. To do this, click the "Help" tab in the main menu and then click the "Contact Us" button. You can fill in your name, email, phone number, and message and then click the "Send" button to submit your inquiry. You can also visit the [ZKTeco website] or call the ZKTeco hotline (+86-755-8960 2345) for more information.
- Conclusion
-ZKTime.Net Lite is time and attendance software that works with ZKTeco standalone devices to help you manage your employees' time and attendance efficiently and accurately. It has many features and benefits, such as multiple languages, a user-friendly interface, a flexible attendance policy, a powerful reporting feature, a payroll module, a data backup and restore feature, an online update feature, and a technical support feature. It is easy to download, install, and use. It is well suited to small and medium-sized businesses that need a simple, reliable, and affordable time and attendance solution.
-Frequently asked questions
-Here are some of the most frequently asked questions about ZKTime.Net Lite:
-
-Q: How many devices and employees can ZKTime.Net Lite support?
-A: ZKTime.Net Lite can support up to 50 devices and 500 employees. If you need to support more devices or employees, you can upgrade to ZKTime.Net 3.0 or ZKBio Time.Net.
-Q: What are the system requirements for ZKTime.Net Lite?
-A: ZKTime.Net Lite requires a Windows-based computer with at least 2 GB of RAM, 500 MB of free disk space, and a network connection. It supports the Windows XP/ Vista/ 7/ 8/ 10 operating systems.
-Q: How can I get a license for ZKTime.Net Lite?
-
-Q: How can I get more training or guidance on how to use ZKTime.Net Lite?
-A: You can access the user manual and video tutorials from the software by clicking the "Help" tab and then clicking the "User Manual" or "Video Tutorial" button. You can also visit the [ZKTeco website] or contact the ZKTeco support team for further assistance.
-Q: How can I give feedback or suggestions for ZKTime.Net Lite?
-A: You can give feedback or suggestions for ZKTime.Net Lite by clicking the "Help" tab and then clicking the "Feedback" button. You can fill in your name, email, phone number, and message and then click the "Send" button to submit your feedback or suggestions. You can also visit the [ZKTeco website] or contact the ZKTeco support team for further communication.
-
-
\ No newline at end of file
diff --git a/spaces/BhagatSurya/convet_pdf_to_txt/app.py b/spaces/BhagatSurya/convet_pdf_to_txt/app.py
deleted file mode 100644
index ae901d55712e7c8f466a129eec81185a45daf5a7..0000000000000000000000000000000000000000
--- a/spaces/BhagatSurya/convet_pdf_to_txt/app.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import gradio as gr
-import tempfile
-import re
-import os
-import binascii
-import spacy
-import pytesseract
-import pdf2image
-import subprocess
-from pdf2image.exceptions import (
- PDFInfoNotInstalledError,
- PDFPageCountError,
- PDFSyntaxError
-)
-import fitz # PyMuPDF
-from PIL import Image, UnidentifiedImageError
-import io
-import base64
-
-def clean_text(text):
- nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser", "ner", "textcat"])
- text = re.sub(r'\n+', '\n', text)
- text = re.sub(r'\s+', ' ', text)
- return text.strip()
-
-def safe_base64_decode(s):
- # add missing padding if necessary
- missing_padding = len(s) % 4
- if missing_padding:
- s += '='* (4 - missing_padding)
- try:
- return base64.b64decode(s)
- except binascii.Error as e:
- print("Error decoding base64 string:", e)
- return None
-
-def image_to_latex(image):
- image_path = "/tmp/equation.png" # Modify as needed
- image.save(image_path)
- result = subprocess.run(["pix2tex", image_path], capture_output=True, text=True)
- return result.stdout
-
-def pdf_to_text(file):
- doc = fitz.open(file.name)
- full_text = ''
- for i, page in enumerate(doc):
- # Extract text
- page_text = page.get_text()
-
- # Extract images and convert to LaTeX
-        image_list = page.get_images(full=True)
-        for img in image_list:
-            # page.get_images() returns metadata tuples; the first element is the image xref,
-            # not the image bytes, so pull the actual data out of the PDF via extract_image().
-            xref = img[0]
-            try:
-                image_data = doc.extract_image(xref)["image"]
-                image = Image.open(io.BytesIO(image_data))
-                latex_code = image_to_latex(image)
-                page_text += "\n" + latex_code  # Add LaTeX code to page text
-            except (UnidentifiedImageError, KeyError, RuntimeError):
-                print(f"Could not identify image on page {i+1}")
-
- page_text = clean_text(page_text)
- if len(page_text.split()) > 5:
- page_number = i + 1
- page_text = "## Metadata: Page Number " + str(page_number) + "\n" + page_text
- full_text += page_text + "\n\n"
-
- base_name = os.path.splitext(os.path.basename(file.name))[0]
- output_file_name = base_name + ".txt"
- with open(output_file_name, 'w') as f:
- f.write(full_text)
-
- return output_file_name
-
-iface = gr.Interface(fn=pdf_to_text,
- inputs=gr.inputs.File(label="Your PDF"),
- outputs=gr.outputs.File(label="Download TXT"),
- title="PDF to TXT",
- description="Convert your PDF files to clean text")
-iface.launch()
-
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/main.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/main.py
deleted file mode 100644
index 33c6d24cd85b55a9fb1b1e6ab784f471e2b135f0..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/main.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from typing import List, Optional
-
-
-def main(args: Optional[List[str]] = None) -> int:
- """This is preserved for old console scripts that may still be referencing
- it.
-
- For additional details, see https://github.com/pypa/pip/issues/7498.
- """
- from pip._internal.utils.entrypoints import _wrapper
-
- return _wrapper(args)
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/unicode.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/unicode.py
deleted file mode 100644
index 06526203911de55da3c2a8c5ae73f48024c3f018..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/unicode.py
+++ /dev/null
@@ -1,352 +0,0 @@
-# unicode.py
-
-import sys
-from itertools import filterfalse
-from typing import List, Tuple, Union
-
-
-class _lazyclassproperty:
- def __init__(self, fn):
- self.fn = fn
- self.__doc__ = fn.__doc__
- self.__name__ = fn.__name__
-
- def __get__(self, obj, cls):
- if cls is None:
- cls = type(obj)
- if not hasattr(cls, "_intern") or any(
- cls._intern is getattr(superclass, "_intern", [])
- for superclass in cls.__mro__[1:]
- ):
- cls._intern = {}
- attrname = self.fn.__name__
- if attrname not in cls._intern:
- cls._intern[attrname] = self.fn(cls)
- return cls._intern[attrname]
-
-
-UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]]
-
-
-class unicode_set:
- """
- A set of Unicode characters, for language-specific strings for
- ``alphas``, ``nums``, ``alphanums``, and ``printables``.
- A unicode_set is defined by a list of ranges in the Unicode character
- set, in a class attribute ``_ranges``. Ranges can be specified using
- 2-tuples or a 1-tuple, such as::
-
- _ranges = [
- (0x0020, 0x007e),
- (0x00a0, 0x00ff),
- (0x0100,),
- ]
-
- Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x).
-
- A unicode set can also be defined using multiple inheritance of other unicode sets::
-
- class CJK(Chinese, Japanese, Korean):
- pass
- """
-
- _ranges: UnicodeRangeList = []
-
- @_lazyclassproperty
- def _chars_for_ranges(cls):
- ret = []
- for cc in cls.__mro__:
- if cc is unicode_set:
- break
- for rr in getattr(cc, "_ranges", ()):
- ret.extend(range(rr[0], rr[-1] + 1))
- return [chr(c) for c in sorted(set(ret))]
-
- @_lazyclassproperty
- def printables(cls):
- "all non-whitespace characters in this range"
- return "".join(filterfalse(str.isspace, cls._chars_for_ranges))
-
- @_lazyclassproperty
- def alphas(cls):
- "all alphabetic characters in this range"
- return "".join(filter(str.isalpha, cls._chars_for_ranges))
-
- @_lazyclassproperty
- def nums(cls):
- "all numeric digit characters in this range"
- return "".join(filter(str.isdigit, cls._chars_for_ranges))
-
- @_lazyclassproperty
- def alphanums(cls):
- "all alphanumeric characters in this range"
- return cls.alphas + cls.nums
-
- @_lazyclassproperty
- def identchars(cls):
- "all characters in this range that are valid identifier characters, plus underscore '_'"
- return "".join(
- sorted(
- set(
- "".join(filter(str.isidentifier, cls._chars_for_ranges))
- + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº"
- + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
- + "_"
- )
- )
- )
-
- @_lazyclassproperty
- def identbodychars(cls):
- """
- all characters in this range that are valid identifier body characters,
- plus the digits 0-9
- """
- return "".join(
- sorted(
- set(
- cls.identchars
- + "0123456789"
- + "".join(
- [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()]
- )
- )
- )
- )
-
-
-class pyparsing_unicode(unicode_set):
- """
- A namespace class for defining common language unicode_sets.
- """
-
- # fmt: off
-
- # define ranges in language character sets
- _ranges: UnicodeRangeList = [
- (0x0020, sys.maxunicode),
- ]
-
- class BasicMultilingualPlane(unicode_set):
- "Unicode set for the Basic Multilingual Plane"
- _ranges: UnicodeRangeList = [
- (0x0020, 0xFFFF),
- ]
-
- class Latin1(unicode_set):
- "Unicode set for Latin-1 Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0020, 0x007E),
- (0x00A0, 0x00FF),
- ]
-
- class LatinA(unicode_set):
- "Unicode set for Latin-A Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0100, 0x017F),
- ]
-
- class LatinB(unicode_set):
- "Unicode set for Latin-B Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0180, 0x024F),
- ]
-
- class Greek(unicode_set):
- "Unicode set for Greek Unicode Character Ranges"
- _ranges: UnicodeRangeList = [
- (0x0342, 0x0345),
- (0x0370, 0x0377),
- (0x037A, 0x037F),
- (0x0384, 0x038A),
- (0x038C,),
- (0x038E, 0x03A1),
- (0x03A3, 0x03E1),
- (0x03F0, 0x03FF),
- (0x1D26, 0x1D2A),
- (0x1D5E,),
- (0x1D60,),
- (0x1D66, 0x1D6A),
- (0x1F00, 0x1F15),
- (0x1F18, 0x1F1D),
- (0x1F20, 0x1F45),
- (0x1F48, 0x1F4D),
- (0x1F50, 0x1F57),
- (0x1F59,),
- (0x1F5B,),
- (0x1F5D,),
- (0x1F5F, 0x1F7D),
- (0x1F80, 0x1FB4),
- (0x1FB6, 0x1FC4),
- (0x1FC6, 0x1FD3),
- (0x1FD6, 0x1FDB),
- (0x1FDD, 0x1FEF),
- (0x1FF2, 0x1FF4),
- (0x1FF6, 0x1FFE),
- (0x2129,),
- (0x2719, 0x271A),
- (0xAB65,),
- (0x10140, 0x1018D),
- (0x101A0,),
- (0x1D200, 0x1D245),
- (0x1F7A1, 0x1F7A7),
- ]
-
- class Cyrillic(unicode_set):
- "Unicode set for Cyrillic Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0400, 0x052F),
- (0x1C80, 0x1C88),
- (0x1D2B,),
- (0x1D78,),
- (0x2DE0, 0x2DFF),
- (0xA640, 0xA672),
- (0xA674, 0xA69F),
- (0xFE2E, 0xFE2F),
- ]
-
- class Chinese(unicode_set):
- "Unicode set for Chinese Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x2E80, 0x2E99),
- (0x2E9B, 0x2EF3),
- (0x31C0, 0x31E3),
- (0x3400, 0x4DB5),
- (0x4E00, 0x9FEF),
- (0xA700, 0xA707),
- (0xF900, 0xFA6D),
- (0xFA70, 0xFAD9),
- (0x16FE2, 0x16FE3),
- (0x1F210, 0x1F212),
- (0x1F214, 0x1F23B),
- (0x1F240, 0x1F248),
- (0x20000, 0x2A6D6),
- (0x2A700, 0x2B734),
- (0x2B740, 0x2B81D),
- (0x2B820, 0x2CEA1),
- (0x2CEB0, 0x2EBE0),
- (0x2F800, 0x2FA1D),
- ]
-
- class Japanese(unicode_set):
- "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
- _ranges: UnicodeRangeList = []
-
- class Kanji(unicode_set):
- "Unicode set for Kanji Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x4E00, 0x9FBF),
- (0x3000, 0x303F),
- ]
-
- class Hiragana(unicode_set):
- "Unicode set for Hiragana Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x3041, 0x3096),
- (0x3099, 0x30A0),
- (0x30FC,),
- (0xFF70,),
- (0x1B001,),
- (0x1B150, 0x1B152),
- (0x1F200,),
- ]
-
- class Katakana(unicode_set):
- "Unicode set for Katakana Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x3099, 0x309C),
- (0x30A0, 0x30FF),
- (0x31F0, 0x31FF),
- (0x32D0, 0x32FE),
- (0xFF65, 0xFF9F),
- (0x1B000,),
- (0x1B164, 0x1B167),
- (0x1F201, 0x1F202),
- (0x1F213,),
- ]
-
- class Hangul(unicode_set):
- "Unicode set for Hangul (Korean) Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x1100, 0x11FF),
- (0x302E, 0x302F),
- (0x3131, 0x318E),
- (0x3200, 0x321C),
- (0x3260, 0x327B),
- (0x327E,),
- (0xA960, 0xA97C),
- (0xAC00, 0xD7A3),
- (0xD7B0, 0xD7C6),
- (0xD7CB, 0xD7FB),
- (0xFFA0, 0xFFBE),
- (0xFFC2, 0xFFC7),
- (0xFFCA, 0xFFCF),
- (0xFFD2, 0xFFD7),
- (0xFFDA, 0xFFDC),
- ]
-
- Korean = Hangul
-
- class CJK(Chinese, Japanese, Hangul):
- "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
-
- class Thai(unicode_set):
- "Unicode set for Thai Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0E01, 0x0E3A),
- (0x0E3F, 0x0E5B)
- ]
-
- class Arabic(unicode_set):
- "Unicode set for Arabic Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0600, 0x061B),
- (0x061E, 0x06FF),
- (0x0700, 0x077F),
- ]
-
- class Hebrew(unicode_set):
- "Unicode set for Hebrew Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0591, 0x05C7),
- (0x05D0, 0x05EA),
- (0x05EF, 0x05F4),
- (0xFB1D, 0xFB36),
- (0xFB38, 0xFB3C),
- (0xFB3E,),
- (0xFB40, 0xFB41),
- (0xFB43, 0xFB44),
- (0xFB46, 0xFB4F),
- ]
-
- class Devanagari(unicode_set):
- "Unicode set for Devanagari Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0900, 0x097F),
- (0xA8E0, 0xA8FF)
- ]
-
- # fmt: on
-
-
-pyparsing_unicode.Japanese._ranges = (
- pyparsing_unicode.Japanese.Kanji._ranges
- + pyparsing_unicode.Japanese.Hiragana._ranges
- + pyparsing_unicode.Japanese.Katakana._ranges
-)
-
-pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane
-
-# add language identifiers using language Unicode
-pyparsing_unicode.العربية = pyparsing_unicode.Arabic
-pyparsing_unicode.中文 = pyparsing_unicode.Chinese
-pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic
-pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek
-pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew
-pyparsing_unicode.日本語 = pyparsing_unicode.Japanese
-pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji
-pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana
-pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana
-pyparsing_unicode.한국어 = pyparsing_unicode.Korean
-pyparsing_unicode.ไทย = pyparsing_unicode.Thai
-pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari
diff --git a/spaces/Billyosoro/ESRGAN/inference_realesrgan_video.py b/spaces/Billyosoro/ESRGAN/inference_realesrgan_video.py
deleted file mode 100644
index 639b848e6578a2480ee0784e664c7751e325c477..0000000000000000000000000000000000000000
--- a/spaces/Billyosoro/ESRGAN/inference_realesrgan_video.py
+++ /dev/null
@@ -1,199 +0,0 @@
-import argparse
-import glob
-import mimetypes
-import os
-import queue
-import shutil
-import torch
-from basicsr.archs.rrdbnet_arch import RRDBNet
-from basicsr.utils.logger import AvgTimer
-from tqdm import tqdm
-
-from realesrgan import IOConsumer, PrefetchReader, RealESRGANer
-from realesrgan.archs.srvgg_arch import SRVGGNetCompact
-
-
-def main():
- """Inference demo for Real-ESRGAN.
-    It is mainly intended for restoring anime videos.
-
- """
- parser = argparse.ArgumentParser()
- parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
- parser.add_argument(
- '-n',
- '--model_name',
- type=str,
- default='RealESRGAN_x4plus',
-        help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus | '
-              'RealESRGANv2-anime-xsx2 | RealESRGANv2-animevideo-xsx2-nousm | RealESRGANv2-animevideo-xsx2 | '
-              'RealESRGANv2-anime-xsx4 | RealESRGANv2-animevideo-xsx4-nousm | RealESRGANv2-animevideo-xsx4'))
- parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
- parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
- parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored video')
- parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
- parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
- parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
- parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
- parser.add_argument('--half', action='store_true', help='Use half precision during inference')
- parser.add_argument('-v', '--video', action='store_true', help='Output a video using ffmpeg')
- parser.add_argument('-a', '--audio', action='store_true', help='Keep audio')
- parser.add_argument('--fps', type=float, default=None, help='FPS of the output video')
- parser.add_argument('--consumer', type=int, default=4, help='Number of IO consumers')
-
- parser.add_argument(
- '--alpha_upsampler',
- type=str,
- default='realesrgan',
- help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
- parser.add_argument(
- '--ext',
- type=str,
- default='auto',
- help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
- args = parser.parse_args()
-
- # ---------------------- determine models according to model names ---------------------- #
- args.model_name = args.model_name.split('.')[0]
- if args.model_name in ['RealESRGAN_x4plus', 'RealESRNet_x4plus']: # x4 RRDBNet model
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
- netscale = 4
- elif args.model_name in ['RealESRGAN_x4plus_anime_6B']: # x4 RRDBNet model with 6 blocks
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
- netscale = 4
- elif args.model_name in ['RealESRGAN_x2plus']: # x2 RRDBNet model
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
- netscale = 2
- elif args.model_name in [
- 'RealESRGANv2-anime-xsx2', 'RealESRGANv2-animevideo-xsx2-nousm', 'RealESRGANv2-animevideo-xsx2'
- ]: # x2 VGG-style model (XS size)
- model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu')
- netscale = 2
- elif args.model_name in [
- 'RealESRGANv2-anime-xsx4', 'RealESRGANv2-animevideo-xsx4-nousm', 'RealESRGANv2-animevideo-xsx4'
- ]: # x4 VGG-style model (XS size)
- model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
- netscale = 4
-
- # ---------------------- determine model paths ---------------------- #
- model_path = os.path.join('experiments/pretrained_models', args.model_name + '.pth')
- if not os.path.isfile(model_path):
- model_path = os.path.join('realesrgan/weights', args.model_name + '.pth')
- if not os.path.isfile(model_path):
- raise ValueError(f'Model {args.model_name} does not exist.')
-
- # restorer
- upsampler = RealESRGANer(
- scale=netscale,
- model_path=model_path,
- model=model,
- tile=args.tile,
- tile_pad=args.tile_pad,
- pre_pad=args.pre_pad,
- half=args.half)
-
- if args.face_enhance: # Use GFPGAN for face enhancement
- from gfpgan import GFPGANer
- face_enhancer = GFPGANer(
- model_path='https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth',
- upscale=args.outscale,
- arch='clean',
- channel_multiplier=2,
- bg_upsampler=upsampler)
- os.makedirs(args.output, exist_ok=True)
- # for saving restored frames
- save_frame_folder = os.path.join(args.output, 'frames_tmpout')
- os.makedirs(save_frame_folder, exist_ok=True)
-
- if mimetypes.guess_type(args.input)[0].startswith('video'): # is a video file
- video_name = os.path.splitext(os.path.basename(args.input))[0]
- frame_folder = os.path.join('tmp_frames', video_name)
- os.makedirs(frame_folder, exist_ok=True)
- # use ffmpeg to extract frames
- os.system(f'ffmpeg -i {args.input} -qscale:v 1 -qmin 1 -qmax 1 -vsync 0 {frame_folder}/frame%08d.png')
- # get image path list
- paths = sorted(glob.glob(os.path.join(frame_folder, '*')))
- if args.video:
- if args.fps is None:
- # get input video fps
- import ffmpeg
- probe = ffmpeg.probe(args.input)
- video_streams = [stream for stream in probe['streams'] if stream['codec_type'] == 'video']
- args.fps = eval(video_streams[0]['avg_frame_rate'])
- elif mimetypes.guess_type(args.input)[0].startswith('image'): # is an image file
- paths = [args.input]
- video_name = 'video'
- else:
- paths = sorted(glob.glob(os.path.join(args.input, '*')))
- video_name = 'video'
-
- timer = AvgTimer()
- timer.start()
- pbar = tqdm(total=len(paths), unit='frame', desc='inference')
- # set up prefetch reader
- reader = PrefetchReader(paths, num_prefetch_queue=4)
- reader.start()
-
- que = queue.Queue()
- consumers = [IOConsumer(args, que, f'IO_{i}') for i in range(args.consumer)]
- for consumer in consumers:
- consumer.start()
-
- for idx, (path, img) in enumerate(zip(paths, reader)):
- imgname, extension = os.path.splitext(os.path.basename(path))
- if len(img.shape) == 3 and img.shape[2] == 4:
- img_mode = 'RGBA'
- else:
- img_mode = None
-
- try:
- if args.face_enhance:
- _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
- else:
- output, _ = upsampler.enhance(img, outscale=args.outscale)
- except RuntimeError as error:
- print('Error', error)
- print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
-
- else:
- if args.ext == 'auto':
- extension = extension[1:]
- else:
- extension = args.ext
- if img_mode == 'RGBA': # RGBA images should be saved in png format
- extension = 'png'
- save_path = os.path.join(save_frame_folder, f'{imgname}_out.{extension}')
-
- que.put({'output': output, 'save_path': save_path})
-
- pbar.update(1)
- torch.cuda.synchronize()
- timer.record()
- avg_fps = 1. / (timer.get_avg_time() + 1e-7)
- pbar.set_description(f'idx {idx}, fps {avg_fps:.2f}')
-
- for _ in range(args.consumer):
- que.put('quit')
- for consumer in consumers:
- consumer.join()
- pbar.close()
-
- # merge frames to video
- if args.video:
- video_save_path = os.path.join(args.output, f'{video_name}_{args.suffix}.mp4')
- if args.audio:
- os.system(
- f'ffmpeg -r {args.fps} -i {save_frame_folder}/frame%08d_out.{extension} -i {args.input}'
- f' -map 0:v:0 -map 1:a:0 -c:a copy -c:v libx264 -r {args.fps} -pix_fmt yuv420p {video_save_path}')
- else:
- os.system(f'ffmpeg -r {args.fps} -i {save_frame_folder}/frame%08d_out.{extension} '
- f'-c:v libx264 -r {args.fps} -pix_fmt yuv420p {video_save_path}')
-
- # delete tmp file
- shutil.rmtree(save_frame_folder)
- if os.path.isdir(frame_folder):
- shutil.rmtree(frame_folder)
-
-
-if __name__ == '__main__':
- main()
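The script above derives the output frame rate by calling `eval` on ffprobe's `avg_frame_rate` string (for example `30000/1001`). As a hedged alternative, the short sketch below (not part of the original repository; the helper name `parse_frame_rate` is illustrative) parses the same rational string with `fractions.Fraction`:

```python
# Minimal sketch: parse ffprobe's avg_frame_rate rational without eval().
from fractions import Fraction

def parse_frame_rate(avg_frame_rate: str) -> float:
    """Convert an ffprobe rational such as '30000/1001' or '25/1' to float fps."""
    return float(Fraction(avg_frame_rate))

print(parse_frame_rate('30000/1001'))  # ~29.97
print(parse_frame_rate('25/1'))        # 25.0
```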
diff --git a/spaces/CVPR/LIVE/thrust/thrust/detail/complex/ctanh.h b/spaces/CVPR/LIVE/thrust/thrust/detail/complex/ctanh.h
deleted file mode 100644
index 6ef1590920db65c50582787fc953af1ffb582099..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/detail/complex/ctanh.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- * Copyright 2013 Filipe RNC Maia
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*-
- * Copyright (c) 2011 David Schultz
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice unmodified, this list of conditions, and the following
- * disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Adapted from FreeBSD by Filipe Maia :
- * freebsd/lib/msun/src/s_ctanh.c
- */
-
-/*
- * Hyperbolic tangent of a complex argument z = x + i y.
- *
- * The algorithm is from:
- *
- * W. Kahan. Branch Cuts for Complex Elementary Functions or Much
- * Ado About Nothing's Sign Bit. In The State of the Art in
- * Numerical Analysis, pp. 165 ff. Iserles and Powell, eds., 1987.
- *
- * Method:
- *
- * Let t = tan(x)
- * beta = 1/cos^2(y)
- * s = sinh(x)
- * rho = cosh(x)
- *
- * We have:
- *
- * tanh(z) = sinh(z) / cosh(z)
- *
- *              sinh(x) cos(y) + i cosh(x) sin(y)
- *           =  ---------------------------------
- *              cosh(x) cos(y) + i sinh(x) sin(y)
- *
- *              cosh(x) sinh(x) / cos^2(y) + i tan(y)
- *           =  -------------------------------------
- *                   1 + sinh^2(x) / cos^2(y)
- *
- *              beta rho s + i t
- *           =  ----------------
- *                1 + beta s^2
- *
- * Modifications:
- *
- * I omitted the original algorithm's handling of overflow in tan(x) after
- * verifying with nearpi.c that this can't happen in IEEE single or double
- * precision. I also handle large x differently.
- */
-
-#pragma once
-
-#include <thrust/complex.h>
-#include <thrust/detail/complex/math_private.h>
-#include <cmath>
-
-namespace thrust{
-namespace detail{
-namespace complex{
-
-using thrust::complex;
-
-__host__ __device__ inline
-complex<double> ctanh(const complex<double>& z){
- double x, y;
- double t, beta, s, rho, denom;
- uint32_t hx, ix, lx;
-
- x = z.real();
- y = z.imag();
-
- extract_words(hx, lx, x);
- ix = hx & 0x7fffffff;
-
- /*
- * ctanh(NaN + i 0) = NaN + i 0
- *
- * ctanh(NaN + i y) = NaN + i NaN for y != 0
- *
- * The imaginary part has the sign of x*sin(2*y), but there's no
- * special effort to get this right.
- *
- * ctanh(+-Inf +- i Inf) = +-1 +- 0
- *
- * ctanh(+-Inf + i y) = +-1 + 0 sin(2y) for y finite
- *
- * The imaginary part of the sign is unspecified. This special
- * case is only needed to avoid a spurious invalid exception when
- * y is infinite.
- */
- if (ix >= 0x7ff00000) {
- if ((ix & 0xfffff) | lx) /* x is NaN */
- return (complex<double>(x, (y == 0 ? y : x * y)));
- set_high_word(x, hx - 0x40000000); /* x = copysign(1, x) */
- return (complex<double>(x, copysign(0.0, isinf(y) ? y : sin(y) * cos(y))));
- }
-
- /*
- * ctanh(x + i NAN) = NaN + i NaN
- * ctanh(x +- i Inf) = NaN + i NaN
- */
- if (!isfinite(y))
- return (complex<double>(y - y, y - y));
-
- /*
- * ctanh(+-huge + i +-y) ~= +-1 +- i 2sin(2y)/exp(2x), using the
- * approximation sinh^2(huge) ~= exp(2*huge) / 4.
- * We use a modified formula to avoid spurious overflow.
- */
- if (ix >= 0x40360000) { /* x >= 22 */
- double exp_mx = exp(-fabs(x));
- return (complex<double>(copysign(1.0, x),
- 4.0 * sin(y) * cos(y) * exp_mx * exp_mx));
- }
-
- /* Kahan's algorithm */
- t = tan(y);
- beta = 1.0 + t * t; /* = 1 / cos^2(y) */
- s = sinh(x);
- rho = sqrt(1.0 + s * s); /* = cosh(x) */
- denom = 1.0 + beta * s * s;
- return (complex<double>((beta * rho * s) / denom, t / denom));
-}
-
-__host__ __device__ inline
-complex<double> ctan(complex<double> z){
- /* ctan(z) = -I * ctanh(I * z) */
- z = ctanh(complex<double>(-z.imag(), z.real()));
- return (complex<double>(z.imag(), -z.real()));
-}
-
-} // namespace complex
-
-} // namespace detail
-
-
-template <typename ValueType>
-__host__ __device__
-inline complex<ValueType> tan(const complex<ValueType>& z){
- return sin(z)/cos(z);
-}
-
-template <typename ValueType>
-__host__ __device__
-inline complex<ValueType> tanh(const complex<ValueType>& z){
- // This implementation seems better than the simple sin/cos
- return (thrust::exp(ValueType(2)*z)-ValueType(1))/
- (thrust::exp(ValueType(2)*z)+ValueType(1));
-}
-
-template <>
-__host__ __device__
-inline complex<double> tan(const complex<double>& z){
- return detail::complex::ctan(z);
-}
-
-template <>
-__host__ __device__
-inline complex<double> tanh(const complex<double>& z){
- return detail::complex::ctanh(z);
-}
-
-} // namespace thrust
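As a sanity check on the derivation in the comment block above, here is a short Python sketch (not part of Thrust) that evaluates the same beta/rho/s/t formula and compares it with `cmath.tanh` for a few finite arguments:

```python
# Numerical check of the Kahan-style complex tanh formula used in ctanh above.
import cmath
import math

def kahan_tanh(z: complex) -> complex:
    x, y = z.real, z.imag
    t = math.tan(y)
    beta = 1.0 + t * t            # = 1 / cos^2(y)
    s = math.sinh(x)
    rho = math.sqrt(1.0 + s * s)  # = cosh(x)
    denom = 1.0 + beta * s * s
    return complex((beta * rho * s) / denom, t / denom)

for z in (0.5 + 0.25j, -1.0 + 2.0j, 3.0 - 0.1j):
    assert cmath.isclose(kahan_tanh(z), cmath.tanh(z), rel_tol=1e-9)
```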
diff --git a/spaces/CVPR/LIVE/thrust/thrust/detail/modern_gcc_required.h b/spaces/CVPR/LIVE/thrust/thrust/detail/modern_gcc_required.h
deleted file mode 100644
index a8c3d98ba996eec9d6b010dabad65d2261d7e7bc..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/detail/modern_gcc_required.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright 2018 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-#ifndef THRUST_MODERN_GCC_REQUIRED_NO_ERROR
-# if defined(THRUST_GCC_VERSION) && !defined(THRUST_MODERN_GCC)
-# error GCC 5 or later is required for this Thrust feature; please upgrade your compiler.
-# endif
-#endif
-
diff --git a/spaces/CVPR/MonoScene/monoscene/.ipynb_checkpoints/monoscene-checkpoint.py b/spaces/CVPR/MonoScene/monoscene/.ipynb_checkpoints/monoscene-checkpoint.py
deleted file mode 100644
index bc4d020729b6698887055771439f87a491572bd1..0000000000000000000000000000000000000000
--- a/spaces/CVPR/MonoScene/monoscene/.ipynb_checkpoints/monoscene-checkpoint.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import pytorch_lightning as pl
-import torch
-import torch.nn as nn
-from monoscene.unet3d_nyu import UNet3D as UNet3DNYU
-from monoscene.unet3d_kitti import UNet3D as UNet3DKitti
-from monoscene.flosp import FLoSP
-import numpy as np
-import torch.nn.functional as F
-from monoscene.unet2d import UNet2D
-
-
-class MonoScene(pl.LightningModule):
- def __init__(
- self,
- n_classes,
- feature,
- project_scale,
- full_scene_size,
- dataset,
- n_relations=4,
- context_prior=True,
- fp_loss=True,
- project_res=[],
- frustum_size=4,
- relation_loss=False,
- CE_ssc_loss=True,
- geo_scal_loss=True,
- sem_scal_loss=True,
- lr=1e-4,
- weight_decay=1e-4,
- ):
- super().__init__()
-
- self.project_res = project_res
- self.fp_loss = fp_loss
- self.dataset = dataset
- self.context_prior = context_prior
- self.frustum_size = frustum_size
- self.relation_loss = relation_loss
- self.CE_ssc_loss = CE_ssc_loss
- self.sem_scal_loss = sem_scal_loss
- self.geo_scal_loss = geo_scal_loss
- self.project_scale = project_scale
- self.lr = lr
- self.weight_decay = weight_decay
-
- self.projects = {}
- self.scale_2ds = [1, 2, 4, 8] # 2D scales
- for scale_2d in self.scale_2ds:
- self.projects[str(scale_2d)] = FLoSP(
- full_scene_size, project_scale=self.project_scale, dataset=self.dataset
- )
- self.projects = nn.ModuleDict(self.projects)
-
- self.n_classes = n_classes
- if self.dataset == "NYU":
- self.net_3d_decoder = UNet3DNYU(
- self.n_classes,
- nn.BatchNorm3d,
- n_relations=n_relations,
- feature=feature,
- full_scene_size=full_scene_size,
- context_prior=context_prior,
- )
- elif self.dataset == "kitti":
- self.net_3d_decoder = UNet3DKitti(
- self.n_classes,
- nn.BatchNorm3d,
- project_scale=project_scale,
- feature=feature,
- full_scene_size=full_scene_size,
- context_prior=context_prior,
- )
- self.net_rgb = UNet2D.build(out_feature=feature, use_decoder=True)
-
- def forward(self, batch):
-
- img = batch["img"]
- bs = len(img)
-
- out = {}
-
- x_rgb = self.net_rgb(img)
-
- x3ds = []
- for i in range(bs):
- x3d = None
- for scale_2d in self.project_res:
-
- # project features at each 2D scale to target 3D scale
- scale_2d = int(scale_2d)
- projected_pix = batch["projected_pix_{}".format(self.project_scale)][i].cuda()
- fov_mask = batch["fov_mask_{}".format(self.project_scale)][i].cuda()
-
- # Sum all the 3D features
- if x3d is None:
- x3d = self.projects[str(scale_2d)](
- x_rgb["1_" + str(scale_2d)][i],
- projected_pix // scale_2d,
- fov_mask,
- )
- else:
- x3d += self.projects[str(scale_2d)](
- x_rgb["1_" + str(scale_2d)][i],
- projected_pix // scale_2d,
- fov_mask,
- )
- x3ds.append(x3d)
-
- input_dict = {
- "x3d": torch.stack(x3ds),
- }
-
- out_dict = self.net_3d_decoder(input_dict)
-
- ssc_pred = out_dict["ssc_logit"]
-
- y_pred = ssc_pred.detach().cpu().numpy()
- y_pred = np.argmax(y_pred, axis=1)
-
- return y_pred
-
-
diff --git a/spaces/CVPR/WALT/mmdet/models/backbones/detectors_resnet.py b/spaces/CVPR/WALT/mmdet/models/backbones/detectors_resnet.py
deleted file mode 100644
index 519db464493c7c7b60fc34be1d21add2235ec341..0000000000000000000000000000000000000000
--- a/spaces/CVPR/WALT/mmdet/models/backbones/detectors_resnet.py
+++ /dev/null
@@ -1,305 +0,0 @@
-import torch.nn as nn
-import torch.utils.checkpoint as cp
-from mmcv.cnn import build_conv_layer, build_norm_layer, constant_init
-
-from ..builder import BACKBONES
-from .resnet import Bottleneck as _Bottleneck
-from .resnet import ResNet
-
-
-class Bottleneck(_Bottleneck):
- r"""Bottleneck for the ResNet backbone in `DetectoRS
- <https://arxiv.org/pdf/2006.02334.pdf>`_.
-
- This bottleneck allows the users to specify whether to use
- SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid).
-
- Args:
- inplanes (int): The number of input channels.
- planes (int): The number of output channels before expansion.
- rfp_inplanes (int, optional): The number of channels from RFP.
- Default: None. If specified, an additional conv layer will be
- added for ``rfp_feat``. Otherwise, the structure is the same as
- base class.
- sac (dict, optional): Dictionary to construct SAC. Default: None.
- """
- expansion = 4
-
- def __init__(self,
- inplanes,
- planes,
- rfp_inplanes=None,
- sac=None,
- **kwargs):
- super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
-
- assert sac is None or isinstance(sac, dict)
- self.sac = sac
- self.with_sac = sac is not None
- if self.with_sac:
- self.conv2 = build_conv_layer(
- self.sac,
- planes,
- planes,
- kernel_size=3,
- stride=self.conv2_stride,
- padding=self.dilation,
- dilation=self.dilation,
- bias=False)
-
- self.rfp_inplanes = rfp_inplanes
- if self.rfp_inplanes:
- self.rfp_conv = build_conv_layer(
- None,
- self.rfp_inplanes,
- planes * self.expansion,
- 1,
- stride=1,
- bias=True)
- self.init_weights()
-
- def init_weights(self):
- """Initialize the weights."""
- if self.rfp_inplanes:
- constant_init(self.rfp_conv, 0)
-
- def rfp_forward(self, x, rfp_feat):
- """The forward function that also takes the RFP features as input."""
-
- def _inner_forward(x):
- identity = x
-
- out = self.conv1(x)
- out = self.norm1(out)
- out = self.relu(out)
-
- if self.with_plugins:
- out = self.forward_plugin(out, self.after_conv1_plugin_names)
-
- out = self.conv2(out)
- out = self.norm2(out)
- out = self.relu(out)
-
- if self.with_plugins:
- out = self.forward_plugin(out, self.after_conv2_plugin_names)
-
- out = self.conv3(out)
- out = self.norm3(out)
-
- if self.with_plugins:
- out = self.forward_plugin(out, self.after_conv3_plugin_names)
-
- if self.downsample is not None:
- identity = self.downsample(x)
-
- out += identity
-
- return out
-
- if self.with_cp and x.requires_grad:
- out = cp.checkpoint(_inner_forward, x)
- else:
- out = _inner_forward(x)
-
- if self.rfp_inplanes:
- rfp_feat = self.rfp_conv(rfp_feat)
- out = out + rfp_feat
-
- out = self.relu(out)
-
- return out
-
-
-class ResLayer(nn.Sequential):
- """ResLayer to build ResNet style backbone for RPF in detectoRS.
-
- The difference between this module and base class is that we pass
- ``rfp_inplanes`` to the first block.
-
- Args:
- block (nn.Module): block used to build ResLayer.
- inplanes (int): inplanes of block.
- planes (int): planes of block.
- num_blocks (int): number of blocks.
- stride (int): stride of the first block. Default: 1
- avg_down (bool): Use AvgPool instead of stride conv when
- downsampling in the bottleneck. Default: False
- conv_cfg (dict): dictionary to construct and config conv layer.
- Default: None
- norm_cfg (dict): dictionary to construct and config norm layer.
- Default: dict(type='BN')
- downsample_first (bool): Downsample at the first block or last block.
- False for Hourglass, True for ResNet. Default: True
- rfp_inplanes (int, optional): The number of channels from RFP.
- Default: None. If specified, an additional conv layer will be
- added for ``rfp_feat``. Otherwise, the structure is the same as
- base class.
- """
-
- def __init__(self,
- block,
- inplanes,
- planes,
- num_blocks,
- stride=1,
- avg_down=False,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- downsample_first=True,
- rfp_inplanes=None,
- **kwargs):
- self.block = block
- assert downsample_first, f'downsample_first={downsample_first} is ' \
- 'not supported in DetectoRS'
-
- downsample = None
- if stride != 1 or inplanes != planes * block.expansion:
- downsample = []
- conv_stride = stride
- if avg_down and stride != 1:
- conv_stride = 1
- downsample.append(
- nn.AvgPool2d(
- kernel_size=stride,
- stride=stride,
- ceil_mode=True,
- count_include_pad=False))
- downsample.extend([
- build_conv_layer(
- conv_cfg,
- inplanes,
- planes * block.expansion,
- kernel_size=1,
- stride=conv_stride,
- bias=False),
- build_norm_layer(norm_cfg, planes * block.expansion)[1]
- ])
- downsample = nn.Sequential(*downsample)
-
- layers = []
- layers.append(
- block(
- inplanes=inplanes,
- planes=planes,
- stride=stride,
- downsample=downsample,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- rfp_inplanes=rfp_inplanes,
- **kwargs))
- inplanes = planes * block.expansion
- for _ in range(1, num_blocks):
- layers.append(
- block(
- inplanes=inplanes,
- planes=planes,
- stride=1,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- **kwargs))
-
- super(ResLayer, self).__init__(*layers)
-
-
-@BACKBONES.register_module()
-class DetectoRS_ResNet(ResNet):
- """ResNet backbone for DetectoRS.
-
- Args:
- sac (dict, optional): Dictionary to construct SAC (Switchable Atrous
- Convolution). Default: None.
- stage_with_sac (list): Which stage to use sac. Default: (False, False,
- False, False).
- rfp_inplanes (int, optional): The number of channels from RFP.
- Default: None. If specified, an additional conv layer will be
- added for ``rfp_feat``. Otherwise, the structure is the same as
- base class.
- output_img (bool): If ``True``, the input image will be inserted into
- the starting position of output. Default: False.
- pretrained (str, optional): The pretrained model to load.
- """
-
- arch_settings = {
- 50: (Bottleneck, (3, 4, 6, 3)),
- 101: (Bottleneck, (3, 4, 23, 3)),
- 152: (Bottleneck, (3, 8, 36, 3))
- }
-
- def __init__(self,
- sac=None,
- stage_with_sac=(False, False, False, False),
- rfp_inplanes=None,
- output_img=False,
- pretrained=None,
- **kwargs):
- self.sac = sac
- self.stage_with_sac = stage_with_sac
- self.rfp_inplanes = rfp_inplanes
- self.output_img = output_img
- self.pretrained = pretrained
- super(DetectoRS_ResNet, self).__init__(**kwargs)
-
- self.inplanes = self.stem_channels
- self.res_layers = []
- for i, num_blocks in enumerate(self.stage_blocks):
- stride = self.strides[i]
- dilation = self.dilations[i]
- dcn = self.dcn if self.stage_with_dcn[i] else None
- sac = self.sac if self.stage_with_sac[i] else None
- if self.plugins is not None:
- stage_plugins = self.make_stage_plugins(self.plugins, i)
- else:
- stage_plugins = None
- planes = self.base_channels * 2**i
- res_layer = self.make_res_layer(
- block=self.block,
- inplanes=self.inplanes,
- planes=planes,
- num_blocks=num_blocks,
- stride=stride,
- dilation=dilation,
- style=self.style,
- avg_down=self.avg_down,
- with_cp=self.with_cp,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- dcn=dcn,
- sac=sac,
- rfp_inplanes=rfp_inplanes if i > 0 else None,
- plugins=stage_plugins)
- self.inplanes = planes * self.block.expansion
- layer_name = f'layer{i + 1}'
- self.add_module(layer_name, res_layer)
- self.res_layers.append(layer_name)
-
- self._freeze_stages()
-
- def make_res_layer(self, **kwargs):
- """Pack all blocks in a stage into a ``ResLayer`` for DetectoRS."""
- return ResLayer(**kwargs)
-
- def forward(self, x):
- """Forward function."""
- outs = list(super(DetectoRS_ResNet, self).forward(x))
- if self.output_img:
- outs.insert(0, x)
- return tuple(outs)
-
- def rfp_forward(self, x, rfp_feats):
- """Forward function for RFP."""
- if self.deep_stem:
- x = self.stem(x)
- else:
- x = self.conv1(x)
- x = self.norm1(x)
- x = self.relu(x)
- x = self.maxpool(x)
- outs = []
- for i, layer_name in enumerate(self.res_layers):
- res_layer = getattr(self, layer_name)
- rfp_feat = rfp_feats[i] if i > 0 else None
- for layer in res_layer:
- x = layer.rfp_forward(x, rfp_feat)
- if i in self.out_indices:
- outs.append(x)
- return tuple(outs)
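The `rfp_forward` method above fuses the recursive feature after the residual addition: a zero-initialised 1x1 convolution projects `rfp_feat` to the block's output channels, so the extra branch starts out as a no-op. A rough standalone sketch of that fusion step (shapes are assumed for illustration; this is not the mmdet implementation):

```python
# Sketch of the RFP fusion in Bottleneck.rfp_forward: conv1x1(rfp_feat) added
# to the block output before the final ReLU, with the conv initialised to zero.
import torch
import torch.nn as nn

rfp_inplanes, out_channels = 64, 256
rfp_conv = nn.Conv2d(rfp_inplanes, out_channels, kernel_size=1, bias=True)
nn.init.constant_(rfp_conv.weight, 0.0)  # mirrors constant_init(self.rfp_conv, 0)
nn.init.constant_(rfp_conv.bias, 0.0)

block_out = torch.randn(2, out_channels, 32, 32)  # bottleneck branch + identity
rfp_feat = torch.randn(2, rfp_inplanes, 32, 32)   # feature fed back by the RFP

out = torch.relu(block_out + rfp_conv(rfp_feat))
print(out.shape)  # torch.Size([2, 256, 32, 32])
```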
diff --git a/spaces/CVPR/WALT/mmdet/models/detectors/scnet.py b/spaces/CVPR/WALT/mmdet/models/detectors/scnet.py
deleted file mode 100644
index 04a2347c4ec1efcbfda59a134cddd8bde620d983..0000000000000000000000000000000000000000
--- a/spaces/CVPR/WALT/mmdet/models/detectors/scnet.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from ..builder import DETECTORS
-from .cascade_rcnn import CascadeRCNN
-
-
-@DETECTORS.register_module()
-class SCNet(CascadeRCNN):
- """Implementation of `SCNet `_"""
-
- def __init__(self, **kwargs):
- super(SCNet, self).__init__(**kwargs)
diff --git a/spaces/CVPR/drawings-to-human/.github/README.md b/spaces/CVPR/drawings-to-human/.github/README.md
deleted file mode 100644
index 7b66623f8899ea4fcd29a601c1306980a3aaa97d..0000000000000000000000000000000000000000
--- a/spaces/CVPR/drawings-to-human/.github/README.md
+++ /dev/null
@@ -1 +0,0 @@
-../ALT-README.md
\ No newline at end of file
diff --git a/spaces/Cat125/text-generator-v2/generation/generators.py b/spaces/Cat125/text-generator-v2/generation/generators.py
deleted file mode 100644
index 04fa046972a3522f5f8815b0a1fe30036b784c0f..0000000000000000000000000000000000000000
--- a/spaces/Cat125/text-generator-v2/generation/generators.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from random import random
-
-import gradio as gr
-
-from datamanager import get_data_v3, models
-from generation.words import get_next_word
-
-
-def find_model(model_name):
- for key, model in models.items():
- if model['name'] == model_name:
- return get_data_v3(key)
- raise ValueError('Model %s not found' % model_name)
-
-
-def generate(user_message, word_count, model_name, stop_chance):
- db = find_model(model_name)
- message = user_message.lower().strip()
- if word_count < 0 or word_count > 300:
- return gr.Warning("Invalid word count. It must be between 0 and 300.")
- text = ""
- curword = ""
- prevword = ""
- while len(text.split()) < word_count:
- prevword = curword
- curword = get_next_word(db, message, prevword, text, {})
- text += curword + " "
- if '.' in curword and random() < stop_chance:
- yield text.strip()
- break
- yield text
-
-
-def cont(user_message, word_count, model_name):
- db = find_model(model_name)
- message = user_message.lower().strip()
- if not message:
- return gr.Warning('No message')
- if word_count < 0 or word_count > 450:
- raise gr.Error("Invalid word count. It must be between 0 and 450.")
- text = message
- curword = text.split()[-1]
- text += " "
- while len(text.split()) < word_count:
- prevword = curword
- curword = get_next_word(db, message, prevword, text, {})
- text += curword + " "
- yield text.strip()
diff --git a/spaces/Chrysoula/voice_to_text_swedish/app.py b/spaces/Chrysoula/voice_to_text_swedish/app.py
deleted file mode 100644
index aad092eb599ad2ac1885d4d8d35cdedc8e0b0295..0000000000000000000000000000000000000000
--- a/spaces/Chrysoula/voice_to_text_swedish/app.py
+++ /dev/null
@@ -1,59 +0,0 @@
-from transformers import pipeline
-import gradio as gr
-import pytube as pt
-
-pipe = pipeline(model="Hoft/whisper-small-swedish-asr") # change to "your-username/the-name-you-picked"
-sa = pipeline('sentiment-analysis', model='marma/bert-base-swedish-cased-sentiment')
-
-def get_emoji(feeling):
- if feeling == 'POSITIVE':
- return '😊'
- else:
- return '😔'
-def microphone_or_file_transcribe(audio):
- text = pipe(audio)["text"]
- sa_result = sa(text)[0]
- return text, get_emoji(sa_result['label'])
-
-def youtube_transcribe(url):
- yt = pt.YouTube(url)
-
- stream = yt.streams.filter(only_audio=True)[0]
- stream.download(filename="audio.mp3")
-
- text = pipe("audio.mp3")["text"]
-
- sa_result = sa(text)[0]
- return text, get_emoji(sa_result['label'])
-
-
-app = gr.Blocks()
-
-microphone_tab = gr.Interface(
- fn=microphone_or_file_transcribe,
- inputs=gr.Audio(source="microphone", type="filepath"),
- outputs=[gr.Textbox(label="Text"), gr.Textbox(label="Feeling")],
- title="Whisper Small Swedish: Microphone ",
- description="Realtime demo for Swedish speech recognition using a fine-tuned Whisper small model and Sentiment Analysis.",
-)
-
-youtube_tab = gr.Interface(
- fn=youtube_transcribe,
- inputs=[gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video", label="URL")],
- outputs=[gr.Textbox(label="Text"), gr.Textbox(label="Feeling")],
- title="Whisper Small Swedish: Youtube",
- description="Realtime demo for Swedish speech recognition using a fine-tuned Whisper small model and Sentiment Analysis.",
-)
-
-file_tab = gr.Interface(
- fn=microphone_or_file_transcribe,
- inputs= gr.inputs.Audio(source="upload", type="filepath"),
- outputs=[gr.Textbox(label="Text"), gr.Textbox(label="Feeling")],
- title="Whisper Small Swedish: File",
- description="Realtime demo for Swedish speech recognition using a fine-tuned Whisper small model and Sentiment Analysis.",
-)
-
-with app:
- gr.TabbedInterface([microphone_tab, youtube_tab, file_tab], ["Microphone", "YouTube", "File"])
-
-app.launch(enable_queue=True)
\ No newline at end of file
diff --git a/spaces/CikeyQI/Yunzai/Yunzai/lib/events/connect.js b/spaces/CikeyQI/Yunzai/Yunzai/lib/events/connect.js
deleted file mode 100644
index 46ef1e231a9564c4dd8cae17214bc3d1c7968dce..0000000000000000000000000000000000000000
--- a/spaces/CikeyQI/Yunzai/Yunzai/lib/events/connect.js
+++ /dev/null
@@ -1,23 +0,0 @@
-import EventListener from "../listener/listener.js"
-import cfg from "../config/config.js"
-
-/**
- * Listen for the connect event
- */
-export default class connectEvent extends EventListener {
- constructor() {
- super({ event: "connect" })
- }
-
- async execute(e) {
- if (!Bot.uin.includes(e.self_id))
- Bot.uin.push(e.self_id)
-
- if (!cfg.bot.online_msg) return
- const key = `Yz:loginMsg:${e.self_id}`
- if (await redis.get(key)) return
- redis.set(key, "1", { EX: cfg.bot.online_msg_exp })
- for (const i of cfg.master[e.self_id] || [])
- e.bot.pickFriend(i).sendMsg(`欢迎使用【TRSS-Yunzai v${cfg.package.version}】\n【#帮助】查看指令说明\n【#状态】查看运行状态\n【#日志】查看运行日志\n【#重启】重新启动\n【#更新】拉取 Git 更新\n【#全部更新】更新全部插件\n【#更新日志】查看更新日志\n【#设置主人】设置主人账号\n【#安装插件】查看可安装插件`)
- }
-}
\ No newline at end of file
diff --git a/spaces/Cloudyy/bark-voice-cloning/hubert/hubert_manager.py b/spaces/Cloudyy/bark-voice-cloning/hubert/hubert_manager.py
deleted file mode 100644
index 857f2af29886fca6eb4df506853f446066af7c04..0000000000000000000000000000000000000000
--- a/spaces/Cloudyy/bark-voice-cloning/hubert/hubert_manager.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import os.path
-import shutil
-import urllib.request
-
-import huggingface_hub
-
-
-class HuBERTManager:
- @staticmethod
- def make_sure_hubert_installed(download_url: str = 'https://dl.fbaipublicfiles.com/hubert/hubert_base_ls960.pt', file_name: str = 'hubert.pt'):
- install_dir = os.path.join('data', 'models', 'hubert')
- if not os.path.isdir(install_dir):
- os.makedirs(install_dir, exist_ok=True)
- install_file = os.path.join(install_dir, file_name)
- if not os.path.isfile(install_file):
- print('Downloading HuBERT base model')
- urllib.request.urlretrieve(download_url, install_file)
- print('Downloaded HuBERT')
- return install_file
-
-
- @staticmethod
- def make_sure_tokenizer_installed(model: str = 'quantifier_hubert_base_ls960_14.pth', repo: str = 'GitMylo/bark-voice-cloning', local_file: str = 'tokenizer.pth'):
- install_dir = os.path.join('data', 'models', 'hubert')
- if not os.path.isdir(install_dir):
- os.makedirs(install_dir, exist_ok=True)
- install_file = os.path.join(install_dir, local_file)
- if not os.path.isfile(install_file):
- print('Downloading HuBERT custom tokenizer')
- huggingface_hub.hf_hub_download(repo, model, local_dir=install_dir, local_dir_use_symlinks=False)
- shutil.move(os.path.join(install_dir, model), install_file)
- print('Downloaded tokenizer')
- return install_file
diff --git a/spaces/CofAI/chat.b4/client/html/index.html b/spaces/CofAI/chat.b4/client/html/index.html
deleted file mode 100644
index f5fa671a6b0197a6f51058c0e673a3ce9ebb989d..0000000000000000000000000000000000000000
--- a/spaces/CofAI/chat.b4/client/html/index.html
+++ /dev/null
@@ -1,126 +0,0 @@
-<!-- Markup lost in extraction: this index.html defined the Chat.CofAI chat page,
- including a "Stop generation" (Остановить генерацию) control and an
- "Internet access" (Доступ в интернет) toggle. -->
diff --git a/spaces/Cpp4App/Cpp4App/SEM/retention_pp_processing.py b/spaces/Cpp4App/Cpp4App/SEM/retention_pp_processing.py
deleted file mode 100644
index 657c51b3a3f8fcc220a7b6fefd04dbdacbe9c932..0000000000000000000000000000000000000000
--- a/spaces/Cpp4App/Cpp4App/SEM/retention_pp_processing.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from types_pp_processing import cleanHtml
-import spacy
-nlp = spacy.load('en_core_web_sm')
-def retention_process(txt):
- text = ""
- result = cleanHtml(txt)
- for sen in result:
- text += sen
- time = ""
- doc = nlp(text)
- flag = 0
- for token in doc:
- if flag == 1:
- if token.text == "year" or token.text == "month" or token.text == "week" or token.text == "day" or token.text == "hour":
- time += " " + token.text
- break
- else:
- flag = 0
- if token.pos_ == "NUM":
- flag = 1
- time = token.text
- if time == "":
- time = "The privacy policy does not specify how long the data will be retained"
- return time,text
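The loop above looks for a NUM token and, if the next token is a time unit, records the pair as the retention period. Below is a rough usage sketch of the same rule on a sample sentence (extended to plural unit forms, which the original only matches in the singular); it assumes the `en_core_web_sm` spaCy model is installed:

```python
# Hedged usage sketch of the NUM + time-unit extraction rule implemented above.
import spacy

nlp = spacy.load('en_core_web_sm')
doc = nlp("We retain your account data for 2 years after account deletion.")

units = {"year", "years", "month", "months", "week", "weeks",
         "day", "days", "hour", "hours"}
retention = ""
for prev, tok in zip(doc, list(doc)[1:]):
    if prev.pos_ == "NUM" and tok.text in units:
        retention = f"{prev.text} {tok.text}"
        break
print(retention or "No explicit retention period found")
```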
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-5605d000.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-5605d000.js
deleted file mode 100644
index 648d939bf24db896e1dff64f3b9a504b0465951a..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-5605d000.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import{S as v,e as T,s as S,N as K,k as j,K as _,L as C,p as L,o as w,z as r,v as d,A as M,x as A,B as N,at as G,a4 as k,C as H,a7 as J,a9 as B,ab as q,ac as z,ad as D,F as O}from"./index-1d65707a.js";import{a as P}from"./TabItem.svelte_svelte_type_style_lang-1276453b.js";import{C as Q}from"./Column-6c43afc7.js";/* empty css */function R(a){let e;const n=a[8].default,t=B(n,a,a[9],null);return{c(){t&&t.c()},m(s,l){t&&t.m(s,l),e=!0},p(s,l){t&&t.p&&(!e||l&512)&&q(t,n,s,s[9],e?D(n,s[9],l,null):z(s[9]),null)},i(s){e||(r(t,s),e=!0)},o(s){d(t,s),e=!1},d(s){t&&t.d(s)}}}function U(a){let e,n,t,s;return n=new Q({props:{$$slots:{default:[R]},$$scope:{ctx:a}}}),{c(){e=K("div"),j(n.$$.fragment),_(e,"id",a[0]),_(e,"class",t="tabitem "+a[1].join(" ")+" svelte-19hvt5v"),C(e,"display",a[3]===a[2]?"block":"none")},m(l,m){L(l,e,m),w(n,e,null),s=!0},p(l,[m]){const c={};m&512&&(c.$$scope={dirty:m,ctx:l}),n.$set(c),(!s||m&1)&&_(e,"id",l[0]),(!s||m&2&&t!==(t="tabitem "+l[1].join(" ")+" svelte-19hvt5v"))&&_(e,"class",t),m&12&&C(e,"display",l[3]===l[2]?"block":"none")},i(l){s||(r(n.$$.fragment,l),s=!0)},o(l){d(n.$$.fragment,l),s=!1},d(l){l&&M(e),A(n)}}}function V(a,e,n){let t,s,{$$slots:l={},$$scope:m}=e,{elem_id:c=""}=e,{elem_classes:f=[]}=e,{name:u}=e,{id:i={}}=e;const E=N(),{register_tab:F,unregister_tab:I,selected_tab:b,selected_tab_index:g}=G(P);k(a,b,o=>n(3,s=o)),k(a,g,o=>n(7,t=o));let h=F({name:u,id:i});return H(()=>()=>I({name:u,id:i})),a.$$set=o=>{"elem_id"in o&&n(0,c=o.elem_id),"elem_classes"in o&&n(1,f=o.elem_classes),"name"in o&&n(6,u=o.name),"id"in o&&n(2,i=o.id),"$$scope"in o&&n(9,m=o.$$scope)},a.$$.update=()=>{a.$$.dirty&192&&t===h&&J().then(()=>E("select",{value:u,index:h}))},[c,f,i,s,b,g,u,t,l,m]}class W extends v{constructor(e){super(),T(this,e,V,U,S,{elem_id:0,elem_classes:1,name:6,id:2})}}function X(a){let e;const n=a[4].default,t=B(n,a,a[6],null);return{c(){t&&t.c()},m(s,l){t&&t.m(s,l),e=!0},p(s,l){t&&t.p&&(!e||l&64)&&q(t,n,s,s[6],e?D(n,s[6],l,null):z(s[6]),null)},i(s){e||(r(t,s),e=!0)},o(s){d(t,s),e=!1},d(s){t&&t.d(s)}}}function Y(a){let e,n;return e=new W({props:{elem_id:a[0],elem_classes:a[1],name:a[2],id:a[3],$$slots:{default:[X]},$$scope:{ctx:a}}}),e.$on("select",a[5]),{c(){j(e.$$.fragment)},m(t,s){w(e,t,s),n=!0},p(t,[s]){const l={};s&1&&(l.elem_id=t[0]),s&2&&(l.elem_classes=t[1]),s&4&&(l.name=t[2]),s&8&&(l.id=t[3]),s&64&&(l.$$scope={dirty:s,ctx:t}),e.$set(l)},i(t){n||(r(e.$$.fragment,t),n=!0)},o(t){d(e.$$.fragment,t),n=!1},d(t){A(e,t)}}}function Z(a,e,n){let{$$slots:t={},$$scope:s}=e,{elem_id:l=""}=e,{elem_classes:m=[]}=e,{label:c}=e,{id:f}=e;function u(i){O.call(this,a,i)}return a.$$set=i=>{"elem_id"in i&&n(0,l=i.elem_id),"elem_classes"in i&&n(1,m=i.elem_classes),"label"in i&&n(2,c=i.label),"id"in i&&n(3,f=i.id),"$$scope"in i&&n(6,s=i.$$scope)},[l,m,c,f,t,u,s]}class y extends v{constructor(e){super(),T(this,e,Z,Y,S,{elem_id:0,elem_classes:1,label:2,id:3})}}const te=y,se=["static"];export{te as Component,se as modes};
-//# sourceMappingURL=index-5605d000.js.map
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py
deleted file mode 100644
index 5dd64fa51435b97142bb61cfe12f9369e6f1488b..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py
+++ /dev/null
@@ -1,230 +0,0 @@
-# coding=utf-8
-# Copyright 2022-present, the HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Contains utilities to validate argument values in `huggingface_hub`."""
-import inspect
-import re
-import warnings
-from functools import wraps
-from itertools import chain
-from typing import Any, Dict
-
-from ._typing import CallableT
-
-
-REPO_ID_REGEX = re.compile(
- r"""
- ^
- (\b[\w\-.]+\b/)? # optional namespace (username or organization)
- \b # starts with a word boundary
- [\w\-.]{1,96} # repo_name: alphanumeric + . _ -
- \b # ends with a word boundary
- $
- """,
- flags=re.VERBOSE,
-)
-
-
-class HFValidationError(ValueError):
- """Generic exception thrown by `huggingface_hub` validators.
-
- Inherits from [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError).
- """
-
-
-def validate_hf_hub_args(fn: CallableT) -> CallableT:
- """Validate values received as argument for any public method of `huggingface_hub`.
-
- The goal of this decorator is to harmonize validation of arguments reused
- everywhere. By default, all defined validators are tested.
-
- Validators:
- - [`~utils.validate_repo_id`]: `repo_id` must be `"repo_name"`
- or `"namespace/repo_name"`. Namespace is a username or an organization.
- - [`~utils.smoothly_deprecate_use_auth_token`]: Use `token` instead of
- `use_auth_token` (only if `use_auth_token` is not expected by the decorated
- function - in practice, always the case in `huggingface_hub`).
-
- Example:
- ```py
- >>> from huggingface_hub.utils import validate_hf_hub_args
-
- >>> @validate_hf_hub_args
- ... def my_cool_method(repo_id: str):
- ... print(repo_id)
-
- >>> my_cool_method(repo_id="valid_repo_id")
- valid_repo_id
-
- >>> my_cool_method("other..repo..id")
- huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'.
-
- >>> my_cool_method(repo_id="other..repo..id")
- huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'.
-
- >>> @validate_hf_hub_args
- ... def my_cool_auth_method(token: str):
- ... print(token)
-
- >>> my_cool_auth_method(token="a token")
- "a token"
-
- >>> my_cool_auth_method(use_auth_token="a use_auth_token")
- "a use_auth_token"
-
- >>> my_cool_auth_method(token="a token", use_auth_token="a use_auth_token")
- UserWarning: Both `token` and `use_auth_token` are passed (...)
- "a token"
- ```
-
- Raises:
- [`~utils.HFValidationError`]:
- If an input is not valid.
- """
- # TODO: add an argument to opt-out validation for specific argument?
- signature = inspect.signature(fn)
-
- # Should the validator switch `use_auth_token` values to `token`? In practice, always
- # True in `huggingface_hub`. Might not be the case in a downstream library.
- check_use_auth_token = "use_auth_token" not in signature.parameters and "token" in signature.parameters
-
- @wraps(fn)
- def _inner_fn(*args, **kwargs):
- has_token = False
- for arg_name, arg_value in chain(
- zip(signature.parameters, args), # Args values
- kwargs.items(), # Kwargs values
- ):
- if arg_name in ["repo_id", "from_id", "to_id"]:
- validate_repo_id(arg_value)
-
- elif arg_name == "token" and arg_value is not None:
- has_token = True
-
- if check_use_auth_token:
- kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=has_token, kwargs=kwargs)
-
- return fn(*args, **kwargs)
-
- return _inner_fn # type: ignore
-
-
-def validate_repo_id(repo_id: str) -> None:
- """Validate `repo_id` is valid.
-
- This is not meant to replace the proper validation made on the Hub but rather to
- avoid local inconsistencies whenever possible (example: passing `repo_type` in the
- `repo_id` is forbidden).
-
- Rules:
- - Between 1 and 96 characters.
- - Either "repo_name" or "namespace/repo_name"
- - [a-zA-Z0-9] or "-", "_", "."
- - "--" and ".." are forbidden
-
- Valid: `"foo"`, `"foo/bar"`, `"123"`, `"Foo-BAR_foo.bar123"`
-
- Not valid: `"datasets/foo/bar"`, `".repo_id"`, `"foo--bar"`, `"foo.git"`
-
- Example:
- ```py
- >>> from huggingface_hub.utils import validate_repo_id
- >>> validate_repo_id(repo_id="valid_repo_id")
- >>> validate_repo_id(repo_id="other..repo..id")
- huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'.
- ```
-
- Discussed in https://github.com/huggingface/huggingface_hub/issues/1008.
- In moon-landing (internal repository):
- - https://github.com/huggingface/moon-landing/blob/main/server/lib/Names.ts#L27
- - https://github.com/huggingface/moon-landing/blob/main/server/views/components/NewRepoForm/NewRepoForm.svelte#L138
- """
- if not isinstance(repo_id, str):
- # Typically, a Path is not a repo_id
- raise HFValidationError(f"Repo id must be a string, not {type(repo_id)}: '{repo_id}'.")
-
- if repo_id.count("/") > 1:
- raise HFValidationError(
- "Repo id must be in the form 'repo_name' or 'namespace/repo_name':"
- f" '{repo_id}'. Use `repo_type` argument if needed."
- )
-
- if not REPO_ID_REGEX.match(repo_id):
- raise HFValidationError(
- "Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are"
- " forbidden, '-' and '.' cannot start or end the name, max length is 96:"
- f" '{repo_id}'."
- )
-
- if "--" in repo_id or ".." in repo_id:
- raise HFValidationError(f"Cannot have -- or .. in repo_id: '{repo_id}'.")
-
- if repo_id.endswith(".git"):
- raise HFValidationError(f"Repo_id cannot end by '.git': '{repo_id}'.")
-
-
-def smoothly_deprecate_use_auth_token(fn_name: str, has_token: bool, kwargs: Dict[str, Any]) -> Dict[str, Any]:
- """Smoothly deprecate `use_auth_token` in the `huggingface_hub` codebase.
-
- The long-term goal is to remove any mention of `use_auth_token` in the codebase in
- favor of a unique and less verbose `token` argument. This will be done a few steps:
-
- 0. Step 0: methods that require a read-access to the Hub use the `use_auth_token`
- argument (`str`, `bool` or `None`). Methods requiring write-access have a `token`
- argument (`str`, `None`). This implicit rule exists to be able to not send the
- token when not necessary (`use_auth_token=False`) even if logged in.
-
- 1. Step 1: we want to harmonize everything and use `token` everywhere (supporting
- `token=False` for read-only methods). In order not to break existing code, if
- `use_auth_token` is passed to a function, the `use_auth_token` value is passed
- as `token` instead, without any warning.
- a. Corner case: if both `use_auth_token` and `token` values are passed, a warning
- is thrown and the `use_auth_token` value is ignored.
-
- 2. Step 2: Once it is release, we should push downstream libraries to switch from
- `use_auth_token` to `token` as much as possible, but without throwing a warning
- (e.g. manually create issues on the corresponding repos).
-
- 3. Step 3: After a transitional period (6 months e.g. until April 2023?), we update
- `huggingface_hub` to throw a warning on `use_auth_token`. Hopefully, very few
- users will be impacted as it would have already been fixed.
- In addition, unit tests in `huggingface_hub` must be adapted to expect warnings
- to be thrown (but still use `use_auth_token` as before).
-
- 4. Step 4: After a normal deprecation cycle (3 releases ?), remove this validator.
- `use_auth_token` will definitely not be supported.
- In addition, we update unit tests in `huggingface_hub` to use `token` everywhere.
-
- This has been discussed in:
- - https://github.com/huggingface/huggingface_hub/issues/1094.
- - https://github.com/huggingface/huggingface_hub/pull/928
- - (related) https://github.com/huggingface/huggingface_hub/pull/1064
- """
- new_kwargs = kwargs.copy() # do not mutate input !
-
- use_auth_token = new_kwargs.pop("use_auth_token", None) # remove from kwargs
- if use_auth_token is not None:
- if has_token:
- warnings.warn(
- "Both `token` and `use_auth_token` are passed to"
- f" `{fn_name}` with non-None values. `token` is now the"
- " preferred argument to pass a User Access Token."
- " `use_auth_token` value will be ignored."
- )
- else:
- # `token` argument is not passed and a non-None value is passed in
- # `use_auth_token` => use `use_auth_token` value as `token` kwarg.
- new_kwargs["token"] = use_auth_token
-
- return new_kwargs
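The docstring above spells out the `repo_id` rules (1-96 characters, optional namespace, no `--`/`..`, no trailing `.git`). A quick usage sketch of the public validator on a few made-up ids, using only names exported by this file:

```python
# Exercise the repo_id rules described above (example ids are invented).
from huggingface_hub.utils import HFValidationError, validate_repo_id

for repo_id in ["gpt2", "username/my-model", "user/bad..name", "datasets/foo/bar"]:
    try:
        validate_repo_id(repo_id)
        print(f"valid:   {repo_id}")
    except HFValidationError as err:
        print(f"invalid: {repo_id} ({err})")
```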
diff --git a/spaces/DragGan/DragGan-Inversion/PTI/torch_utils/ops/fma.py b/spaces/DragGan/DragGan-Inversion/PTI/torch_utils/ops/fma.py
deleted file mode 100644
index 2eeac58a626c49231e04122b93e321ada954c5d3..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/PTI/torch_utils/ops/fma.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Fused multiply-add, with slightly faster gradients than `torch.addcmul()`."""
-
-import torch
-
-#----------------------------------------------------------------------------
-
-def fma(a, b, c): # => a * b + c
- return _FusedMultiplyAdd.apply(a, b, c)
-
-#----------------------------------------------------------------------------
-
-class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c
- @staticmethod
- def forward(ctx, a, b, c): # pylint: disable=arguments-differ
- out = torch.addcmul(c, a, b)
- ctx.save_for_backward(a, b)
- ctx.c_shape = c.shape
- return out
-
- @staticmethod
- def backward(ctx, dout): # pylint: disable=arguments-differ
- a, b = ctx.saved_tensors
- c_shape = ctx.c_shape
- da = None
- db = None
- dc = None
-
- if ctx.needs_input_grad[0]:
- da = _unbroadcast(dout * b, a.shape)
-
- if ctx.needs_input_grad[1]:
- db = _unbroadcast(dout * a, b.shape)
-
- if ctx.needs_input_grad[2]:
- dc = _unbroadcast(dout, c_shape)
-
- return da, db, dc
-
-#----------------------------------------------------------------------------
-
-def _unbroadcast(x, shape):
- extra_dims = x.ndim - len(shape)
- assert extra_dims >= 0
- dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)]
- if len(dim):
- x = x.sum(dim=dim, keepdim=True)
- if extra_dims:
- x = x.reshape(-1, *x.shape[extra_dims+1:])
- assert x.shape == shape
- return x
-
-#----------------------------------------------------------------------------
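The `_FusedMultiplyAdd` above returns `torch.addcmul(c, a, b)` in the forward pass and un-broadcasts the gradients by hand in the backward pass. A rough standalone check (not part of the original file) that the value and the broadcast-reduced gradient agree with a plain `a * b + c`:

```python
# Sketch: the fused op's forward matches addcmul, and the gradient w.r.t. a
# broadcast c is reduced over the broadcast dimension, as _unbroadcast does.
import torch

def fma(a, b, c):
    return torch.addcmul(c, a, b)  # same forward as _FusedMultiplyAdd

a = torch.randn(3, 4, requires_grad=True)
b = torch.randn(3, 4, requires_grad=True)
c = torch.randn(4, requires_grad=True)  # broadcast over the first dim

out = fma(a, b, c)
assert torch.allclose(out, a * b + c)

out.sum().backward()
assert torch.allclose(c.grad, torch.ones(3, 4).sum(dim=0))  # gradient summed over broadcast dim
```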
diff --git a/spaces/ElainaFanBoy/IRONY-Real-ESRGAN/scripts/generate_meta_info.py b/spaces/ElainaFanBoy/IRONY-Real-ESRGAN/scripts/generate_meta_info.py
deleted file mode 100644
index 9c3b7a37e85f534075c50e6c33d7cca999d8b836..0000000000000000000000000000000000000000
--- a/spaces/ElainaFanBoy/IRONY-Real-ESRGAN/scripts/generate_meta_info.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import argparse
-import cv2
-import glob
-import os
-
-
-def main(args):
- txt_file = open(args.meta_info, 'w')
- for folder, root in zip(args.input, args.root):
- img_paths = sorted(glob.glob(os.path.join(folder, '*')))
- for img_path in img_paths:
- status = True
- if args.check:
- # read the image once for check, as some images may have errors
- img = None
- try:
- img = cv2.imread(img_path)
- except (IOError, OSError) as error:
- print(f'Read {img_path} error: {error}')
- status = False
- # guards against both a read error and a failed decode
- if img is None:
- status = False
- print(f'Img is None: {img_path}')
- if status:
- # get the relative path
- img_name = os.path.relpath(img_path, root)
- print(img_name)
- txt_file.write(f'{img_name}\n')
-
-
-if __name__ == '__main__':
- """Generate meta info (txt file) for only Ground-Truth images.
-
- It can also generate meta info from several folders into one txt file.
- """
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--input',
- nargs='+',
- default=['datasets/DF2K/DF2K_HR', 'datasets/DF2K/DF2K_multiscale'],
- help='Input folder, can be a list')
- parser.add_argument(
- '--root',
- nargs='+',
- default=['datasets/DF2K', 'datasets/DF2K'],
- help='Folder root, should have the length as input folders')
- parser.add_argument(
- '--meta_info',
- type=str,
- default='datasets/DF2K/meta_info/meta_info_DF2Kmultiscale.txt',
- help='txt path for meta info')
- parser.add_argument('--check', action='store_true', help='Read image to check whether it is ok')
- args = parser.parse_args()
-
- assert len(args.input) == len(args.root), ('Input folder and folder root should have the same length, but got '
- f'{len(args.input)} and {len(args.root)}.')
- os.makedirs(os.path.dirname(args.meta_info), exist_ok=True)
-
- main(args)
diff --git a/spaces/EuroPython2022/latr-vqa/modeling.py b/spaces/EuroPython2022/latr-vqa/modeling.py
deleted file mode 100644
index c5e7d6f5167a90071fee86566710fa458f5c082c..0000000000000000000000000000000000000000
--- a/spaces/EuroPython2022/latr-vqa/modeling.py
+++ /dev/null
@@ -1,251 +0,0 @@
-import torch.nn as nn
-import torch
-from transformers import T5ForConditionalGeneration, ViTModel
-
-import pytorch_lightning as pl
-
-# Defining the pytorch model
-
-
-class LaTr_for_pretraining(nn.Module):
- def __init__(self, config, classify=False):
-
- super(LaTr_for_pretraining, self).__init__()
- self.vocab_size = config['vocab_size']
-
- model = T5ForConditionalGeneration.from_pretrained(config['t5_model'])
- # Removing the Embedding layer
- dummy_encoder = list(nn.Sequential(
- *list(model.encoder.children())[1:]).children())
- # Removing the Embedding Layer
- dummy_decoder = list(nn.Sequential(
- *list(model.decoder.children())[1:]).children())
-
- # Using the T5 Encoder
-
- self.list_encoder = nn.Sequential(*list(dummy_encoder[0]))
- self.residue_encoder = nn.Sequential(*list(dummy_encoder[1:]))
- self.list_decoder = nn.Sequential(*list(dummy_decoder[0]))
- self.residue_decoder = nn.Sequential(*list(dummy_decoder[1:]))
-
- # We use the embeddings of T5 for encoding the tokenized words
- self.language_emb = nn.Embedding.from_pretrained(model.shared.weight)
-
- self.top_left_x = nn.Embedding(
- config['max_2d_position_embeddings'], config['hidden_state'])
- self.bottom_right_x = nn.Embedding(
- config['max_2d_position_embeddings'], config['hidden_state'])
- self.top_left_y = nn.Embedding(
- config['max_2d_position_embeddings'], config['hidden_state'])
- self.bottom_right_y = nn.Embedding(
- config['max_2d_position_embeddings'], config['hidden_state'])
- self.width_emb = nn.Embedding(
- config['max_2d_position_embeddings'], config['hidden_state'])
- self.height_emb = nn.Embedding(
- config['max_2d_position_embeddings'], config['hidden_state'])
-
- self.classify = classify
- self.classification_layer = nn.Linear(
- config['hidden_state'], config['classes'])
-
- def forward(self, tokens, coordinates, predict_proba=False, predict_class=False):
-
- batch_size = len(tokens)
- embeded_feature = self.language_emb(tokens)
-
- top_left_x_feat = self.top_left_x(coordinates[:, :, 0])
- top_left_y_feat = self.top_left_y(coordinates[:, :, 1])
- bottom_right_x_feat = self.bottom_right_x(coordinates[:, :, 2])
- bottom_right_y_feat = self.bottom_right_y(coordinates[:, :, 3])
- width_feat = self.width_emb(coordinates[:, :, 4])
- height_feat = self.height_emb(coordinates[:, :, 5])
-
- total_feat = embeded_feature + top_left_x_feat + top_left_y_feat + \
- bottom_right_x_feat + bottom_right_y_feat + width_feat + height_feat
-
- # Extracting the feature
-
- for layer in self.list_encoder:
- total_feat = layer(total_feat)[0]
- total_feat = self.residue_encoder(total_feat)
-
- for layer in self.list_decoder:
- total_feat = layer(total_feat)[0]
- total_feat = self.residue_decoder(total_feat)
-
- if self.classify:
- total_feat = self.classification_layer(total_feat)
-
- if predict_proba:
- return total_feat.softmax(axis=-1)
-
- if predict_class:
- return total_feat.argmax(axis=-1)
-
- return total_feat
-
-
-class LaTr_for_finetuning(nn.Module):
- def __init__(self, config, address_to_pre_trained_weights=None):
- super(LaTr_for_finetuning, self).__init__()
-
- self.config = config
- self.vocab_size = config['vocab_size']
-
- self.pre_training_model = LaTr_for_pretraining(config)
- if address_to_pre_trained_weights is not None:
- self.pre_training_model.load_state_dict(
- torch.load(address_to_pre_trained_weights))
- self.vit = ViTModel.from_pretrained(
- "google/vit-base-patch16-224-in21k")
-
- # In the fine-tuning stage of ViT, all layers except the last are frozen
-
- self.classification_head = nn.Linear(
- config['hidden_state'], config['classes'])
-
- def forward(self, lang_vect, spatial_vect, quest_vect, img_vect):
-
- # The block below computes the language and spatial features
- embeded_feature = self.pre_training_model.language_emb(lang_vect)
- top_left_x_feat = self.pre_training_model.top_left_x(
- spatial_vect[:, :, 0])
- top_left_y_feat = self.pre_training_model.top_left_y(
- spatial_vect[:, :, 1])
- bottom_right_x_feat = self.pre_training_model.bottom_right_x(
- spatial_vect[:, :, 2])
- bottom_right_y_feat = self.pre_training_model.bottom_right_y(
- spatial_vect[:, :, 3])
- width_feat = self.pre_training_model.width_emb(spatial_vect[:, :, 4])
- height_feat = self.pre_training_model.height_emb(spatial_vect[:, :, 5])
-
- spatial_lang_feat = embeded_feature + top_left_x_feat + top_left_y_feat + \
- bottom_right_x_feat + bottom_right_y_feat + width_feat + height_feat
-
- # Extracting the image feature, using the Vision Transformer
- img_feat = self.vit(img_vect).last_hidden_state
-
- # Extracting the question vector
- quest_feat = self.pre_training_model.language_emb(quest_vect)
-
- # Concating the three features, and then passing it through the T5 Transformer
- final_feat = torch.cat(
- [img_feat, spatial_lang_feat, quest_feat], axis=-2)
-
- # Passing through the T5 Transformer
- for layer in self.pre_training_model.list_encoder:
- final_feat = layer(final_feat)[0]
-
- final_feat = self.pre_training_model.residue_encoder(final_feat)
-
- for layer in self.pre_training_model.list_decoder:
- final_feat = layer(final_feat)[0]
- final_feat = self.pre_training_model.residue_decoder(final_feat)
-
- answer_vector = self.classification_head(
- final_feat)[:, :self.config['seq_len'], :]
-
- return answer_vector
-
-
-def polynomial(base_lr, iter, max_iter=1e5, power=1):
- return base_lr * ((1 - float(iter) / max_iter) ** power)
-
-
-class LaTrForVQA(pl.LightningModule):
- def __init__(self, config, learning_rate=1e-4, max_steps=100000//2):
- super(LaTrForVQA, self).__init__()
-
- self.config = config
- self.save_hyperparameters()
- self.latr = LaTr_for_finetuning(config)
- self.training_losses = []
- self.validation_losses = []
- self.max_steps = max_steps
-
- def configure_optimizers(self):
- return torch.optim.AdamW(self.parameters(), lr=self.hparams['learning_rate'])
-
- def forward(self, batch_dict):
- boxes = batch_dict['boxes']
- img = batch_dict['img']
- question = batch_dict['question']
- words = batch_dict['tokenized_words']
- answer_vector = self.latr(lang_vect=words,
- spatial_vect=boxes,
- img_vect=img,
- quest_vect=question
- )
- return answer_vector
-
- def calculate_metrics(self, prediction, labels):
-
- # Calculate the accuracy between the predictions and ground-truth labels for a batch, taking the pad token into account
- batch_size = len(prediction)
- ac_score = 0
-
- for (pred, gt) in zip(prediction, labels):
- ac_score += calculate_acc_score(pred.detach().cpu(),
- gt.detach().cpu())
- ac_score = ac_score/batch_size
- return ac_score
-
- def training_step(self, batch, batch_idx):
- answer_vector = self.forward(batch)
-
- # https://discuss.huggingface.co/t/bertformaskedlm-s-loss-and-scores-how-the-loss-is-computed/607/2
- loss = nn.CrossEntropyLoss(ignore_index=0)(
- answer_vector.reshape(-1, self.config['classes']), batch['answer'].reshape(-1))
- _, preds = torch.max(answer_vector, dim=-1)
-
- # Calculating the accuracy score
- train_acc = self.calculate_metrics(preds, batch['answer'])
- train_acc = torch.tensor(train_acc)
-
- # Logging
- self.log('train_ce_loss', loss, prog_bar=True)
- self.log('train_acc', train_acc, prog_bar=True)
- self.training_losses.append(loss.item())
-
- return loss
-
- def validation_step(self, batch, batch_idx):
- logits = self.forward(batch)
- loss = nn.CrossEntropyLoss(ignore_index=0)(
- logits.reshape(-1, self.config['classes']), batch['answer'].reshape(-1))
- _, preds = torch.max(logits, dim=-1)
-
- # Validation Accuracy
- val_acc = self.calculate_metrics(preds.cpu(), batch['answer'].cpu())
- val_acc = torch.tensor(val_acc)
-
- # Logging
- self.log('val_ce_loss', loss, prog_bar=True)
- self.log('val_acc', val_acc, prog_bar=True)
- self.validation_losses.append(loss.item())
- return {'val_loss': loss, 'val_acc': val_acc}
-
- def optimizer_step(self, epoch_nb, batch_nb, optimizer, optimizer_i, opt_closure=None, on_tpu=False,
- using_native_amp=False, using_lbfgs=False):
-
- # Warmup for 1000 steps
- if self.trainer.global_step < 1000:
- lr_scale = min(1., float(self.trainer.global_step + 1) / 1000.)
- for pg in optimizer.param_groups:
- pg['lr'] = lr_scale * self.hparams.learning_rate
-
- # Linear Decay
- else:
- for pg in optimizer.param_groups:
- pg['lr'] = polynomial(
- self.hparams.learning_rate, self.trainer.global_step, max_iter=self.max_steps)
-
- optimizer.step(opt_closure)
- optimizer.zero_grad()
-
- def validation_epoch_end(self, outputs):
- val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
- val_acc = torch.stack([x['val_acc'] for x in outputs]).mean()
-
- self.log('val_loss_epoch_end', val_loss, on_epoch=True, sync_dist=True)
- self.log('val_acc_epoch_end', val_acc, on_epoch=True, sync_dist=True)
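-
-
-# Minimal sketch (illustrative only; the step values below are arbitrary) of the
-# learning-rate schedule applied in LaTrForVQA.optimizer_step: a linear warmup over
-# the first 1000 steps followed by the polynomial() decay defined above.
-if __name__ == '__main__':
-    base_lr = 1e-4
-    for step in [0, 500, 999, 1000, 25000, 50000]:
-        if step < 1000:
-            lr = min(1., float(step + 1) / 1000.) * base_lr
-        else:
-            lr = polynomial(base_lr, step, max_iter=50000)
-        print(f"step {step:>6}: lr = {lr:.2e}")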
diff --git a/spaces/EuroPython2022/mmocr-demo/configs/textrecog/sar/sar_r31_parallel_decoder_academic.py b/spaces/EuroPython2022/mmocr-demo/configs/textrecog/sar/sar_r31_parallel_decoder_academic.py
deleted file mode 100644
index 983378118b4d589f531a7f401a06d238966a45d4..0000000000000000000000000000000000000000
--- a/spaces/EuroPython2022/mmocr-demo/configs/textrecog/sar/sar_r31_parallel_decoder_academic.py
+++ /dev/null
@@ -1,33 +0,0 @@
-_base_ = [
- '../../_base_/default_runtime.py', '../../_base_/recog_models/sar.py',
- '../../_base_/schedules/schedule_adam_step_5e.py',
- '../../_base_/recog_pipelines/sar_pipeline.py',
- '../../_base_/recog_datasets/ST_SA_MJ_real_train.py',
- '../../_base_/recog_datasets/academic_test.py'
-]
-
-train_list = {{_base_.train_list}}
-test_list = {{_base_.test_list}}
-
-train_pipeline = {{_base_.train_pipeline}}
-test_pipeline = {{_base_.test_pipeline}}
-
-data = dict(
- samples_per_gpu=64,
- workers_per_gpu=2,
- val_dataloader=dict(samples_per_gpu=1),
- test_dataloader=dict(samples_per_gpu=1),
- train=dict(
- type='UniformConcatDataset',
- datasets=train_list,
- pipeline=train_pipeline),
- val=dict(
- type='UniformConcatDataset',
- datasets=test_list,
- pipeline=test_pipeline),
- test=dict(
- type='UniformConcatDataset',
- datasets=test_list,
- pipeline=test_pipeline))
-
-evaluation = dict(interval=1, metric='acc')
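-
-# Illustrative note: a config like this is normally consumed through mmcv's Config
-# loader, which resolves the _base_ inheritance and the {{_base_.*}} placeholders
-# above. A typical (hypothetical) call would be:
-#   from mmcv import Config
-#   cfg = Config.fromfile('sar_r31_parallel_decoder_academic.py')
-#   print(cfg.data.samples_per_gpu)  # 64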
diff --git a/spaces/FloydianSound/Nixeu_Diffusion/app.py b/spaces/FloydianSound/Nixeu_Diffusion/app.py
deleted file mode 100644
index 2edb2c8b670c628ef4471b54ca58519c22df5b9f..0000000000000000000000000000000000000000
--- a/spaces/FloydianSound/Nixeu_Diffusion/app.py
+++ /dev/null
@@ -1,137 +0,0 @@
-from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
-import gradio as gr
-import torch
-from PIL import Image
-
-model_id = 'FloydianSound/Nixeu_Diffusion'
-prefix = 'nixeu'
-
-scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
-
-pipe = StableDiffusionPipeline.from_pretrained(
- model_id,
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
- scheduler=scheduler)
-
-pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(
- model_id,
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
- scheduler=scheduler)
-
-if torch.cuda.is_available():
- pipe = pipe.to("cuda")
- pipe_i2i = pipe_i2i.to("cuda")
-
-def error_str(error, title="Error"):
- return f"""#### {title}
- {error}""" if error else ""
-
-def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False):
-
- generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
- prompt = f"{prefix} {prompt}" if auto_prefix else prompt
-
- try:
- if img is not None:
- return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None
- else:
- return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None
- except Exception as e:
- return None, error_str(e)
-
-def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
-
- result = pipe(
- prompt,
- negative_prompt = neg_prompt,
- num_inference_steps = int(steps),
- guidance_scale = guidance,
- width = width,
- height = height,
- generator = generator)
-
- return result.images[0]
-
-def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
-
- ratio = min(height / img.height, width / img.width)
- img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
- result = pipe_i2i(
- prompt,
- negative_prompt = neg_prompt,
- init_image = img,
- num_inference_steps = int(steps),
- strength = strength,
- guidance_scale = guidance,
- width = width,
- height = height,
- generator = generator)
-
- return result.images[0]
-
-css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
-"""
-with gr.Blocks(css=css) as demo:
- gr.HTML(
- f"""
-      <div class="main-div">
-        <div>
-          <h1>Nixeu Diffusion</h1>
-        </div>
-        <p>
-          Demo for Nixeu Diffusion Stable Diffusion model.<br>
-          {"Add the following tokens to your prompts for the model to work properly: <b>prefix</b>" if prefix else ""}
-        </p>
-        Running on {"<b>GPU 🔥</b>" if torch.cuda.is_available() else f"<b>CPU 🥶</b>. For faster inference it is recommended to <b>upgrade to GPU in Settings</b>"}
-      </div>
-      """
- )
- with gr.Row():
-
- with gr.Column(scale=55):
- with gr.Group():
- with gr.Row():
- prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder=f"{prefix} [your prompt]").style(container=False)
- generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
-
- image_out = gr.Image(height=512)
- error_output = gr.Markdown()
-
- with gr.Column(scale=45):
- with gr.Tab("Options"):
- with gr.Group():
- neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
- auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically (nixeu)", value=prefix, visible=prefix)
-
- with gr.Row():
- guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
- steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)
-
- with gr.Row():
- width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
- height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
-
- seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
-
- with gr.Tab("Image to image"):
- with gr.Group():
- image = gr.Image(label="Image", height=256, tool="editor", type="pil")
- strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
-
- auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False)
-
- inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix]
- outputs = [image_out, error_output]
- prompt.submit(inference, inputs=inputs, outputs=outputs)
- generate.click(inference, inputs=inputs, outputs=outputs)
-
- gr.HTML("""
-
- """)
-
-demo.queue(concurrency_count=1)
-demo.launch()
diff --git a/spaces/FridaZuley/RVC_HFKawaii/lib/uvr5_pack/lib_v5/spec_utils.py b/spaces/FridaZuley/RVC_HFKawaii/lib/uvr5_pack/lib_v5/spec_utils.py
deleted file mode 100644
index a3fd46d333da7becc7f09f42c084ac7cde661035..0000000000000000000000000000000000000000
--- a/spaces/FridaZuley/RVC_HFKawaii/lib/uvr5_pack/lib_v5/spec_utils.py
+++ /dev/null
@@ -1,667 +0,0 @@
-import os, librosa
-import numpy as np
-import soundfile as sf
-from tqdm import tqdm
-import json, math, hashlib
-
-
-def crop_center(h1, h2):
- h1_shape = h1.size()
- h2_shape = h2.size()
-
- if h1_shape[3] == h2_shape[3]:
- return h1
- elif h1_shape[3] < h2_shape[3]:
- raise ValueError("h1_shape[3] must be greater than h2_shape[3]")
-
- # s_freq = (h2_shape[2] - h1_shape[2]) // 2
- # e_freq = s_freq + h1_shape[2]
- s_time = (h1_shape[3] - h2_shape[3]) // 2
- e_time = s_time + h2_shape[3]
- h1 = h1[:, :, :, s_time:e_time]
-
- return h1
-
-
-def wave_to_spectrogram(
- wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False
-):
- if reverse:
- wave_left = np.flip(np.asfortranarray(wave[0]))
- wave_right = np.flip(np.asfortranarray(wave[1]))
- elif mid_side:
- wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2)
- wave_right = np.asfortranarray(np.subtract(wave[0], wave[1]))
- elif mid_side_b2:
- wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5))
- wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5))
- else:
- wave_left = np.asfortranarray(wave[0])
- wave_right = np.asfortranarray(wave[1])
-
- spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length)
- spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)
-
- spec = np.asfortranarray([spec_left, spec_right])
-
- return spec
-
-
-def wave_to_spectrogram_mt(
- wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False
-):
- import threading
-
- if reverse:
- wave_left = np.flip(np.asfortranarray(wave[0]))
- wave_right = np.flip(np.asfortranarray(wave[1]))
- elif mid_side:
- wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2)
- wave_right = np.asfortranarray(np.subtract(wave[0], wave[1]))
- elif mid_side_b2:
- wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5))
- wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5))
- else:
- wave_left = np.asfortranarray(wave[0])
- wave_right = np.asfortranarray(wave[1])
-
- def run_thread(**kwargs):
- global spec_left
- spec_left = librosa.stft(**kwargs)
-
- thread = threading.Thread(
- target=run_thread,
- kwargs={"y": wave_left, "n_fft": n_fft, "hop_length": hop_length},
- )
- thread.start()
- spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)
- thread.join()
-
- spec = np.asfortranarray([spec_left, spec_right])
-
- return spec
-
-
-def combine_spectrograms(specs, mp):
- l = min([specs[i].shape[2] for i in specs])
- spec_c = np.zeros(shape=(2, mp.param["bins"] + 1, l), dtype=np.complex64)
- offset = 0
- bands_n = len(mp.param["band"])
-
- for d in range(1, bands_n + 1):
- h = mp.param["band"][d]["crop_stop"] - mp.param["band"][d]["crop_start"]
- spec_c[:, offset : offset + h, :l] = specs[d][
- :, mp.param["band"][d]["crop_start"] : mp.param["band"][d]["crop_stop"], :l
- ]
- offset += h
-
- if offset > mp.param["bins"]:
- raise ValueError("Too much bins")
-
- # lowpass fiter
- if (
- mp.param["pre_filter_start"] > 0
- ): # and mp.param['band'][bands_n]['res_type'] in ['scipy', 'polyphase']:
- if bands_n == 1:
- spec_c = fft_lp_filter(
- spec_c, mp.param["pre_filter_start"], mp.param["pre_filter_stop"]
- )
- else:
- gp = 1
- for b in range(
- mp.param["pre_filter_start"] + 1, mp.param["pre_filter_stop"]
- ):
- g = math.pow(
- 10, -(b - mp.param["pre_filter_start"]) * (3.5 - gp) / 20.0
- )
- gp = g
- spec_c[:, b, :] *= g
-
- return np.asfortranarray(spec_c)
-
-
-def spectrogram_to_image(spec, mode="magnitude"):
- if mode == "magnitude":
- if np.iscomplexobj(spec):
- y = np.abs(spec)
- else:
- y = spec
- y = np.log10(y**2 + 1e-8)
- elif mode == "phase":
- if np.iscomplexobj(spec):
- y = np.angle(spec)
- else:
- y = spec
-
- y -= y.min()
- y *= 255 / y.max()
- img = np.uint8(y)
-
- if y.ndim == 3:
- img = img.transpose(1, 2, 0)
- img = np.concatenate([np.max(img, axis=2, keepdims=True), img], axis=2)
-
- return img
-
-
-def reduce_vocal_aggressively(X, y, softmask):
- v = X - y
- y_mag_tmp = np.abs(y)
- v_mag_tmp = np.abs(v)
-
- v_mask = v_mag_tmp > y_mag_tmp
- y_mag = np.clip(y_mag_tmp - v_mag_tmp * v_mask * softmask, 0, np.inf)
-
- return y_mag * np.exp(1.0j * np.angle(y))
-
-
-def mask_silence(mag, ref, thres=0.2, min_range=64, fade_size=32):
- if min_range < fade_size * 2:
- raise ValueError("min_range must be >= fade_area * 2")
-
- mag = mag.copy()
-
- idx = np.where(ref.mean(axis=(0, 1)) < thres)[0]
- starts = np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0])
- ends = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1])
- uninformative = np.where(ends - starts > min_range)[0]
- if len(uninformative) > 0:
- starts = starts[uninformative]
- ends = ends[uninformative]
- old_e = None
- for s, e in zip(starts, ends):
- if old_e is not None and s - old_e < fade_size:
- s = old_e - fade_size * 2
-
- if s != 0:
- weight = np.linspace(0, 1, fade_size)
- mag[:, :, s : s + fade_size] += weight * ref[:, :, s : s + fade_size]
- else:
- s -= fade_size
-
- if e != mag.shape[2]:
- weight = np.linspace(1, 0, fade_size)
- mag[:, :, e - fade_size : e] += weight * ref[:, :, e - fade_size : e]
- else:
- e += fade_size
-
- mag[:, :, s + fade_size : e - fade_size] += ref[
- :, :, s + fade_size : e - fade_size
- ]
- old_e = e
-
- return mag
-
-
-def align_wave_head_and_tail(a, b):
- l = min([a[0].size, b[0].size])
-
- return a[:l, :l], b[:l, :l]
-
-
-def cache_or_load(mix_path, inst_path, mp):
- mix_basename = os.path.splitext(os.path.basename(mix_path))[0]
- inst_basename = os.path.splitext(os.path.basename(inst_path))[0]
-
- cache_dir = "mph{}".format(
- hashlib.sha1(json.dumps(mp.param, sort_keys=True).encode("utf-8")).hexdigest()
- )
- mix_cache_dir = os.path.join("cache", cache_dir)
- inst_cache_dir = os.path.join("cache", cache_dir)
-
- os.makedirs(mix_cache_dir, exist_ok=True)
- os.makedirs(inst_cache_dir, exist_ok=True)
-
- mix_cache_path = os.path.join(mix_cache_dir, mix_basename + ".npy")
- inst_cache_path = os.path.join(inst_cache_dir, inst_basename + ".npy")
-
- if os.path.exists(mix_cache_path) and os.path.exists(inst_cache_path):
- X_spec_m = np.load(mix_cache_path)
- y_spec_m = np.load(inst_cache_path)
- else:
- X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
-
- for d in range(len(mp.param["band"]), 0, -1):
- bp = mp.param["band"][d]
-
- if d == len(mp.param["band"]): # high-end band
- X_wave[d], _ = librosa.load(
- mix_path, bp["sr"], False, dtype=np.float32, res_type=bp["res_type"]
- )
- y_wave[d], _ = librosa.load(
- inst_path,
- bp["sr"],
- False,
- dtype=np.float32,
- res_type=bp["res_type"],
- )
- else: # lower bands
- X_wave[d] = librosa.resample(
- X_wave[d + 1],
- mp.param["band"][d + 1]["sr"],
- bp["sr"],
- res_type=bp["res_type"],
- )
- y_wave[d] = librosa.resample(
- y_wave[d + 1],
- mp.param["band"][d + 1]["sr"],
- bp["sr"],
- res_type=bp["res_type"],
- )
-
- X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d])
-
- X_spec_s[d] = wave_to_spectrogram(
- X_wave[d],
- bp["hl"],
- bp["n_fft"],
- mp.param["mid_side"],
- mp.param["mid_side_b2"],
- mp.param["reverse"],
- )
- y_spec_s[d] = wave_to_spectrogram(
- y_wave[d],
- bp["hl"],
- bp["n_fft"],
- mp.param["mid_side"],
- mp.param["mid_side_b2"],
- mp.param["reverse"],
- )
-
- del X_wave, y_wave
-
- X_spec_m = combine_spectrograms(X_spec_s, mp)
- y_spec_m = combine_spectrograms(y_spec_s, mp)
-
- if X_spec_m.shape != y_spec_m.shape:
- raise ValueError("The combined spectrograms are different: " + mix_path)
-
- _, ext = os.path.splitext(mix_path)
-
- np.save(mix_cache_path, X_spec_m)
- np.save(inst_cache_path, y_spec_m)
-
- return X_spec_m, y_spec_m
-
-
-def spectrogram_to_wave(spec, hop_length, mid_side, mid_side_b2, reverse):
- spec_left = np.asfortranarray(spec[0])
- spec_right = np.asfortranarray(spec[1])
-
- wave_left = librosa.istft(spec_left, hop_length=hop_length)
- wave_right = librosa.istft(spec_right, hop_length=hop_length)
-
- if reverse:
- return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)])
- elif mid_side:
- return np.asfortranarray(
- [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]
- )
- elif mid_side_b2:
- return np.asfortranarray(
- [
- np.add(wave_right / 1.25, 0.4 * wave_left),
- np.subtract(wave_left / 1.25, 0.4 * wave_right),
- ]
- )
- else:
- return np.asfortranarray([wave_left, wave_right])
-
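-
-# Minimal round-trip sketch (illustrative; the random signal and STFT settings are
-# arbitrary assumptions): wave_to_spectrogram followed by spectrogram_to_wave should
-# reproduce the input up to STFT edge effects.
-def _spectrogram_roundtrip_demo(n_fft=2048, hop_length=512):
-    dummy_wave = np.random.randn(2, hop_length * 64).astype(np.float32)
-    spec = wave_to_spectrogram(dummy_wave, hop_length, n_fft)
-    recon = spectrogram_to_wave(spec, hop_length, mid_side=False,
-                                mid_side_b2=False, reverse=False)
-    n = min(dummy_wave.shape[1], recon.shape[1])
-    return float(np.abs(dummy_wave[:, :n] - recon[:, :n]).max())
-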
-
-def spectrogram_to_wave_mt(spec, hop_length, mid_side, reverse, mid_side_b2):
- import threading
-
- spec_left = np.asfortranarray(spec[0])
- spec_right = np.asfortranarray(spec[1])
-
- def run_thread(**kwargs):
- global wave_left
- wave_left = librosa.istft(**kwargs)
-
- thread = threading.Thread(
- target=run_thread, kwargs={"stft_matrix": spec_left, "hop_length": hop_length}
- )
- thread.start()
- wave_right = librosa.istft(spec_right, hop_length=hop_length)
- thread.join()
-
- if reverse:
- return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)])
- elif mid_side:
- return np.asfortranarray(
- [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]
- )
- elif mid_side_b2:
- return np.asfortranarray(
- [
- np.add(wave_right / 1.25, 0.4 * wave_left),
- np.subtract(wave_left / 1.25, 0.4 * wave_right),
- ]
- )
- else:
- return np.asfortranarray([wave_left, wave_right])
-
-
-def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None):
- wave_band = {}
- bands_n = len(mp.param["band"])
- offset = 0
-
- for d in range(1, bands_n + 1):
- bp = mp.param["band"][d]
- spec_s = np.ndarray(
- shape=(2, bp["n_fft"] // 2 + 1, spec_m.shape[2]), dtype=complex
- )
- h = bp["crop_stop"] - bp["crop_start"]
- spec_s[:, bp["crop_start"] : bp["crop_stop"], :] = spec_m[
- :, offset : offset + h, :
- ]
-
- offset += h
- if d == bands_n: # higher
- if extra_bins_h: # if --high_end_process bypass
- max_bin = bp["n_fft"] // 2
- spec_s[:, max_bin - extra_bins_h : max_bin, :] = extra_bins[
- :, :extra_bins_h, :
- ]
- if bp["hpf_start"] > 0:
- spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1)
- if bands_n == 1:
- wave = spectrogram_to_wave(
- spec_s,
- bp["hl"],
- mp.param["mid_side"],
- mp.param["mid_side_b2"],
- mp.param["reverse"],
- )
- else:
- wave = np.add(
- wave,
- spectrogram_to_wave(
- spec_s,
- bp["hl"],
- mp.param["mid_side"],
- mp.param["mid_side_b2"],
- mp.param["reverse"],
- ),
- )
- else:
- sr = mp.param["band"][d + 1]["sr"]
- if d == 1: # lower
- spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"])
- wave = librosa.resample(
- spectrogram_to_wave(
- spec_s,
- bp["hl"],
- mp.param["mid_side"],
- mp.param["mid_side_b2"],
- mp.param["reverse"],
- ),
- bp["sr"],
- sr,
- res_type="sinc_fastest",
- )
- else: # mid
- spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1)
- spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"])
- wave2 = np.add(
- wave,
- spectrogram_to_wave(
- spec_s,
- bp["hl"],
- mp.param["mid_side"],
- mp.param["mid_side_b2"],
- mp.param["reverse"],
- ),
- )
- # wave = librosa.core.resample(wave2, bp['sr'], sr, res_type="sinc_fastest")
- wave = librosa.core.resample(wave2, bp["sr"], sr, res_type="scipy")
-
- return wave.T
-
-
-def fft_lp_filter(spec, bin_start, bin_stop):
- g = 1.0
- for b in range(bin_start, bin_stop):
- g -= 1 / (bin_stop - bin_start)
- spec[:, b, :] = g * spec[:, b, :]
-
- spec[:, bin_stop:, :] *= 0
-
- return spec
-
-
-def fft_hp_filter(spec, bin_start, bin_stop):
- g = 1.0
- for b in range(bin_start, bin_stop, -1):
- g -= 1 / (bin_start - bin_stop)
- spec[:, b, :] = g * spec[:, b, :]
-
- spec[:, 0 : bin_stop + 1, :] *= 0
-
- return spec
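-
-# Worked example (illustrative): fft_lp_filter(spec, 100, 110) scales bin 100 by 0.9,
-# bin 101 by 0.8, ... down to 0.0 at bin 109, and zeroes every bin from 110 upward;
-# fft_hp_filter applies the mirrored ramp below bin_start and zeroes bins <= bin_stop.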
-
-
-def mirroring(a, spec_m, input_high_end, mp):
- if "mirroring" == a:
- mirror = np.flip(
- np.abs(
- spec_m[
- :,
- mp.param["pre_filter_start"]
- - 10
- - input_high_end.shape[1] : mp.param["pre_filter_start"]
- - 10,
- :,
- ]
- ),
- 1,
- )
- mirror = mirror * np.exp(1.0j * np.angle(input_high_end))
-
- return np.where(
- np.abs(input_high_end) <= np.abs(mirror), input_high_end, mirror
- )
-
- if "mirroring2" == a:
- mirror = np.flip(
- np.abs(
- spec_m[
- :,
- mp.param["pre_filter_start"]
- - 10
- - input_high_end.shape[1] : mp.param["pre_filter_start"]
- - 10,
- :,
- ]
- ),
- 1,
- )
- mi = np.multiply(mirror, input_high_end * 1.7)
-
- return np.where(np.abs(input_high_end) <= np.abs(mi), input_high_end, mi)
-
-
-def ensembling(a, specs):
- for i in range(1, len(specs)):
- if i == 1:
- spec = specs[0]
-
- ln = min([spec.shape[2], specs[i].shape[2]])
- spec = spec[:, :, :ln]
- specs[i] = specs[i][:, :, :ln]
-
- if "min_mag" == a:
- spec = np.where(np.abs(specs[i]) <= np.abs(spec), specs[i], spec)
- if "max_mag" == a:
- spec = np.where(np.abs(specs[i]) >= np.abs(spec), specs[i], spec)
-
- return spec
-
-
-def stft(wave, nfft, hl):
- wave_left = np.asfortranarray(wave[0])
- wave_right = np.asfortranarray(wave[1])
- spec_left = librosa.stft(wave_left, nfft, hop_length=hl)
- spec_right = librosa.stft(wave_right, nfft, hop_length=hl)
- spec = np.asfortranarray([spec_left, spec_right])
-
- return spec
-
-
-def istft(spec, hl):
- spec_left = np.asfortranarray(spec[0])
- spec_right = np.asfortranarray(spec[1])
-
- wave_left = librosa.istft(spec_left, hop_length=hl)
- wave_right = librosa.istft(spec_right, hop_length=hl)
-    wave = np.asfortranarray([wave_left, wave_right])
-    return wave
-
-
-if __name__ == "__main__":
- import cv2
- import sys
- import time
- import argparse
- from model_param_init import ModelParameters
-
- p = argparse.ArgumentParser()
- p.add_argument(
- "--algorithm",
- "-a",
- type=str,
- choices=["invert", "invert_p", "min_mag", "max_mag", "deep", "align"],
- default="min_mag",
- )
- p.add_argument(
- "--model_params",
- "-m",
- type=str,
- default=os.path.join("modelparams", "1band_sr44100_hl512.json"),
- )
- p.add_argument("--output_name", "-o", type=str, default="output")
- p.add_argument("--vocals_only", "-v", action="store_true")
- p.add_argument("input", nargs="+")
- args = p.parse_args()
-
- start_time = time.time()
-
- if args.algorithm.startswith("invert") and len(args.input) != 2:
- raise ValueError("There should be two input files.")
-
- if not args.algorithm.startswith("invert") and len(args.input) < 2:
- raise ValueError("There must be at least two input files.")
-
- wave, specs = {}, {}
- mp = ModelParameters(args.model_params)
-
- for i in range(len(args.input)):
- spec = {}
-
- for d in range(len(mp.param["band"]), 0, -1):
- bp = mp.param["band"][d]
-
- if d == len(mp.param["band"]): # high-end band
- wave[d], _ = librosa.load(
- args.input[i],
- bp["sr"],
- False,
- dtype=np.float32,
- res_type=bp["res_type"],
- )
-
- if len(wave[d].shape) == 1: # mono to stereo
- wave[d] = np.array([wave[d], wave[d]])
- else: # lower bands
- wave[d] = librosa.resample(
- wave[d + 1],
- mp.param["band"][d + 1]["sr"],
- bp["sr"],
- res_type=bp["res_type"],
- )
-
- spec[d] = wave_to_spectrogram(
- wave[d],
- bp["hl"],
- bp["n_fft"],
- mp.param["mid_side"],
- mp.param["mid_side_b2"],
- mp.param["reverse"],
- )
-
- specs[i] = combine_spectrograms(spec, mp)
-
- del wave
-
- if args.algorithm == "deep":
-        d_spec = np.where(np.abs(specs[0]) <= np.abs(specs[1]), specs[0], specs[1])
- v_spec = d_spec - specs[1]
- sf.write(
- os.path.join("{}.wav".format(args.output_name)),
- cmb_spectrogram_to_wave(v_spec, mp),
- mp.param["sr"],
- )
-
- if args.algorithm.startswith("invert"):
- ln = min([specs[0].shape[2], specs[1].shape[2]])
- specs[0] = specs[0][:, :, :ln]
- specs[1] = specs[1][:, :, :ln]
-
- if "invert_p" == args.algorithm:
- X_mag = np.abs(specs[0])
- y_mag = np.abs(specs[1])
- max_mag = np.where(X_mag >= y_mag, X_mag, y_mag)
- v_spec = specs[1] - max_mag * np.exp(1.0j * np.angle(specs[0]))
- else:
- specs[1] = reduce_vocal_aggressively(specs[0], specs[1], 0.2)
- v_spec = specs[0] - specs[1]
-
- if not args.vocals_only:
- X_mag = np.abs(specs[0])
- y_mag = np.abs(specs[1])
- v_mag = np.abs(v_spec)
-
- X_image = spectrogram_to_image(X_mag)
- y_image = spectrogram_to_image(y_mag)
- v_image = spectrogram_to_image(v_mag)
-
- cv2.imwrite("{}_X.png".format(args.output_name), X_image)
- cv2.imwrite("{}_y.png".format(args.output_name), y_image)
- cv2.imwrite("{}_v.png".format(args.output_name), v_image)
-
- sf.write(
- "{}_X.wav".format(args.output_name),
- cmb_spectrogram_to_wave(specs[0], mp),
- mp.param["sr"],
- )
- sf.write(
- "{}_y.wav".format(args.output_name),
- cmb_spectrogram_to_wave(specs[1], mp),
- mp.param["sr"],
- )
-
- sf.write(
- "{}_v.wav".format(args.output_name),
- cmb_spectrogram_to_wave(v_spec, mp),
- mp.param["sr"],
- )
- else:
- if not args.algorithm == "deep":
- sf.write(
- os.path.join("ensembled", "{}.wav".format(args.output_name)),
- cmb_spectrogram_to_wave(ensembling(args.algorithm, specs), mp),
- mp.param["sr"],
- )
-
- if args.algorithm == "align":
- trackalignment = [
- {
- "file1": '"{}"'.format(args.input[0]),
- "file2": '"{}"'.format(args.input[1]),
- }
- ]
-
- for i, e in tqdm(enumerate(trackalignment), desc="Performing Alignment..."):
- os.system(f"python lib/align_tracks.py {e['file1']} {e['file2']}")
-
- # print('Total time: {0:.{1}f}s'.format(time.time() - start_time, 1))
diff --git a/spaces/GMFTBY/PandaGPT/header.py b/spaces/GMFTBY/PandaGPT/header.py
deleted file mode 100644
index 97338165d32d531838566ade9c9217182bb8ea67..0000000000000000000000000000000000000000
--- a/spaces/GMFTBY/PandaGPT/header.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import torch
-import datetime
-import types
-import deepspeed
-from transformers.deepspeed import HfDeepSpeedConfig
-import transformers
-import numpy as np
-from collections import OrderedDict
-from torch.utils.data import Dataset, DataLoader
-from torch.nn.utils import clip_grad_norm_
-from torch.cuda.amp import autocast, GradScaler
-from torch.nn import DataParallel
-from torch.optim import lr_scheduler
-import torch.optim as optim
-import torch.nn as nn
-import torch.nn.functional as F
-from tqdm import tqdm
-import os
-import re
-import math
-import random
-import json
-import time
-import logging
-from copy import deepcopy
-import ipdb
-import argparse
-import data
-from transformers import LlamaTokenizer, LlamaForCausalLM, LlamaConfig
-from torch.nn.utils.rnn import pad_sequence
-from peft import LoraConfig, TaskType, get_peft_model
-
-logging.getLogger("transformers").setLevel(logging.WARNING)
-logging.getLogger("transformers.tokenization_utils").setLevel(logging.ERROR)
-os.environ['TOKENIZERS_PARALLELISM'] = 'false'
diff --git a/spaces/Gen-Sim/Gen-Sim/gensim/utils.py b/spaces/Gen-Sim/Gen-Sim/gensim/utils.py
deleted file mode 100644
index 0e83e6a2a2d8c0109807f4068ba4ad59a9b221ae..0000000000000000000000000000000000000000
--- a/spaces/Gen-Sim/Gen-Sim/gensim/utils.py
+++ /dev/null
@@ -1,398 +0,0 @@
-import os
-
-import numpy as np
-import os
-import hydra
-import numpy as np
-import random
-
-from cliport import tasks
-from cliport.dataset import RavensDataset
-from cliport.environments.environment import Environment
-
-from pygments import highlight
-from pygments.lexers import PythonLexer
-from pygments.formatters import TerminalFormatter
-import re
-
-import openai
-import IPython
-import time
-import pybullet as p
-import traceback
-from datetime import datetime
-from pprint import pprint
-import cv2
-import re
-import random
-import json
-import operator
-import csv
-import itertools
-
-model = "gpt-4"
-# model = "gpt-3.5-turbo-16k"
-# model = "gpt-4-0613"
-
-def set_gpt_model(gpt_model_name):
- """ globally set gpt-model"""
- global model
- model = gpt_model_name
- print("use gpt model:", model)
-
-def mkdir_if_missing(dst_dir):
- if not os.path.exists(dst_dir):
- os.makedirs(dst_dir)
-
-
-def save_text(folder, name, out):
- mkdir_if_missing(folder)
- with open(os.path.join(folder, name + ".txt"), "w") as fhandle:
- fhandle.write(out)
-
-
-def add_to_txt(full_interaction, message, with_print=False):
- """ Add the message string to the full interaction """
- full_interaction.append("\n\n"+message)
- if with_print:
- print("\n\n"+message)
- return full_interaction
-
-def get_task_import_str():
- return "import numpy as np\n" + \
- "import os\n" + \
- "import pybullet as p\n" + \
- "import random\n" + \
- "from cliport.tasks import primitives\n" + \
- "from cliport.tasks.grippers import Spatula\n" + \
- "from cliport.tasks.task import Task\n" + \
- "from cliport.utils import utils\n"
-
-def extract_code(res):
- """ parse code block """
- # Pattern to find string between ```
- pattern = r'```(.*?)```'
-
- # Use re.findall to get all substrings within ```
- code_string = re.findall(pattern, res, re.DOTALL)
- if len(code_string) == 0:
- print("\n".join(res.split("\n")))
- print("empty code string")
- return '', ''
-
- code_string = code_string[0]
- code_string = code_string.replace('python', '')
- code_lines = code_string.split("\n")
-
- if 'python' in code_string:
- code_lines = code_lines[1:] # skip the first line
-
- class_def = [line for line in code_lines if line.startswith('class')]
- task_name = class_def[0]
- task_name = task_name[task_name.find("class "): task_name.rfind("(Task)")][6:]
-
- print("task_name:", task_name)
- return get_task_import_str() + '\n'.join(code_lines).strip(), task_name
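-
-# Illustrative call (the response string below is a made-up example):
-#   code, name = extract_code("```python\nclass StackBlocks(Task):\n    pass\n```")
-# would return name == "StackBlocks" and `code` prefixed with the imports from
-# get_task_import_str().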
-
-def extract_code_topdown(res):
- """ parse code block """
- # Pattern to find string between ```
- pattern = r'```python\n(.*?)```'
- # pattern = r'```python\n(.*?)'
- # Use re.findall to get all substrings within ```
- # code_string = re.findall(pattern, res, re.DOTALL)
- print(res)
- code_string = res[res.index("```python\n"):].strip()
- if len(code_string) == 0:
- print("\n".join(res.split("\n")))
- print("empty code string")
- return '', ''
-
- # code_string = code_string[0]
- code_string = code_string.replace('python', '')
- code_lines = code_string.split("\n")[1:]
- if code_lines[-1].strip().endswith(","):
- code_lines[-1] = code_lines[-1][:-1] + "))"
- if 'python' in code_string:
- code_lines = code_lines[1:] # skip the first line
-
- class_def = [line for line in code_lines if line.startswith('class')]
- task_name = class_def[0]
- task_name = task_name[task_name.find("class "): task_name.rfind("(Task)")][6:]
- # IPython.embed()
- print("task_name:", task_name)
- return '\n'.join(code_lines).strip(), task_name
-
-
-def extract_dict(res, prefix="new_task"):
- """ parse task dictionary """
- pattern = r'{(.*?)}'
- code_string = re.findall(pattern, res, re.DOTALL)
- if len(code_string) == 0:
- return ''
-
- code_string = code_string[0]
- code_string = code_string.replace('python', '')
-
- return prefix + '={'+ code_string.replace("\n","").strip() + '}'
-
-
-
-def extract_list(res, prefix="code_reference"):
- """ parse task dictionary """
- pattern = r'\[(.*?)\]'
- code_string = re.findall(pattern, res, re.DOTALL)
- if len(code_string) == 0:
- return ''
-
- code_string = code_string[0]
- return prefix + '=[' + code_string.strip() + ']'
-
-def extract_assets(res):
- """ parse generated assets """
-    pattern = r'<robot(.*?)</robot>'
-    code_string = re.findall(pattern, res, re.DOTALL)
-
-    assets_pattern = r'robot name="(.*?)">'
-    assets_string = re.findall(assets_pattern, res, re.DOTALL)
-    if len(code_string) == 0:
-        return {}
-
-    try:
-        new_urdf = {}
-        for asset_path, code in zip(assets_string, code_string):
-            new_urdf[asset_path] = "<robot" + code + "</robot>"
-
-        return new_urdf
-    except:
-        return {}
-
-
-def format_dict_prompt(task_name_dict, sample_num=-1, sort_items=False):
-    """ format a saved dictionary into prompt """
-    prompt_replacement = ''
-    sample_idx = list(range(len(task_name_dict)))
-
-    if sample_num > 0:
-        sample_idx = np.random.choice(sample_idx, sample_num, replace=False)
-
- for idx, (task_name, task_desc) in enumerate(task_name_dict.items()):
- if idx in sample_idx:
- prompt_replacement += f'- {task_name}: {task_desc}\n'
-
- return prompt_replacement + "\n\n"
-
-def format_list_prompt(task_list, sample_num=-1, sort_items=False):
- """ format a saved dictionary into prompt """
-
- # if sort_items:
- # task_list = sorted(task_list, key=operator.itemgetter(0))
- prompt_replacement = ''
- sample_idx = list(range(len(task_list)))
-
- if sample_num > 0:
- sample_idx = np.random.choice(len(task_list), sample_num, replace=False)
-
- for idx, task in enumerate(task_list):
- if idx in sample_idx:
- prompt_replacement += f"- {task['task-name']}: {task['task-descriptions']}\n"
-
- return prompt_replacement + "\n\n"
-
-def sample_list_reference(item_list, sample_num=-1):
- """ sample reference code from a list of python files """
- sample_idx = list(range(len(item_list)))
- prompt_replacement = ''
-
- if sample_num > 0:
- sample_idx = np.random.choice(len(item_list), sample_num, replace=False)
-
- print("reference files: ", [item_list[idx] for idx in sample_idx])
- for idx, item in enumerate(item_list):
- try:
- item_content = open(f"cliport/tasks/{item}").read()
- except:
- # one or the other
- item_content = open(f"cliport/generated_tasks/{item}").read()
-
- if idx in sample_idx:
- prompt_replacement += f'```\n{item_content}\n```\n\n'
-
- return prompt_replacement + "\n\n"
-
-
-def compute_diversity_score_from_assets_old(task_assets):
- """ compute how many new asset combos are covered by previous by a proxy"""
- if len(task_assets) < 2:
- return 0
-
- existing_assets = []
- for asset in task_assets:
- new_asset_flag = True
- for existing_asset in existing_assets:
- # it's covered by any previous assets
- if set(asset).issubset(existing_asset):
- new_asset_flag = False
- break
-
- if new_asset_flag:
- existing_assets.append(asset)
-
- return len(existing_assets) / len(task_assets)
-
-def iou_assets(asset1, asset2):
- asset1 = set(asset1)
- asset2 = set(asset2)
- return len(asset1 & asset2) / len(asset1 | asset2)
-
-def compute_diversity_score_from_assets(task_assets, total_trials):
- """ compute the pairwise IOU for assets"""
- if len(task_assets) == 0:
- return 0
-
- score = 0
- pairs = list(itertools.combinations(range(len(task_assets)), 2))
- for j, k in pairs:
- score += 1. - iou_assets(task_assets[j], task_assets[k])
-
- if len(pairs) == 0:
- return 0
-
- return score / len(pairs)
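-
-# Worked example (illustrative): for task_assets = [["a", "b"], ["b", "c"], ["a", "b"]]
-# the pairwise IOUs are 1/3, 1 and 1/3, so the returned diversity score is
-# ((1 - 1/3) + (1 - 1) + (1 - 1/3)) / 3 ≈ 0.44. Note that total_trials is unused here.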
-
-def truncate_message_for_token_limit(message_history, max_tokens=6000):
- truncated_messages = []
- tokens = 0
-
- # reverse
- for idx in range(len(message_history)-1, -1, -1) :
- message = message_history[idx]
- message_tokens = len(message['content']) / 4 # rough estimate.
- # print("message_tokens:", message['content'])
- if tokens + message_tokens > max_tokens:
- break # This message would put us over the limit
-
- truncated_messages.append(message)
- tokens += message_tokens
-
- truncated_messages.reverse()
- # print("truncated messages:", len(truncated_messages))
- return truncated_messages
-
-def insert_system_message(message_history):
-    system_message_prompt = ('You are a helpful and expert assistant in robot simulation code writing and task design. '
-                             'You design tasks that are creative and do-able by table-top manipulation. '
-                             'You write code without syntax errors and always think through and document your code carefully. ')
- message_history.insert(0, {"role": "system", "content": system_message_prompt})
-
-# globally always feed the previous reply as the assistant message back into the model
-existing_messages = []
-def generate_feedback(prompt, max_tokens=2048, temperature=0.0, interaction_txt=None, retry_max=5, n=1):
- """ use GPT-4 API """
- global existing_messages
- global model
- if model == "text-davinci-003":
- return generate_feedback_completion_only(prompt, max_tokens, temperature)
- existing_messages.append({"role": "user", "content": prompt})
- truncated_messages = truncate_message_for_token_limit(existing_messages)
- insert_system_message(truncated_messages)
-
- params = {
- "model": model,
- "max_tokens": max_tokens,
- "temperature": temperature,
- "messages": truncated_messages,
- "n": n
- }
-
- for retry in range(retry_max):
- try:
- if interaction_txt is not None:
- add_to_txt(interaction_txt, ">>> Prompt: \n" + prompt, with_print=False)
- call_res = openai.ChatCompletion.create(**params)
- res = call_res["choices"][0]["message"]["content"]
- existing_messages.append({"role": "assistant", "content": res})
-
- to_print = highlight(f"{res}", PythonLexer(), TerminalFormatter())
- print(to_print)
- if interaction_txt is not None:
- add_to_txt(interaction_txt, ">>> Answer: \n" + res, with_print=False)
-
- if n > 1:
- return [r["message"]["content"] for r in call_res["choices"]]
- return res
-
-        except Exception as e:
-            print("failed chat completion", e)
-
-    raise Exception("Failed to generate")
-
-def clear_messages():
- global existing_messages
- existing_messages = []
-
-
-def format_finetune_prompt(task_name):
- instruction_text = open('prompts/finetune_instructions_prompt.txt').read()
- instruction_text = instruction_text.replace("TASK_NAME_TEMPLATE", task_name)
- prompt_text = instruction_text
- return prompt_text
-
-def format_finetune_prompt_codeonly(task_name):
- instruction_text = open('prompts/finetune_instructions_prompt_codeonly.txt').read()
- instruction_text = instruction_text.replace("TASK_NAME_TEMPLATE", task_name)
- prompt_text = instruction_text
- return prompt_text
-
-existing_messages = []
-def generate_feedback_completion_only(prompt, max_tokens=800, temperature=0.0, interaction_txt=None, retry_max=5, n=1):
- """ use GPT-4 API """
- print("prompt size:", len(prompt))
- params = {
- "model": model,
- "max_tokens": 1200,
- "temperature": temperature,
- "prompt": prompt[-6000:], # in total 2048
- "n": n
- }
-
- for retry in range(retry_max):
- try:
- if interaction_txt is not None:
- add_to_txt(interaction_txt, ">>> Prompt: \n" + prompt, with_print=False)
- call_res = openai.Completion.create(**params)
- res = call_res["choices"][0]["text"]
-
- to_print = highlight(f"{res}", PythonLexer(), TerminalFormatter())
- print(to_print)
- if interaction_txt is not None:
- add_to_txt(interaction_txt, ">>> Answer: \n" + res, with_print=False)
-
- if n > 1:
- return [r["text"] for r in call_res["choices"]]
- return res
-
- except Exception as e:
- print("failed chat completion", e)
- # IPython.embed()
- raise Exception("Failed to generate")
diff --git a/spaces/GeorgeOrville/bingo/src/lib/hooks/use-bing.ts b/spaces/GeorgeOrville/bingo/src/lib/hooks/use-bing.ts
deleted file mode 100644
index dcdb1667ced0cba299b0825c0e91c4732411308c..0000000000000000000000000000000000000000
--- a/spaces/GeorgeOrville/bingo/src/lib/hooks/use-bing.ts
+++ /dev/null
@@ -1,173 +0,0 @@
-'use client'
-
-import { useState, useCallback, useEffect, useMemo } from 'react'
-import { useAtom, useAtomValue } from 'jotai'
-import { chatFamily, bingConversationStyleAtom, GreetMessages, hashAtom, voiceAtom } from '@/state'
-import { setConversationMessages } from './chat-history'
-import { ChatMessageModel, BotId, FileItem } from '@/lib/bots/bing/types'
-import { nanoid } from '../utils'
-import { TTS } from '../bots/bing/tts'
-
-export function useBing(botId: BotId = 'bing') {
- const chatAtom = useMemo(() => chatFamily({ botId, page: 'singleton' }), [botId])
- const [enableTTS] = useAtom(voiceAtom)
- const speaker = useMemo(() => new TTS(), [])
- const [hash, setHash] = useAtom(hashAtom)
- const bingConversationStyle = useAtomValue(bingConversationStyleAtom)
- const [chatState, setChatState] = useAtom(chatAtom)
- const [input, setInput] = useState('')
- const [attachmentList, setAttachmentList] = useState([])
-
- const updateMessage = useCallback(
- (messageId: string, updater: (message: ChatMessageModel) => void) => {
- setChatState((draft) => {
- const message = draft.messages.find((m) => m.id === messageId)
- if (message) {
- updater(message)
- }
- })
- },
- [setChatState],
- )
-
- const sendMessage = useCallback(
- async (input: string, options = {}) => {
- const botMessageId = nanoid()
- const imageUrl = attachmentList?.[0]?.status === 'loaded' ? attachmentList[0].url : undefined
- setChatState((draft) => {
- const text = imageUrl ? `${input}\n\n` : input
- draft.messages.push({ id: nanoid(), text, author: 'user' }, { id: botMessageId, text: '', author: 'bot' })
- setAttachmentList([])
- })
- const abortController = new AbortController()
- setChatState((draft) => {
- draft.generatingMessageId = botMessageId
- draft.abortController = abortController
- })
- speaker.reset()
- await chatState.bot.sendMessage({
- prompt: input,
- imageUrl: /\?bcid=([^&]+)/.test(imageUrl ?? '') ? `https://www.bing.com/images/blob?bcid=${RegExp.$1}` : imageUrl,
- options: {
- ...options,
- bingConversationStyle,
- },
- signal: abortController.signal,
- onEvent(event) {
- if (event.type === 'UPDATE_ANSWER') {
- updateMessage(botMessageId, (message) => {
- if (event.data.text.length > message.text.length) {
- message.text = event.data.text
- }
-
- if (event.data.spokenText && enableTTS) {
- speaker.speak(event.data.spokenText)
- }
-
- message.throttling = event.data.throttling || message.throttling
- message.sourceAttributions = event.data.sourceAttributions || message.sourceAttributions
- message.suggestedResponses = event.data.suggestedResponses || message.suggestedResponses
- })
- } else if (event.type === 'ERROR') {
- updateMessage(botMessageId, (message) => {
- message.error = event.error
- })
- setChatState((draft) => {
- draft.abortController = undefined
- draft.generatingMessageId = ''
- })
- } else if (event.type === 'DONE') {
- setChatState((draft) => {
- draft.abortController = undefined
- draft.generatingMessageId = ''
- })
- }
- },
- })
- },
- [botId, attachmentList, chatState.bot, setChatState, updateMessage],
- )
-
- const uploadImage = useCallback(async (imgUrl: string) => {
- setAttachmentList([{ url: imgUrl, status: 'loading' }])
- const response = await chatState.bot.uploadImage(imgUrl, bingConversationStyle)
- if (response?.blobId) {
- setAttachmentList([{ url: `/api/blob?bcid=${response.blobId}`, status: 'loaded' }])
- } else {
- setAttachmentList([{ url: imgUrl, status: 'error' }])
- }
- }, [chatState.bot])
-
- const resetConversation = useCallback(() => {
- chatState.bot.resetConversation()
- speaker.abort()
- setChatState((draft) => {
- draft.abortController = undefined
- draft.generatingMessageId = ''
- draft.messages = [{ author: 'bot', text: GreetMessages[Math.floor(GreetMessages.length * Math.random())], id: nanoid() }]
- draft.conversationId = nanoid()
- })
- }, [chatState.bot, setChatState])
-
- const stopGenerating = useCallback(() => {
- chatState.abortController?.abort()
- if (chatState.generatingMessageId) {
- updateMessage(chatState.generatingMessageId, (message) => {
- if (!message.text && !message.error) {
- message.text = 'Cancelled'
- }
- })
- }
- setChatState((draft) => {
- draft.generatingMessageId = ''
- })
- }, [chatState.abortController, chatState.generatingMessageId, setChatState, updateMessage])
-
- useEffect(() => {
- if (chatState.messages.length) {
- setConversationMessages(botId, chatState.conversationId, chatState.messages)
- }
- }, [botId, chatState.conversationId, chatState.messages])
-
- useEffect(() => {
- if (hash === 'reset') {
- resetConversation()
- setHash('')
- }
- }, [hash, setHash])
-
- const chat = useMemo(
- () => ({
- botId,
- bot: chatState.bot,
- isSpeaking: speaker.isSpeaking,
- messages: chatState.messages,
- sendMessage,
- setInput,
- input,
- resetConversation,
- generating: !!chatState.generatingMessageId,
- stopGenerating,
- uploadImage,
- setAttachmentList,
- attachmentList,
- }),
- [
- botId,
- bingConversationStyle,
- chatState.bot,
- chatState.generatingMessageId,
- chatState.messages,
- speaker.isSpeaking,
- setInput,
- input,
- setAttachmentList,
- attachmentList,
- resetConversation,
- sendMessage,
- stopGenerating,
- ],
- )
-
- return chat
-}
diff --git a/spaces/GodParticle69/minor_demo/mrcnn/dataset.py b/spaces/GodParticle69/minor_demo/mrcnn/dataset.py
deleted file mode 100644
index 4a66c18ea1b2781c219a60ea4dde2a59bb7ed8ae..0000000000000000000000000000000000000000
--- a/spaces/GodParticle69/minor_demo/mrcnn/dataset.py
+++ /dev/null
@@ -1,156 +0,0 @@
-from mrcnn import utils
-import numpy as np
-
-from pycocotools.coco import COCO
-from pycocotools.cocoeval import COCOeval
-from pycocotools import mask as maskUtils
-
-import os
-
-class MappingChallengeDataset(utils.Dataset):
- def load_dataset(self, dataset_dir, load_small=False, return_coco=True):
- """ Loads dataset released for the crowdAI Mapping Challenge(https://www.crowdai.org/challenges/mapping-challenge)
- Params:
- - dataset_dir : root directory of the dataset (can point to the train/val folder)
- - load_small : Boolean value which signals if the annotations for all the images need to be loaded into the memory,
- or if only a small subset of the same should be loaded into memory
- """
- self.load_small = load_small
- if self.load_small:
- annotation_path = os.path.join(dataset_dir, "annotation-small.json")
- else:
- annotation_path = os.path.join(dataset_dir, "annotation.json")
-
- image_dir = os.path.join(dataset_dir, "images")
- print("Annotation Path ", annotation_path)
- print("Image Dir ", image_dir)
- assert os.path.exists(annotation_path) and os.path.exists(image_dir)
-
- self.coco = COCO(annotation_path)
- self.image_dir = image_dir
- print(len(self.coco.imgs))
-
- # Load all classes (Only Building in this version)
- classIds = self.coco.getCatIds()
-
- # Load all images
- image_ids = list(self.coco.imgs.keys())
-
- # register classes
- for _class_id in classIds:
- self.add_class("crowdai-mapping-challenge", _class_id, self.coco.loadCats(_class_id)[0]["name"])
-
- # Register Images
- img_exist = []
- for _img_id in image_ids:
- path = os.path.join(image_dir, self.coco.imgs[_img_id]['file_name'])
- if os.path.exists(path):
- img_exist.append(_img_id)
-
- coco_updated = {}
- for i in img_exist:
- coco_updated[i] = self.coco.imgs[i]
-
- self.coco.imgs = coco_updated
- print(len(self.coco.imgs))
-
- for _img_id in img_exist:
- assert(os.path.exists(os.path.join(image_dir, self.coco.imgs[_img_id]['file_name'])))
-
- self.add_image(
- "crowdai-mapping-challenge", image_id=_img_id,
- path=os.path.join(image_dir, self.coco.imgs[_img_id]['file_name']),
- width=self.coco.imgs[_img_id]["width"],
- height=self.coco.imgs[_img_id]["height"],
- annotations=self.coco.loadAnns(self.coco.getAnnIds(
- imgIds=[_img_id],
- catIds=classIds,
- iscrowd=None)))
-
- if return_coco:
- return self.coco
-
- def load_mask(self, image_id):
- """ Loads instance mask for a given image
- This function converts mask from the coco format to a
- a bitmap [height, width, instance]
- Params:
- - image_id : reference id for a given image
-
- Returns:
- masks : A bool array of shape [height, width, instances] with
- one mask per instance
- class_ids : a 1D array of classIds of the corresponding instance masks
- (In this version of the challenge it will be of shape [instances] and always be filled with the class-id of the "Building" class.)
- """
-
- image_info = self.image_info[image_id]
- assert image_info["source"] == "crowdai-mapping-challenge"
-
- instance_masks = []
- class_ids = []
- annotations = self.image_info[image_id]["annotations"]
- # Build mask of shape [height, width, instance_count] and list
- # of class IDs that correspond to each channel of the mask.
- for annotation in annotations:
- class_id = self.map_source_class_id(
- "crowdai-mapping-challenge.{}".format(annotation['category_id']))
- if class_id:
- m = self.annToMask(annotation, image_info["height"],
- image_info["width"])
- # Some objects are so small that they're less than 1 pixel area
- # and end up rounded out. Skip those objects.
- if m.max() < 1:
- continue
-
- # Ignore the notion of "is_crowd" as specified in the coco format
-                # as we do not have that annotation in the current version of the dataset
-
- instance_masks.append(m)
- class_ids.append(class_id)
- # Pack instance masks into an array
- if class_ids:
- mask = np.stack(instance_masks, axis=2)
- class_ids = np.array(class_ids, dtype=np.int32)
- return mask, class_ids
- else:
- # Call super class to return an empty mask
- return super(MappingChallengeDataset, self).load_mask(image_id)
-
-
- def image_reference(self, image_id):
- """Return a reference for a particular image
-
-            Ideally this function would return a URL,
-            but in this case we simply return the image_id
- """
- return "crowdai-mapping-challenge::{}".format(image_id)
- # The following two functions are from pycocotools with a few changes.
-
- def annToRLE(self, ann, height, width):
- """
-        Convert an annotation, which can be polygons or uncompressed RLE, to RLE.
-        :return: RLE-encoded mask
- """
- segm = ann['segmentation']
- if isinstance(segm, list):
- # polygon -- a single object might consist of multiple parts
- # we merge all parts into one mask rle code
- rles = maskUtils.frPyObjects(segm, height, width)
- rle = maskUtils.merge(rles)
- elif isinstance(segm['counts'], list):
- # uncompressed RLE
- rle = maskUtils.frPyObjects(segm, height, width)
- else:
- # rle
- rle = ann['segmentation']
- return rle
-
- def annToMask(self, ann, height, width):
- """
-        Convert an annotation, which can be polygons, uncompressed RLE, or RLE, to a binary mask.
- :return: binary mask (numpy 2D array)
- """
- rle = self.annToRLE(ann, height, width)
- m = maskUtils.decode(rle)
- return m
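-
-# Illustrative usage sketch (the dataset directory below is a placeholder path):
-#   dataset = MappingChallengeDataset()
-#   dataset.load_dataset("data/mapping-challenge/train", load_small=True)
-#   dataset.prepare()
-#   masks, class_ids = dataset.load_mask(dataset.image_ids[0])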
diff --git a/spaces/Godrose0728/sound-link/text/mandarin.py b/spaces/Godrose0728/sound-link/text/mandarin.py
deleted file mode 100644
index ff71de9788e4f20c897b971a775d1ecfbfe1c7b7..0000000000000000000000000000000000000000
--- a/spaces/Godrose0728/sound-link/text/mandarin.py
+++ /dev/null
@@ -1,329 +0,0 @@
-import os
-import sys
-import re
-from pypinyin import lazy_pinyin, BOPOMOFO
-import jieba
-import cn2an
-import logging
-
-logging.getLogger('jieba').setLevel(logging.WARNING)
-jieba.initialize()
-
-
-# List of (Latin alphabet, bopomofo) pairs:
-_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', 'ㄟˉ'),
- ('b', 'ㄅㄧˋ'),
- ('c', 'ㄙㄧˉ'),
- ('d', 'ㄉㄧˋ'),
- ('e', 'ㄧˋ'),
- ('f', 'ㄝˊㄈㄨˋ'),
- ('g', 'ㄐㄧˋ'),
- ('h', 'ㄝˇㄑㄩˋ'),
- ('i', 'ㄞˋ'),
- ('j', 'ㄐㄟˋ'),
- ('k', 'ㄎㄟˋ'),
- ('l', 'ㄝˊㄛˋ'),
- ('m', 'ㄝˊㄇㄨˋ'),
- ('n', 'ㄣˉ'),
- ('o', 'ㄡˉ'),
- ('p', 'ㄆㄧˉ'),
- ('q', 'ㄎㄧㄡˉ'),
- ('r', 'ㄚˋ'),
- ('s', 'ㄝˊㄙˋ'),
- ('t', 'ㄊㄧˋ'),
- ('u', 'ㄧㄡˉ'),
- ('v', 'ㄨㄧˉ'),
- ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
- ('x', 'ㄝˉㄎㄨˋㄙˋ'),
- ('y', 'ㄨㄞˋ'),
- ('z', 'ㄗㄟˋ')
-]]
-
-# List of (bopomofo, romaji) pairs:
-_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄅㄛ', 'p⁼wo'),
- ('ㄆㄛ', 'pʰwo'),
- ('ㄇㄛ', 'mwo'),
- ('ㄈㄛ', 'fwo'),
- ('ㄅ', 'p⁼'),
- ('ㄆ', 'pʰ'),
- ('ㄇ', 'm'),
- ('ㄈ', 'f'),
- ('ㄉ', 't⁼'),
- ('ㄊ', 'tʰ'),
- ('ㄋ', 'n'),
- ('ㄌ', 'l'),
- ('ㄍ', 'k⁼'),
- ('ㄎ', 'kʰ'),
- ('ㄏ', 'h'),
- ('ㄐ', 'ʧ⁼'),
- ('ㄑ', 'ʧʰ'),
- ('ㄒ', 'ʃ'),
- ('ㄓ', 'ʦ`⁼'),
- ('ㄔ', 'ʦ`ʰ'),
- ('ㄕ', 's`'),
- ('ㄖ', 'ɹ`'),
- ('ㄗ', 'ʦ⁼'),
- ('ㄘ', 'ʦʰ'),
- ('ㄙ', 's'),
- ('ㄚ', 'a'),
- ('ㄛ', 'o'),
- ('ㄜ', 'ə'),
- ('ㄝ', 'e'),
- ('ㄞ', 'ai'),
- ('ㄟ', 'ei'),
- ('ㄠ', 'au'),
- ('ㄡ', 'ou'),
- ('ㄧㄢ', 'yeNN'),
- ('ㄢ', 'aNN'),
- ('ㄧㄣ', 'iNN'),
- ('ㄣ', 'əNN'),
- ('ㄤ', 'aNg'),
- ('ㄧㄥ', 'iNg'),
- ('ㄨㄥ', 'uNg'),
- ('ㄩㄥ', 'yuNg'),
- ('ㄥ', 'əNg'),
- ('ㄦ', 'əɻ'),
- ('ㄧ', 'i'),
- ('ㄨ', 'u'),
- ('ㄩ', 'ɥ'),
- ('ˉ', '→'),
- ('ˊ', '↑'),
- ('ˇ', '↓↑'),
- ('ˋ', '↓'),
- ('˙', ''),
- (',', ','),
- ('。', '.'),
- ('!', '!'),
- ('?', '?'),
- ('—', '-')
-]]
-
-# List of (romaji, ipa) pairs:
-_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('ʃy', 'ʃ'),
- ('ʧʰy', 'ʧʰ'),
- ('ʧ⁼y', 'ʧ⁼'),
- ('NN', 'n'),
- ('Ng', 'ŋ'),
- ('y', 'j'),
- ('h', 'x')
-]]
-
-# List of (bopomofo, ipa) pairs:
-_bopomofo_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄅㄛ', 'p⁼wo'),
- ('ㄆㄛ', 'pʰwo'),
- ('ㄇㄛ', 'mwo'),
- ('ㄈㄛ', 'fwo'),
- ('ㄅ', 'p⁼'),
- ('ㄆ', 'pʰ'),
- ('ㄇ', 'm'),
- ('ㄈ', 'f'),
- ('ㄉ', 't⁼'),
- ('ㄊ', 'tʰ'),
- ('ㄋ', 'n'),
- ('ㄌ', 'l'),
- ('ㄍ', 'k⁼'),
- ('ㄎ', 'kʰ'),
- ('ㄏ', 'x'),
- ('ㄐ', 'tʃ⁼'),
- ('ㄑ', 'tʃʰ'),
- ('ㄒ', 'ʃ'),
- ('ㄓ', 'ts`⁼'),
- ('ㄔ', 'ts`ʰ'),
- ('ㄕ', 's`'),
- ('ㄖ', 'ɹ`'),
- ('ㄗ', 'ts⁼'),
- ('ㄘ', 'tsʰ'),
- ('ㄙ', 's'),
- ('ㄚ', 'a'),
- ('ㄛ', 'o'),
- ('ㄜ', 'ə'),
- ('ㄝ', 'ɛ'),
- ('ㄞ', 'aɪ'),
- ('ㄟ', 'eɪ'),
- ('ㄠ', 'ɑʊ'),
- ('ㄡ', 'oʊ'),
- ('ㄧㄢ', 'jɛn'),
- ('ㄩㄢ', 'ɥæn'),
- ('ㄢ', 'an'),
- ('ㄧㄣ', 'in'),
- ('ㄩㄣ', 'ɥn'),
- ('ㄣ', 'ən'),
- ('ㄤ', 'ɑŋ'),
- ('ㄧㄥ', 'iŋ'),
- ('ㄨㄥ', 'ʊŋ'),
- ('ㄩㄥ', 'jʊŋ'),
- ('ㄥ', 'əŋ'),
- ('ㄦ', 'əɻ'),
- ('ㄧ', 'i'),
- ('ㄨ', 'u'),
- ('ㄩ', 'ɥ'),
- ('ˉ', '→'),
- ('ˊ', '↑'),
- ('ˇ', '↓↑'),
- ('ˋ', '↓'),
- ('˙', ''),
- (',', ','),
- ('。', '.'),
- ('!', '!'),
- ('?', '?'),
- ('—', '-')
-]]
-
-# List of (bopomofo, ipa2) pairs:
-_bopomofo_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄅㄛ', 'pwo'),
- ('ㄆㄛ', 'pʰwo'),
- ('ㄇㄛ', 'mwo'),
- ('ㄈㄛ', 'fwo'),
- ('ㄅ', 'p'),
- ('ㄆ', 'pʰ'),
- ('ㄇ', 'm'),
- ('ㄈ', 'f'),
- ('ㄉ', 't'),
- ('ㄊ', 'tʰ'),
- ('ㄋ', 'n'),
- ('ㄌ', 'l'),
- ('ㄍ', 'k'),
- ('ㄎ', 'kʰ'),
- ('ㄏ', 'h'),
- ('ㄐ', 'tɕ'),
- ('ㄑ', 'tɕʰ'),
- ('ㄒ', 'ɕ'),
- ('ㄓ', 'tʂ'),
- ('ㄔ', 'tʂʰ'),
- ('ㄕ', 'ʂ'),
- ('ㄖ', 'ɻ'),
- ('ㄗ', 'ts'),
- ('ㄘ', 'tsʰ'),
- ('ㄙ', 's'),
- ('ㄚ', 'a'),
- ('ㄛ', 'o'),
- ('ㄜ', 'ɤ'),
- ('ㄝ', 'ɛ'),
- ('ㄞ', 'aɪ'),
- ('ㄟ', 'eɪ'),
- ('ㄠ', 'ɑʊ'),
- ('ㄡ', 'oʊ'),
- ('ㄧㄢ', 'jɛn'),
- ('ㄩㄢ', 'yæn'),
- ('ㄢ', 'an'),
- ('ㄧㄣ', 'in'),
- ('ㄩㄣ', 'yn'),
- ('ㄣ', 'ən'),
- ('ㄤ', 'ɑŋ'),
- ('ㄧㄥ', 'iŋ'),
- ('ㄨㄥ', 'ʊŋ'),
- ('ㄩㄥ', 'jʊŋ'),
- ('ㄥ', 'ɤŋ'),
- ('ㄦ', 'əɻ'),
- ('ㄧ', 'i'),
- ('ㄨ', 'u'),
- ('ㄩ', 'y'),
- ('ˉ', '˥'),
- ('ˊ', '˧˥'),
- ('ˇ', '˨˩˦'),
- ('ˋ', '˥˩'),
- ('˙', ''),
- (',', ','),
- ('。', '.'),
- ('!', '!'),
- ('?', '?'),
- ('—', '-')
-]]
-
-
-def number_to_chinese(text):
- numbers = re.findall(r'\d+(?:\.?\d+)?', text)
- for number in numbers:
- text = text.replace(number, cn2an.an2cn(number), 1)
- return text
-
-
-def chinese_to_bopomofo(text):
- text = text.replace('、', ',').replace(';', ',').replace(':', ',')
- words = jieba.lcut(text, cut_all=False)
- text = ''
- for word in words:
- bopomofos = lazy_pinyin(word, BOPOMOFO)
- if not re.search('[\u4e00-\u9fff]', word):
- text += word
- continue
- for i in range(len(bopomofos)):
- bopomofos[i] = re.sub(r'([\u3105-\u3129])$', r'\1ˉ', bopomofos[i])
- if text != '':
- text += ' '
- text += ''.join(bopomofos)
- return text
-
-
-def latin_to_bopomofo(text):
- for regex, replacement in _latin_to_bopomofo:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def bopomofo_to_romaji(text):
- for regex, replacement in _bopomofo_to_romaji:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def bopomofo_to_ipa(text):
- for regex, replacement in _bopomofo_to_ipa:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def bopomofo_to_ipa2(text):
- for regex, replacement in _bopomofo_to_ipa2:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def chinese_to_romaji(text):
- text = number_to_chinese(text)
- text = chinese_to_bopomofo(text)
- text = latin_to_bopomofo(text)
- text = bopomofo_to_romaji(text)
- text = re.sub('i([aoe])', r'y\1', text)
- text = re.sub('u([aoəe])', r'w\1', text)
- text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)',
- r'\1ɹ`\2', text).replace('ɻ', 'ɹ`')
- text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text)
- return text
-
-
-def chinese_to_lazy_ipa(text):
- text = chinese_to_romaji(text)
- for regex, replacement in _romaji_to_ipa:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def chinese_to_ipa(text):
- text = number_to_chinese(text)
- text = chinese_to_bopomofo(text)
- text = latin_to_bopomofo(text)
- text = bopomofo_to_ipa(text)
- text = re.sub('i([aoe])', r'j\1', text)
- text = re.sub('u([aoəe])', r'w\1', text)
- text = re.sub('([sɹ]`[⁼ʰ]?)([→↓↑ ]+|$)',
- r'\1ɹ`\2', text).replace('ɻ', 'ɹ`')
- text = re.sub('([s][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text)
- return text
-
-
-def chinese_to_ipa2(text):
- text = number_to_chinese(text)
- text = chinese_to_bopomofo(text)
- text = latin_to_bopomofo(text)
- text = bopomofo_to_ipa2(text)
- text = re.sub(r'i([aoe])', r'j\1', text)
- text = re.sub(r'u([aoəe])', r'w\1', text)
- text = re.sub(r'([ʂɹ]ʰ?)([˩˨˧˦˥ ]+|$)', r'\1ʅ\2', text)
- text = re.sub(r'(sʰ?)([˩˨˧˦˥ ]+|$)', r'\1ɿ\2', text)
- return text
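-
-# Illustrative example: chinese_to_ipa('你好123') first rewrites the digits with
-# cn2an, converts the characters to bopomofo via jieba/pypinyin, maps any Latin
-# letters to bopomofo, and finally maps the bopomofo symbols to IPA with tone marks.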
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/apis/test.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/apis/test.py
deleted file mode 100644
index e54b1b8c24efc448972c31ee5da63041d7f97a47..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/apis/test.py
+++ /dev/null
@@ -1,190 +0,0 @@
-import os.path as osp
-import pickle
-import shutil
-import tempfile
-import time
-
-import mmcv
-import torch
-import torch.distributed as dist
-from mmcv.image import tensor2imgs
-from mmcv.runner import get_dist_info
-
-from mmdet.core import encode_mask_results
-
-
-def single_gpu_test(model,
- data_loader,
- show=False,
- out_dir=None,
- show_score_thr=0.3):
- model.eval()
- results = []
- dataset = data_loader.dataset
- prog_bar = mmcv.ProgressBar(len(dataset))
- for i, data in enumerate(data_loader):
- with torch.no_grad():
- result = model(return_loss=False, rescale=True, **data)
-
- batch_size = len(result)
- if show or out_dir:
- if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
- img_tensor = data['img'][0]
- else:
- img_tensor = data['img'][0].data[0]
- img_metas = data['img_metas'][0].data[0]
- imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
- assert len(imgs) == len(img_metas)
-
- for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
- h, w, _ = img_meta['img_shape']
- img_show = img[:h, :w, :]
-
- ori_h, ori_w = img_meta['ori_shape'][:-1]
- img_show = mmcv.imresize(img_show, (ori_w, ori_h))
-
- if out_dir:
- out_file = osp.join(out_dir, img_meta['ori_filename'])
- else:
- out_file = None
-
- model.module.show_result(
- img_show,
- result[i],
- show=show,
- out_file=out_file,
- score_thr=show_score_thr)
-
- # encode mask results
- if isinstance(result[0], tuple):
- result = [(bbox_results, encode_mask_results(mask_results))
- for bbox_results, mask_results in result]
- results.extend(result)
-
- for _ in range(batch_size):
- prog_bar.update()
- return results
-
-
-def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
- """Test model with multiple gpus.
-
- This method tests the model with multiple gpus and collects the results
- under two different modes: gpu and cpu. By setting 'gpu_collect=True',
- it encodes results to gpu tensors and uses gpu communication for result
- collection. In cpu mode it saves the results of the different gpus to 'tmpdir'
- and collects them with the rank 0 worker.
-
- Args:
- model (nn.Module): Model to be tested.
- data_loader (nn.Dataloader): Pytorch data loader.
- tmpdir (str): Path of directory to save the temporary results from
- different gpus under cpu mode.
- gpu_collect (bool): Option to use either gpu or cpu to collect results.
-
- Returns:
- list: The prediction results.
- """
- model.eval()
- results = []
- dataset = data_loader.dataset
- rank, world_size = get_dist_info()
- if rank == 0:
- prog_bar = mmcv.ProgressBar(len(dataset))
- time.sleep(2) # This line can prevent deadlock problem in some cases.
- for i, data in enumerate(data_loader):
- with torch.no_grad():
- result = model(return_loss=False, rescale=True, **data)
- # encode mask results
- if isinstance(result[0], tuple):
- result = [(bbox_results, encode_mask_results(mask_results))
- for bbox_results, mask_results in result]
- results.extend(result)
-
- if rank == 0:
- batch_size = len(result)
- for _ in range(batch_size * world_size):
- prog_bar.update()
-
- # collect results from all ranks
- if gpu_collect:
- results = collect_results_gpu(results, len(dataset))
- else:
- results = collect_results_cpu(results, len(dataset), tmpdir)
- return results
-
-
-def collect_results_cpu(result_part, size, tmpdir=None):
- rank, world_size = get_dist_info()
- # create a tmp dir if it is not specified
- if tmpdir is None:
- MAX_LEN = 512
- # 32 is whitespace
- dir_tensor = torch.full((MAX_LEN, ),
- 32,
- dtype=torch.uint8,
- device='cuda')
- if rank == 0:
- mmcv.mkdir_or_exist('.dist_test')
- tmpdir = tempfile.mkdtemp(dir='.dist_test')
- tmpdir = torch.tensor(
- bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
- dir_tensor[:len(tmpdir)] = tmpdir
- dist.broadcast(dir_tensor, 0)
- tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
- else:
- mmcv.mkdir_or_exist(tmpdir)
- # dump the part result to the dir
- mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
- dist.barrier()
- # collect all parts
- if rank != 0:
- return None
- else:
- # load results of all parts from tmp dir
- part_list = []
- for i in range(world_size):
- part_file = osp.join(tmpdir, f'part_{i}.pkl')
- part_list.append(mmcv.load(part_file))
- # sort the results
- ordered_results = []
- for res in zip(*part_list):
- ordered_results.extend(list(res))
- # the dataloader may pad some samples
- ordered_results = ordered_results[:size]
- # remove tmp dir
- shutil.rmtree(tmpdir)
- return ordered_results
-
-
-def collect_results_gpu(result_part, size):
- rank, world_size = get_dist_info()
- # dump result part to tensor with pickle
- part_tensor = torch.tensor(
- bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
- # gather all result part tensor shape
- shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
- shape_list = [shape_tensor.clone() for _ in range(world_size)]
- dist.all_gather(shape_list, shape_tensor)
- # padding result part tensor to max length
- shape_max = torch.tensor(shape_list).max()
- part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
- part_send[:shape_tensor[0]] = part_tensor
- part_recv_list = [
- part_tensor.new_zeros(shape_max) for _ in range(world_size)
- ]
- # gather all result part
- dist.all_gather(part_recv_list, part_send)
-
- if rank == 0:
- part_list = []
- for recv, shape in zip(part_recv_list, shape_list):
- part_list.append(
- pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
- # sort the results
- ordered_results = []
- for res in zip(*part_list):
- ordered_results.extend(list(res))
- # the dataloader may pad some samples
- ordered_results = ordered_results[:size]
- return ordered_results
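
For reference, these helpers are normally driven the way mmdet's tools/test.py does it: build the test dataset, dataloader and detector from a config, load a checkpoint, wrap the model, then call single_gpu_test or multi_gpu_test. The sketch below is not from the deleted repo; the config and checkpoint paths are placeholders and a standard mmdet/mmcv install is assumed.

```python
# Hedged sketch of a typical single-GPU evaluation loop around the helpers above.
from mmcv import Config
from mmcv.parallel import MMDataParallel
from mmcv.runner import load_checkpoint
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector

cfg = Config.fromfile('configs/my_detector.py')              # placeholder config
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(dataset, samples_per_gpu=1,
                               workers_per_gpu=2, dist=False, shuffle=False)
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
load_checkpoint(model, 'my_checkpoint.pth', map_location='cpu')  # placeholder weights
model = MMDataParallel(model, device_ids=[0])
results = single_gpu_test(model, data_loader, show=False, out_dir=None)
print(len(results), 'images evaluated')
```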
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/models/danet_r50-d8.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/models/danet_r50-d8.py
deleted file mode 100644
index 2c934939fac48525f22ad86f489a041dd7db7d09..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/models/danet_r50-d8.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- type='EncoderDecoder',
- pretrained='open-mmlab://resnet50_v1c',
- backbone=dict(
- type='ResNetV1c',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- dilations=(1, 1, 2, 4),
- strides=(1, 2, 1, 1),
- norm_cfg=norm_cfg,
- norm_eval=False,
- style='pytorch',
- contract_dilation=True),
- decode_head=dict(
- type='DAHead',
- in_channels=2048,
- in_index=3,
- channels=512,
- pam_channels=64,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
- auxiliary_head=dict(
- type='FCNHead',
- in_channels=1024,
- in_index=2,
- channels=256,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='whole'))
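
As with the other _base_ files in this diff, this config is meant to be inherited and then consumed by mmseg's builders. The sketch below is not part of the deleted space and assumes a standard mmsegmentation install; it builds the DANet network directly from this file.

```python
# Hedged sketch: build the DANet EncoderDecoder from the _base_ config above.
from mmcv import Config
from mmseg.models import build_segmentor

cfg = Config.fromfile('configs/_base_/models/danet_r50-d8.py')
# SyncBN needs an initialized process group; swap to plain BN for a local test.
bn = dict(type='BN', requires_grad=True)
cfg.model.backbone.norm_cfg = bn
cfg.model.decode_head.norm_cfg = bn
cfg.model.auxiliary_head.norm_cfg = bn
model = build_segmentor(cfg.model)
print(type(model).__name__)   # EncoderDecoder
```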
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x512_40k_voc12aug.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x512_40k_voc12aug.py
deleted file mode 100644
index 82a1c9386c51fb0ada436e51702beb961a534b26..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x512_40k_voc12aug.py
+++ /dev/null
@@ -1,6 +0,0 @@
-_base_ = [
- '../_base_/models/ann_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py',
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
-]
-model = dict(
- decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_40k_pascal_context_59.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_40k_pascal_context_59.py
deleted file mode 100644
index f9e831bcd1043ed9feba88bc28ab69d87287ca98..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_40k_pascal_context_59.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = [
- '../_base_/models/deeplabv3plus_r50-d8.py',
- '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py',
- '../_base_/schedules/schedule_40k.py'
-]
-model = dict(
- decode_head=dict(num_classes=59),
- auxiliary_head=dict(num_classes=59),
- test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320)))
-optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001)
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/upernet/upernet_r50_769x769_40k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/upernet/upernet_r50_769x769_40k_cityscapes.py
deleted file mode 100644
index 89b18aa2840d12e67339ce0b7a0561fa2ba0c6fa..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/upernet/upernet_r50_769x769_40k_cityscapes.py
+++ /dev/null
@@ -1,9 +0,0 @@
-_base_ = [
- '../_base_/models/upernet_r50.py',
- '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
- '../_base_/schedules/schedule_40k.py'
-]
-model = dict(
- decode_head=dict(align_corners=True),
- auxiliary_head=dict(align_corners=True),
- test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/scripts/static/style.css b/spaces/GrandaddyShmax/AudioCraft_Plus/scripts/static/style.css
deleted file mode 100644
index a0df7c63a0d2dd9a79f33f5d869ca31c9da87e8d..0000000000000000000000000000000000000000
--- a/spaces/GrandaddyShmax/AudioCraft_Plus/scripts/static/style.css
+++ /dev/null
@@ -1,113 +0,0 @@
-body {
- background-color: #fbfbfb;
- margin: 0;
-}
-
-select, input {
- font-size: 1em;
- max-width: 100%;
-}
-
-.xp_name {
- font-family: monospace;
-}
-
-.simple_form {
- background-color: #dddddd;
- padding: 1em;
- margin: 0.5em;
-}
-
-textarea {
- margin-top: 0.5em;
- margin-bottom: 0.5em;
-}
-
-.rating {
- background-color: grey;
- padding-top: 5px;
- padding-bottom: 5px;
- padding-left: 8px;
- padding-right: 8px;
- margin-right: 2px;
- cursor:pointer;
-}
-
-.rating_selected {
- background-color: purple;
-}
-
-.content {
- font-family: sans-serif;
- background-color: #f6f6f6;
- padding: 40px;
- margin: 0 auto;
- max-width: 1000px;
-}
-
-.track label {
- padding-top: 10px;
- padding-bottom: 10px;
-}
-.track {
- padding: 15px;
- margin: 5px;
- background-color: #c8c8c8;
-}
-
-.submit-big {
- width:400px;
- height:30px;
- font-size: 20px;
-}
-
-.error {
- color: red;
-}
-
-.ratings {
- margin-left: 10px;
-}
-
-.important {
- font-weight: bold;
-}
-
-.survey {
- margin-bottom: 100px;
-}
-
-.success {
- color: #25901b;
- font-weight: bold;
-}
-.warning {
- color: #8a1f19;
- font-weight: bold;
-}
-.track>section {
- display: flex;
- align-items: center;
-}
-
-.prompt {
- display: flex;
- align-items: center;
-}
-
-.track>section>div {
- padding-left: 10px;
-}
-
-audio {
- max-width: 280px;
- max-height: 40px;
- margin-left: 10px;
- margin-right: 10px;
-}
-
-.special {
- font-weight: bold;
- color: #2c2c2c;
-}
-
diff --git a/spaces/HaHaBill/LandShapes-Antarctica/netdissect/tool/allunitsample.py b/spaces/HaHaBill/LandShapes-Antarctica/netdissect/tool/allunitsample.py
deleted file mode 100644
index 9f86e196ce63ebfcad1fcee8bd2b7358463ff3d1..0000000000000000000000000000000000000000
--- a/spaces/HaHaBill/LandShapes-Antarctica/netdissect/tool/allunitsample.py
+++ /dev/null
@@ -1,199 +0,0 @@
-'''
-A simple tool to generate sample of output of a GAN,
-subject to filtering, sorting, or intervention.
-'''
-
-import torch, numpy, os, argparse, sys, shutil, errno, numbers
-from PIL import Image
-from torch.utils.data import TensorDataset
-from netdissect.zdataset import standard_z_sample
-from netdissect.progress import default_progress, verbose_progress
-from netdissect.autoeval import autoimport_eval
-from netdissect.workerpool import WorkerBase, WorkerPool
-from netdissect.nethook import retain_layers
-from netdissect.runningstats import RunningTopK
-
-def main():
- parser = argparse.ArgumentParser(description='GAN sample making utility')
- parser.add_argument('--model', type=str, default=None,
- help='constructor for the model to test')
- parser.add_argument('--pthfile', type=str, default=None,
- help='filename of .pth file for the model')
- parser.add_argument('--outdir', type=str, default='images',
- help='directory for image output')
- parser.add_argument('--size', type=int, default=100,
- help='number of images to output')
- parser.add_argument('--test_size', type=int, default=None,
- help='number of images to test')
- parser.add_argument('--layer', type=str, default=None,
- help='layer to inspect')
- parser.add_argument('--seed', type=int, default=1,
- help='seed')
- parser.add_argument('--quiet', action='store_true', default=False,
- help='silences console output')
- if len(sys.argv) == 1:
- parser.print_usage(sys.stderr)
- sys.exit(1)
- args = parser.parse_args()
- verbose_progress(not args.quiet)
-
- # Instantiate the model
- model = autoimport_eval(args.model)
- if args.pthfile is not None:
- data = torch.load(args.pthfile)
- if 'state_dict' in data:
- meta = {}
- for key in data:
- if isinstance(data[key], numbers.Number):
- meta[key] = data[key]
- data = data['state_dict']
- model.load_state_dict(data)
- # Unwrap any DataParallel-wrapped model
- if isinstance(model, torch.nn.DataParallel):
- model = next(model.children())
- # Examine first conv in model to determine input feature size.
- first_layer = [c for c in model.modules()
- if isinstance(c, (torch.nn.Conv2d, torch.nn.ConvTranspose2d,
- torch.nn.Linear))][0]
- # 4d input if convolutional, 2d input if first layer is linear.
- if isinstance(first_layer, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
- z_channels = first_layer.in_channels
- spatialdims = (1, 1)
- else:
- z_channels = first_layer.in_features
- spatialdims = ()
- # Instrument the model
- retain_layers(model, [args.layer])
- model.cuda()
-
- if args.test_size is None:
- args.test_size = args.size * 20
- z_universe = standard_z_sample(args.test_size, z_channels,
- seed=args.seed)
- z_universe = z_universe.view(tuple(z_universe.shape) + spatialdims)
- indexes = get_all_highest_znums(
- model, z_universe, args.size, seed=args.seed)
- save_chosen_unit_images(args.outdir, model, z_universe, indexes,
- lightbox=True)
-
-
-def get_all_highest_znums(model, z_universe, size,
- batch_size=10, seed=1):
- # The model should have been instrumented already
- retained_items = list(model.retained.items())
- assert len(retained_items) == 1
- layer = retained_items[0][0]
- # By default, a 10% sample
- progress = default_progress()
- num_units = None
- with torch.no_grad():
- # Pass 1: collect max activation stats
- z_loader = torch.utils.data.DataLoader(TensorDataset(z_universe),
- batch_size=batch_size, num_workers=2,
- pin_memory=True)
- rtk = RunningTopK(k=size)
- for [z] in progress(z_loader, desc='Finding max activations'):
- z = z.cuda()
- model(z)
- feature = model.retained[layer]
- num_units = feature.shape[1]
- max_feature = feature.view(
- feature.shape[0], num_units, -1).max(2)[0]
- rtk.add(max_feature)
- td, ti = rtk.result()
- highest = ti.sort(1)[0]
- return highest
-
-def save_chosen_unit_images(dirname, model, z_universe, indices,
- shared_dir="shared_images",
- unitdir_template="unit_{}",
- name_template="image_{}.jpg",
- lightbox=False, batch_size=50, seed=1):
- all_indices = torch.unique(indices.view(-1), sorted=True)
- z_sample = z_universe[all_indices]
- progress = default_progress()
- sdir = os.path.join(dirname, shared_dir)
- created_hashdirs = set()
- for index in range(len(z_universe)):
- hd = hashdir(index)
- if hd not in created_hashdirs:
- created_hashdirs.add(hd)
- os.makedirs(os.path.join(sdir, hd), exist_ok=True)
- with torch.no_grad():
- # Pass 2: now generate images
- z_loader = torch.utils.data.DataLoader(TensorDataset(z_sample),
- batch_size=batch_size, num_workers=2,
- pin_memory=True)
- saver = WorkerPool(SaveImageWorker)
- for batch_num, [z] in enumerate(progress(z_loader,
- desc='Saving images')):
- z = z.cuda()
- start_index = batch_num * batch_size
- im = ((model(z) + 1) / 2 * 255).clamp(0, 255).byte().permute(
- 0, 2, 3, 1).cpu()
- for i in range(len(im)):
- index = all_indices[i + start_index].item()
- filename = os.path.join(sdir, hashdir(index),
- name_template.format(index))
- saver.add(im[i].numpy(), filename)
- saver.join()
- linker = WorkerPool(MakeLinkWorker)
- for u in progress(range(len(indices)), desc='Making links'):
- udir = os.path.join(dirname, unitdir_template.format(u))
- os.makedirs(udir, exist_ok=True)
- for r in range(indices.shape[1]):
- index = indices[u,r].item()
- fn = name_template.format(index)
- # sourcename = os.path.join('..', shared_dir, fn)
- sourcename = os.path.join(sdir, hashdir(index), fn)
- targname = os.path.join(udir, fn)
- linker.add(sourcename, targname)
- if lightbox:
- copy_lightbox_to(udir)
- linker.join()
-
-def copy_lightbox_to(dirname):
- srcdir = os.path.realpath(
- os.path.join(os.getcwd(), os.path.dirname(__file__)))
- shutil.copy(os.path.join(srcdir, 'lightbox.html'),
- os.path.join(dirname, '+lightbox.html'))
-
-def hashdir(index):
- # To keep the number of files the shared directory lower, split it
- # into 100 subdirectories named as follows.
- return '%02d' % (index % 100)
-
-class SaveImageWorker(WorkerBase):
- # Saving images can be sped up by sending jpeg encoding and
- # file-writing work to a pool.
- def work(self, data, filename):
- Image.fromarray(data).save(filename, optimize=True, quality=100)
-
-class MakeLinkWorker(WorkerBase):
- # Creating symbolic links is a bit slow and can be done faster
- # in parallel rather than waiting for each to be created.
- def work(self, sourcename, targname):
- try:
- os.link(sourcename, targname)
- except OSError as e:
- if e.errno == errno.EEXIST:
- os.remove(targname)
- os.link(sourcename, targname)
- else:
- raise
-
-class MakeSymlinkWorker(WorkerBase):
- # Creating symbolic links is a bit slow and can be done faster
- # in parallel rather than waiting for each to be created.
- def work(self, sourcename, targname):
- try:
- os.symlink(sourcename, targname)
- except OSError as e:
- if e.errno == errno.EEXIST:
- os.remove(targname)
- os.symlink(sourcename, targname)
- else:
- raise
-
-if __name__ == '__main__':
- main()
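
The script is a CLI, but the two-pass flow wired together by main() (find the top-activating z codes per unit, then render and hard-link the images) can also be driven directly. A hedged sketch follows; the generator constructor and layer name are placeholders, and a CUDA device is assumed.

```python
# Hedged sketch of the two-pass flow wired together by main() above.
import torch
from netdissect.zdataset import standard_z_sample
from netdissect.nethook import retain_layers

model = my_generator().cuda()              # placeholder: any image GAN generator
retain_layers(model, ['layer4'])           # placeholder layer name to inspect
z_universe = standard_z_sample(2000, 512, seed=1).view(2000, 512, 1, 1)
indexes = get_all_highest_znums(model, z_universe, size=100)
save_chosen_unit_images('images', model, z_universe, indexes, lightbox=True)
```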
diff --git a/spaces/Hallucinate/demo/infer.py b/spaces/Hallucinate/demo/infer.py
deleted file mode 100644
index 1a40d754ad629856c29ede673b348018315f85bf..0000000000000000000000000000000000000000
--- a/spaces/Hallucinate/demo/infer.py
+++ /dev/null
@@ -1,228 +0,0 @@
-import glob
-import os
-
-import numpy as np
-import torch
-import torch.nn as nn
-from PIL import Image
-from torchvision import transforms
-from tqdm import tqdm
-
-import model_io
-import utils
-#from models import UnetAdaptiveBins
-#import UnetAdaptiveBins
-
-class UnetAdaptiveBins(nn.Module):
- def __init__(self, backend, n_bins=100, min_val=0.1, max_val=10, norm='linear'):
- super(UnetAdaptiveBins, self).__init__()
- self.num_classes = n_bins
- self.min_val = min_val
- self.max_val = max_val
- self.encoder = Encoder(backend)
- self.adaptive_bins_layer = mViT(128, n_query_channels=128, patch_size=16,
- dim_out=n_bins,
- embedding_dim=128, norm=norm)
-
- self.decoder = DecoderBN(num_classes=128)
- self.conv_out = nn.Sequential(nn.Conv2d(128, n_bins, kernel_size=1, stride=1, padding=0),
- nn.Softmax(dim=1))
-
- def forward(self, x, **kwargs):
- unet_out = self.decoder(self.encoder(x), **kwargs)
- bin_widths_normed, range_attention_maps = self.adaptive_bins_layer(unet_out)
- out = self.conv_out(range_attention_maps)
-
- # Post process
- # n, c, h, w = out.shape
- # hist = torch.sum(out.view(n, c, h * w), dim=2) / (h * w) # not used for training
-
- bin_widths = (self.max_val - self.min_val) * bin_widths_normed # .shape = N, dim_out
- bin_widths = nn.functional.pad(bin_widths, (1, 0), mode='constant', value=self.min_val)
- bin_edges = torch.cumsum(bin_widths, dim=1)
-
- centers = 0.5 * (bin_edges[:, :-1] + bin_edges[:, 1:])
- n, dout = centers.size()
- centers = centers.view(n, dout, 1, 1)
-
- pred = torch.sum(out * centers, dim=1, keepdim=True)
-
- return bin_edges, pred
-
- def get_1x_lr_params(self): # lr/10 learning rate
- return self.encoder.parameters()
-
- def get_10x_lr_params(self): # lr learning rate
- modules = [self.decoder, self.adaptive_bins_layer, self.conv_out]
- for m in modules:
- yield from m.parameters()
-
- @classmethod
- def build(cls, n_bins, **kwargs):
- basemodel_name = 'tf_efficientnet_b5_ap'
-
- print('Loading base model ({})...'.format(basemodel_name), end='')
- basemodel = torch.hub.load('rwightman/gen-efficientnet-pytorch', basemodel_name, pretrained=True)
- print('Done.')
-
- # Remove last layer
- print('Removing last two layers (global_pool & classifier).')
- basemodel.global_pool = nn.Identity()
- basemodel.classifier = nn.Identity()
-
- # Building Encoder-Decoder model
- print('Building Encoder-Decoder model..', end='')
- m = cls(basemodel, n_bins=n_bins, **kwargs)
- print('Done.')
- return m
-
-
-
-def _is_pil_image(img):
- return isinstance(img, Image.Image)
-
-
-def _is_numpy_image(img):
- return isinstance(img, np.ndarray) and (img.ndim in {2, 3})
-
-
-class ToTensor(object):
- def __init__(self):
- self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
-
- def __call__(self, image, target_size=(640, 480)):
- # image = image.resize(target_size)
- image = self.to_tensor(image)
- image = self.normalize(image)
- return image
-
- def to_tensor(self, pic):
- if not (_is_pil_image(pic) or _is_numpy_image(pic)):
- raise TypeError(
- 'pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
-
- if isinstance(pic, np.ndarray):
- img = torch.from_numpy(pic.transpose((2, 0, 1)))
- return img
-
- # handle PIL Image
- if pic.mode == 'I':
- img = torch.from_numpy(np.array(pic, np.int32, copy=False))
- elif pic.mode == 'I;16':
- img = torch.from_numpy(np.array(pic, np.int16, copy=False))
- else:
- img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
- # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
- if pic.mode == 'YCbCr':
- nchannel = 3
- elif pic.mode == 'I;16':
- nchannel = 1
- else:
- nchannel = len(pic.mode)
- img = img.view(pic.size[1], pic.size[0], nchannel)
-
- img = img.transpose(0, 1).transpose(0, 2).contiguous()
- if isinstance(img, torch.ByteTensor):
- return img.float()
- else:
- return img
-
-
-class InferenceHelper:
- def __init__(self, dataset='nyu', device='cuda:0'):
- self.toTensor = ToTensor()
- self.device = device
- if dataset == 'nyu':
- self.min_depth = 1e-3
- self.max_depth = 10
- self.saving_factor = 1000 # used to save in 16 bit
- model = UnetAdaptiveBins.build(n_bins=256, min_val=self.min_depth, max_val=self.max_depth)
- pretrained_path = "./pretrained/AdaBins_nyu.pt"
- elif dataset == 'kitti':
- self.min_depth = 1e-3
- self.max_depth = 80
- self.saving_factor = 256
- model = UnetAdaptiveBins.build(n_bins=256, min_val=self.min_depth, max_val=self.max_depth)
- pretrained_path = "./pretrained/AdaBins_kitti.pt"
- else:
- raise ValueError("dataset can be either 'nyu' or 'kitti' but got {}".format(dataset))
-
- model, _, _ = model_io.load_checkpoint(pretrained_path, model)
- model.eval()
- self.model = model.to(self.device)
-
- @torch.no_grad()
- def predict_pil(self, pil_image, visualized=False):
- # pil_image = pil_image.resize((640, 480))
- img = np.asarray(pil_image) / 255.
-
- img = self.toTensor(img).unsqueeze(0).float().to(self.device)
- bin_centers, pred = self.predict(img)
-
- if visualized:
- viz = utils.colorize(torch.from_numpy(pred).unsqueeze(0), vmin=None, vmax=None, cmap='magma')
- # pred = np.asarray(pred*1000, dtype='uint16')
- viz = Image.fromarray(viz)
- return bin_centers, pred, viz
- return bin_centers, pred
-
- @torch.no_grad()
- def predict(self, image):
- bins, pred = self.model(image)
- pred = np.clip(pred.cpu().numpy(), self.min_depth, self.max_depth)
-
- # Flip
- image = torch.Tensor(np.array(image.cpu().numpy())[..., ::-1].copy()).to(self.device)
- pred_lr = self.model(image)[-1]
- pred_lr = np.clip(pred_lr.cpu().numpy()[..., ::-1], self.min_depth, self.max_depth)
-
- # Take average of original and mirror
- final = 0.5 * (pred + pred_lr)
- final = nn.functional.interpolate(torch.Tensor(final), image.shape[-2:],
- mode='bilinear', align_corners=True).cpu().numpy()
-
- final[final < self.min_depth] = self.min_depth
- final[final > self.max_depth] = self.max_depth
- final[np.isinf(final)] = self.max_depth
- final[np.isnan(final)] = self.min_depth
-
- centers = 0.5 * (bins[:, 1:] + bins[:, :-1])
- centers = centers.cpu().squeeze().numpy()
- centers = centers[centers > self.min_depth]
- centers = centers[centers < self.max_depth]
-
- return centers, final
-
- @torch.no_grad()
- def predict_dir(self, test_dir, out_dir):
- os.makedirs(out_dir, exist_ok=True)
- transform = ToTensor()
- all_files = glob.glob(os.path.join(test_dir, "*"))
- self.model.eval()
- for f in tqdm(all_files):
- image = np.asarray(Image.open(f), dtype='float32') / 255.
- image = transform(image).unsqueeze(0).to(self.device)
-
- centers, final = self.predict(image)
- # final = final.squeeze().cpu().numpy()
-
- final = (final * self.saving_factor).astype('uint16')
- basename = os.path.basename(f).split('.')[0]
- save_path = os.path.join(out_dir, basename + ".png")
-
- Image.fromarray(final.squeeze()).save(save_path)
-
-
-if __name__ == '__main__':
- import matplotlib.pyplot as plt
- from time import time
-
- img = Image.open("test_imgs/classroom__rgb_00283.jpg")
- start = time()
- inferHelper = InferenceHelper()
- centers, pred = inferHelper.predict_pil(img)
- print(f"took :{time() - start}s")
- plt.imshow(pred.squeeze(), cmap='magma_r')
- plt.show()
-
-
\ No newline at end of file
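
A hedged usage sketch for the helper above (not from the deleted space): it assumes a CUDA device, the ./pretrained/AdaBins_nyu.pt checkpoint referenced in __init__, and that the EfficientNet backbone can be fetched via torch.hub; the image path is a placeholder.

```python
# Hedged usage sketch for InferenceHelper defined above.
from PIL import Image

helper = InferenceHelper(dataset='nyu')                 # loads ./pretrained/AdaBins_nyu.pt
img = Image.open('my_room.jpg')                         # placeholder input image
bin_centers, depth, viz = helper.predict_pil(img, visualized=True)
print(depth.shape, float(depth.min()), float(depth.max()))  # metric depth, clipped to [1e-3, 10] m
viz.save('depth_colored.png')                           # magma-colorized depth map
```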
diff --git a/spaces/Hallucinate/demo/k_diffusion/config.py b/spaces/Hallucinate/demo/k_diffusion/config.py
deleted file mode 100644
index f9de7bc203216b0a4e26a6d18c913fedc84dbe46..0000000000000000000000000000000000000000
--- a/spaces/Hallucinate/demo/k_diffusion/config.py
+++ /dev/null
@@ -1,115 +0,0 @@
-from functools import partial
-import json
-import math
-import warnings
-
-from jsonmerge import merge
-
-from . import augmentation, layers, models, utils
-
-
-def load_config(file):
- defaults = {
- 'model': {
- 'sigma_data': 1.,
- 'patch_size': 1,
- 'dropout_rate': 0.,
- 'augment_wrapper': True,
- 'augment_prob': 0.,
- 'mapping_cond_dim': 0,
- 'unet_cond_dim': 0,
- 'cross_cond_dim': 0,
- 'cross_attn_depths': None,
- 'skip_stages': 0,
- 'has_variance': False,
- 'loss_config': 'karras',
- },
- 'dataset': {
- 'type': 'imagefolder',
- },
- 'optimizer': {
- 'type': 'adamw',
- 'lr': 1e-4,
- 'betas': [0.95, 0.999],
- 'eps': 1e-6,
- 'weight_decay': 1e-3,
- },
- 'lr_sched': {
- 'type': 'constant',
- },
- 'ema_sched': {
- 'type': 'inverse',
- 'power': 0.6667,
- 'max_value': 0.9999
- },
- }
- config = json.load(file)
- return merge(defaults, config)
-
-
-def make_model(config):
- config = config['model']
- assert config['type'] == 'image_v1'
- model = models.ImageDenoiserModelV1(
- config['input_channels'],
- config['mapping_out'],
- config['depths'],
- config['channels'],
- config['self_attn_depths'],
- config['cross_attn_depths'],
- patch_size=config['patch_size'],
- dropout_rate=config['dropout_rate'],
- mapping_cond_dim=config['mapping_cond_dim'] + (9 if config['augment_wrapper'] else 0),
- unet_cond_dim=config['unet_cond_dim'],
- cross_cond_dim=config['cross_cond_dim'],
- skip_stages=config['skip_stages'],
- has_variance=config['has_variance'],
- )
- if config['augment_wrapper']:
- model = augmentation.KarrasAugmentWrapper(model)
- return model
-
-
-def make_denoiser_wrapper(config):
- config = config['model']
- sigma_data = config.get('sigma_data', 1.)
- has_variance = config.get('has_variance', False)
- loss_config = config.get('loss_config', 'karras')
- if loss_config == 'karras':
- if not has_variance:
- return partial(layers.Denoiser, sigma_data=sigma_data)
- return partial(layers.DenoiserWithVariance, sigma_data=sigma_data)
- if loss_config == 'simple':
- if has_variance:
- raise ValueError('Simple loss config does not support a variance output')
- return partial(layers.SimpleLossDenoiser, sigma_data=sigma_data)
- raise ValueError('Unknown loss config type')
-
-
-def make_sample_density(config):
- sd_config = config['sigma_sample_density']
- sigma_data = config['sigma_data']
- if sd_config['type'] == 'lognormal':
- loc = sd_config['mean'] if 'mean' in sd_config else sd_config['loc']
- scale = sd_config['std'] if 'std' in sd_config else sd_config['scale']
- return partial(utils.rand_log_normal, loc=loc, scale=scale)
- if sd_config['type'] == 'loglogistic':
- loc = sd_config['loc'] if 'loc' in sd_config else math.log(sigma_data)
- scale = sd_config['scale'] if 'scale' in sd_config else 0.5
- min_value = sd_config['min_value'] if 'min_value' in sd_config else 0.
- max_value = sd_config['max_value'] if 'max_value' in sd_config else float('inf')
- return partial(utils.rand_log_logistic, loc=loc, scale=scale, min_value=min_value, max_value=max_value)
- if sd_config['type'] == 'loguniform':
- min_value = sd_config['min_value'] if 'min_value' in sd_config else config['sigma_min']
- max_value = sd_config['max_value'] if 'max_value' in sd_config else config['sigma_max']
- return partial(utils.rand_log_uniform, min_value=min_value, max_value=max_value)
- if sd_config['type'] in {'v-diffusion', 'cosine'}:
- min_value = sd_config['min_value'] if 'min_value' in sd_config else 1e-3
- max_value = sd_config['max_value'] if 'max_value' in sd_config else 1e3
- return partial(utils.rand_v_diffusion, sigma_data=sigma_data, min_value=min_value, max_value=max_value)
- if sd_config['type'] == 'split-lognormal':
- loc = sd_config['mean'] if 'mean' in sd_config else sd_config['loc']
- scale_1 = sd_config['std_1'] if 'std_1' in sd_config else sd_config['scale_1']
- scale_2 = sd_config['std_2'] if 'std_2' in sd_config else sd_config['scale_2']
- return partial(utils.rand_split_log_normal, loc=loc, scale_1=scale_1, scale_2=scale_2)
- raise ValueError('Unknown sample density type')
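
load_config() merges a user JSON file over the defaults above, so only the model-specific fields have to be supplied before make_model() can run. The sketch below is illustrative only (not a config shipped with the space); the field values are made up.

```python
# Hedged sketch: a minimal user config that, merged with the defaults in
# load_config(), is enough for make_model() above to build an image_v1 model.
import io, json

user_cfg = {
    "model": {
        "type": "image_v1",
        "input_channels": 3,
        "mapping_out": 256,
        "depths": [2, 2, 4],
        "channels": [128, 256, 512],
        "self_attn_depths": [False, False, True],
    }
}
config = load_config(io.StringIO(json.dumps(user_cfg)))   # defaults fill in the rest
model = make_model(config)                                # wrapped by KarrasAugmentWrapper
denoiser_factory = make_denoiser_wrapper(config)          # partial(layers.Denoiser, sigma_data=1.0)
```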
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/quantization/scalar/ops.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/quantization/scalar/ops.py
deleted file mode 100644
index c74f530380b393ffc53ecfb1398000079495772f..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/quantization/scalar/ops.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-
-
-def emulate_int(w, bits, method, scale=None, zero_point=None):
- q = globals()[f"emulate_int8_{method}"]
- return q(w, scale=scale, zero_point=zero_point, bits=bits)
-
-
-def quantize(w, scale, zero_point, bits=8):
- # In the default behavior, max_val = 255.
- max_val = 2 ** bits - 1
- return (
- torch.clamp(torch.round(w / scale + zero_point), 0, max_val) - zero_point
- ) * scale
-
-
-def emulate_int8_histogram(w, scale=None, zero_point=None, bits=8):
- if scale is None:
- obs = torch.quantization.observer.HistogramObserver()
- obs.to(device=w.device)
- _ = obs(w.float())
- scale, zero_point = obs.calculate_qparams()
- scale = scale.cuda().type_as(w)
- zero_point = zero_point.cuda().type_as(w)
- return quantize(w, scale, zero_point, bits=bits), scale, zero_point
-
-
-def emulate_int8_channel(w, scale=None, zero_point=None, bits=8):
- if scale is None:
- obs = torch.quantization.observer.PerChannelMinMaxObserver(
- ch_axis=-1, qscheme=torch.per_channel_symmetric
- )
- obs.to(device=w.device)
- _ = obs(w)
- scale, zero_point, ch_axis = obs.get_qparams()
- scale = scale.cuda().type_as(w)
- zero_point = zero_point.cuda().type_as(w)
- return quantize(w, scale, zero_point, bits=bits), scale, zero_point
-
-
-def emulate_int8_tensor(w, scale=None, zero_point=None, bits=8):
- if scale is None:
- obs = torch.quantization.observer.MinMaxObserver()
- obs.to(device=w.device)
- _ = obs(w)
- scale, zero_point = obs.calculate_qparams()
- scale = scale.cuda().type_as(w)
- zero_point = zero_point.cuda().type_as(w)
- return quantize(w, scale, zero_point, bits=bits), scale, zero_point
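
A hedged sketch of round-tripping a weight tensor through the emulation above; note the helpers call .cuda() on the observer outputs, so a GPU is assumed.

```python
# Hedged usage sketch for the int8 emulation helpers above (CUDA assumed).
import torch

w = torch.randn(512, 512, device='cuda')
w_q, scale, zero_point = emulate_int(w, bits=8, method='histogram')
print(scale.item(), zero_point.item())
print((w - w_q).abs().mean().item())   # mean absolute quantization error
```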
diff --git a/spaces/Harveenchadha/en_to_indic_translation/indic_nlp_library/indicnlp/morph/unsupervised_morph.py b/spaces/Harveenchadha/en_to_indic_translation/indic_nlp_library/indicnlp/morph/unsupervised_morph.py
deleted file mode 100644
index 55c70f13e0ff7d4e89726e6b9c7932649afdf068..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/en_to_indic_translation/indic_nlp_library/indicnlp/morph/unsupervised_morph.py
+++ /dev/null
@@ -1,142 +0,0 @@
-#
-# Copyright (c) 2013-present, Anoop Kunchukuttan
-# All rights reserved.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-#
-
-import codecs, sys, itertools,re,os
-import morfessor
-
-from functools import lru_cache
-
-from indicnlp import langinfo
-from indicnlp import common
-from indicnlp.tokenize import indic_tokenize
-
-# Unsupervised Morphological Analyser for Indian languages.
-#
-# @author Anoop Kunchukuttan
-#
-
-class MorphAnalyzerI(object):
- """
- Interface for Morph Analyzer
- """
-
- def morph_analyze(word):
- pass
-
- def morph_analyze_document(tokens):
- pass
-
-class UnsupervisedMorphAnalyzer(MorphAnalyzerI):
- """
- Unsupervised Morphological analyser built using Morfessor 2.0
- """
-
- def __init__(self,lang,add_marker=False):
- self.lang=lang
- self.add_marker=add_marker
-
- io = morfessor.MorfessorIO()
- self._morfessor_model=io.read_any_model(os.path.join(common.INDIC_RESOURCES_PATH,'morph','morfessor','{}.model'.format(lang)))
-
- self._script_range_pat=r'^[{}-{}]+$'.format(chr(langinfo.SCRIPT_RANGES[lang][0]),chr(langinfo.SCRIPT_RANGES[lang][1]))
- self._script_check_re=re.compile(self._script_range_pat)
-
- def _contains_number(self,text):
- if self.lang in langinfo.SCRIPT_RANGES:
- for c in text:
- offset=ord(c)-langinfo.SCRIPT_RANGES[self.lang][0]
- if offset >=langinfo.NUMERIC_OFFSET_START and offset <= langinfo.NUMERIC_OFFSET_END:
- return True
- return False
-
- def _morphanalysis_needed(self,word):
- return self._script_check_re.match(word) and not self._contains_number(word)
-
- @lru_cache(maxsize=16384)
- def morph_analyze(self,word):
- """
- Morphanalyzes a single word and returns a list of component morphemes
-
- @param word: string input word
- """
- m_list=[]
- if self._morphanalysis_needed(word):
- val=self._morfessor_model.viterbi_segment(word)
- m_list=val[0]
- if self.add_marker:
- m_list= [ '{}_S_'.format(m) if i>0 else '{}_R_'.format(m) for i,m in enumerate(m_list)]
- else:
- if self.add_marker:
- word='{}_E_'.format(word)
- m_list=[word]
- return m_list
-
- ### Older implementation
- #val=self._morfessor_model.viterbi_segment(word)
- #m_list=val[0]
- #if self.add_marker:
- # m_list= [ u'{}_S_'.format(m) if i>0 else u'{}_R_'.format(m) for i,m in enumerate(m_list)]
- #return m_list
-
-
- def morph_analyze_document(self,tokens):
- """
- Morphanalyzes a document, represented as a list of tokens
- Each word is morphanalyzed and result is a list of morphemes constituting the document
-
- @param tokens: string sequence of words
-
- @return list of segments in the document after morph analysis
- """
-
- out_tokens=[]
- for token in tokens:
- morphs=self.morph_analyze(token)
- out_tokens.extend(morphs)
- return out_tokens
-
- #### Older implementation
- #out_tokens=[]
- #for token in tokens:
- # if self._morphanalysis_needed(token):
- # morphs=self.morph_analyze(token)
- # out_tokens.extend(morphs)
- # else:
- # if self.add_marker:
- # token=u'{}_E_'.format(token)
- # out_tokens.append(token)
- #return out_tokens
-
-
-if __name__ == '__main__':
-
- if len(sys.argv)<4:
- print("Usage: python unsupervised_morph.py []")
- sys.exit(1)
-
- language=sys.argv[3]
- common.INDIC_RESOURCES_PATH=sys.argv[4]
-
- add_marker=False
-
- if len(sys.argv)==6:
- add_marker= True if sys.argv[5] == 'True' else False
-
- print('Loading morph analyser for ' + language)
- analyzer=UnsupervisedMorphAnalyzer(language,add_marker)
- print('Loaded morph analyser for ' + language)
-
- with codecs.open(sys.argv[1],'r','utf-8') as ifile:
- with codecs.open(sys.argv[2],'w','utf-8') as ofile:
- for line in ifile.readlines():
- line=line.strip()
- tokens=indic_tokenize.trivial_tokenize(line)
- morph_tokens=analyzer.morph_analyze_document(tokens)
- ofile.write(' '.join(morph_tokens))
- ofile.write('\n')
-
diff --git a/spaces/Harveenchadha/oiTrans/subword-nmt/subword_nmt/bpe_toy.py b/spaces/Harveenchadha/oiTrans/subword-nmt/subword_nmt/bpe_toy.py
deleted file mode 100644
index 0421b255861cb56eb40bf58a8225807cc396e968..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/oiTrans/subword-nmt/subword_nmt/bpe_toy.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Author: Rico Sennrich
-
-"""Use byte pair encoding (BPE) to learn a variable-length encoding of the vocabulary in a text.
-Unlike the original BPE, it does not compress the plain text, but can be used to reduce the vocabulary
-of a text to a configurable number of symbols, with only a small increase in the number of tokens.
-This is an (inefficient) toy implementation that shows the algorithm. For processing large datasets,
-indexing and incremental updates can be used to speed up the implementation (see learn_bpe.py).
-
-Reference:
-Rico Sennrich, Barry Haddow and Alexandra Birch (2016). Neural Machine Translation of Rare Words with Subword Units.
-Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL 2016). Berlin, Germany.
-"""
-
-
-import re
-import sys
-import collections
-
-def get_stats(vocab):
- pairs = collections.defaultdict(int)
- for word, freq in vocab.items():
- symbols = word.split()
- for i in range(len(symbols)-1):
- pairs[symbols[i],symbols[i+1]] += freq
- return pairs
-
-def merge_vocab(pair, v_in):
- v_out = {}
- bigram_pattern = re.escape(' '.join(pair))
- p = re.compile(r'(?<!\S)' + bigram_pattern + r'(?!\S)')
- for word in v_in:
- w_out = p.sub(''.join(pair), word)
- v_out[w_out] = v_in[word]
- return v_out
-
-vocab = {'l o w </w>' : 5, 'l o w e r </w>' : 2,
- 'n e w e s t </w>' : 6, 'w i d e s t </w>' : 3}
-num_merges = 15
-for i in range(num_merges):
- pairs = get_stats(vocab)
- try:
- best = max(pairs, key=pairs.get)
- except ValueError:
- break
- if pairs[best] < 2:
- sys.stderr.write('no pair has frequency > 1. Stopping\n')
- break
- vocab = merge_vocab(best, vocab)
- print(best)
diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/ipython_ext.py b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/ipython_ext.py
deleted file mode 100644
index d601c6365bf30925f535e5ecb8a7240ecc333bc0..0000000000000000000000000000000000000000
--- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/ipython_ext.py
+++ /dev/null
@@ -1,17 +0,0 @@
-try:
- from IPython.core.magic import needs_local_scope, register_cell_magic
-except ImportError:
- pass
-
-import gradio
-
-
-def load_ipython_extension(ipython):
- __demo = gradio.Blocks()
-
- @register_cell_magic
- @needs_local_scope
- def blocks(line, cell, local_ns=None):
- with __demo.clear():
- exec(cell, None, local_ns)
- __demo.launch(quiet=True)
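
For context, the extension above is used from a notebook rather than imported directly. A hedged sketch of the intended workflow, shown as comments because %%-magics are not plain Python and each snippet is its own notebook cell:

```python
# Hedged notebook usage of the cell magic registered above:
#
#   %load_ext gradio
#
#   %%blocks
#   import gradio as gr
#   gr.Markdown("Hello from a notebook cell")
#
# Re-running the %%blocks cell clears the shared Blocks instance and
# relaunches it via __demo.launch(quiet=True).
```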
diff --git a/spaces/HugoDzz/super-godot-galaxy/build/_app/immutable/nodes/1.7a9a475b.js b/spaces/HugoDzz/super-godot-galaxy/build/_app/immutable/nodes/1.7a9a475b.js
deleted file mode 100644
index 2b0537ce2b77bf7fe2ba26613c57e2a4e7955608..0000000000000000000000000000000000000000
--- a/spaces/HugoDzz/super-godot-galaxy/build/_app/immutable/nodes/1.7a9a475b.js
+++ /dev/null
@@ -1 +0,0 @@
-import{S as x,i as H,s as S,k as u,q as h,a as g,l as d,m as v,r as b,h as m,c as k,b as _,G as E,u as $,H as q,I as y}from"../chunks/index.9af7eb9c.js";import{p as C}from"../chunks/stores.be116e24.js";function G(l){var f;let a,t=l[0].status+"",r,o,n,p=((f=l[0].error)==null?void 0:f.message)+"",c;return{c(){a=u("h1"),r=h(t),o=g(),n=u("p"),c=h(p)},l(e){a=d(e,"H1",{});var s=v(a);r=b(s,t),s.forEach(m),o=k(e),n=d(e,"P",{});var i=v(n);c=b(i,p),i.forEach(m)},m(e,s){_(e,a,s),E(a,r),_(e,o,s),_(e,n,s),E(n,c)},p(e,[s]){var i;s&1&&t!==(t=e[0].status+"")&&$(r,t),s&1&&p!==(p=((i=e[0].error)==null?void 0:i.message)+"")&&$(c,p)},i:q,o:q,d(e){e&&m(a),e&&m(o),e&&m(n)}}}function I(l,a,t){let r;return y(l,C,o=>t(0,r=o)),[r]}class w extends x{constructor(a){super(),H(this,a,I,G,S,{})}}export{w as component};
diff --git a/spaces/Hunter731/Unity3D-RTS/style.css b/spaces/Hunter731/Unity3D-RTS/style.css
deleted file mode 100644
index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000
--- a/spaces/Hunter731/Unity3D-RTS/style.css
+++ /dev/null
@@ -1,28 +0,0 @@
-body {
- padding: 2rem;
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
- font-size: 16px;
- margin-top: 0;
-}
-
-p {
- color: rgb(107, 114, 128);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-.card {
- max-width: 620px;
- margin: 0 auto;
- padding: 16px;
- border: 1px solid lightgray;
- border-radius: 16px;
-}
-
-.card p:last-child {
- margin-bottom: 0;
-}
diff --git a/spaces/Ibtehaj10/cheating-detection-FYP/yolovs5/utils/flask_rest_api/example_request.py b/spaces/Ibtehaj10/cheating-detection-FYP/yolovs5/utils/flask_rest_api/example_request.py
deleted file mode 100644
index 773ad893296750992789a77a59e0f5ad657d0e35..0000000000000000000000000000000000000000
--- a/spaces/Ibtehaj10/cheating-detection-FYP/yolovs5/utils/flask_rest_api/example_request.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Perform test request
-"""
-
-import pprint
-
-import requests
-
-DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"
-IMAGE = "zidane.jpg"
-
-# Read image
-with open(IMAGE, "rb") as f:
- image_data = f.read()
-
-response = requests.post(DETECTION_URL, files={"image": image_data}).json()
-
-pprint.pprint(response)
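
A hedged variant of the same request with basic error handling; it assumes the companion Flask server from this folder (restapi.py) is running locally and that each detection record carries the usual YOLOv5 fields.

```python
# Hedged variant of the request above with error handling (server assumed running).
import requests

DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"
with open("zidane.jpg", "rb") as f:
    resp = requests.post(DETECTION_URL, files={"image": f}, timeout=30)
resp.raise_for_status()
for det in resp.json():                       # one dict per detected object
    print(det.get("name"), det.get("confidence"))
```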
diff --git a/spaces/Illumotion/Koboldcpp/common/common.h b/spaces/Illumotion/Koboldcpp/common/common.h
deleted file mode 100644
index c802152791797c4e30ec5544be0effeea03ae136..0000000000000000000000000000000000000000
--- a/spaces/Illumotion/Koboldcpp/common/common.h
+++ /dev/null
@@ -1,225 +0,0 @@
-// Various helper functions and utilities
-
-#pragma once
-
-#include "llama.h"
-
-#define LOG_NO_FILE_LINE_FUNCTION
-#include "log.h"
-
-#include <string>
-#include <vector>
-#include <random>
-#include <thread>
-#include <unordered_map>
-#include <tuple>
-
-#ifdef _WIN32
-#define DIRECTORY_SEPARATOR '\\'
-#else
-#define DIRECTORY_SEPARATOR '/'
-#endif // _WIN32
-
-#define die(msg) do { fputs("error: " msg "\n", stderr); exit(1); } while (0)
-#define die_fmt(fmt, ...) do { fprintf(stderr, "error: " fmt "\n", __VA_ARGS__); exit(1); } while (0)
-
-#define print_build_info() do { \
- fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT); \
- fprintf(stderr, "%s: built with %s for %s\n", __func__, BUILD_COMPILER, BUILD_TARGET); \
-} while(0)
-
-//
-// CLI argument parsing
-//
-int32_t get_num_physical_cores();
-
-struct gpt_params {
- uint32_t seed = -1; // RNG seed
- int32_t n_threads = get_num_physical_cores();
- int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads)
- int32_t n_predict = -1; // new tokens to predict
- int32_t n_ctx = 512; // context size
- int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
- int32_t n_keep = 0; // number of tokens to keep from initial prompt
- int32_t n_draft = 16; // number of tokens to draft during speculative decoding
- int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
- int32_t n_parallel = 1; // number of parallel sequences to decode
- int32_t n_sequences = 1; // number of sequences to decode
- int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default)
- int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default)
- int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
- float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
- int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens.
- int32_t n_beams = 0; // if non-zero then use beam search of given width.
- float rope_freq_base = 0.0f; // RoPE base frequency
- float rope_freq_scale = 0.0f; // RoPE frequency scaling factor
-
- // sampling parameters
- int32_t top_k = 40; // <= 0 to use vocab size
- float top_p = 0.95f; // 1.0 = disabled
- float tfs_z = 1.00f; // 1.0 = disabled
- float typical_p = 1.00f; // 1.0 = disabled
- float temp = 0.80f; // 1.0 = disabled
- float repeat_penalty = 1.10f; // 1.0 = disabled
- int32_t repeat_last_n = 64; // last n tokens to penalize (0 = disable penalty, -1 = context size)
- float frequency_penalty = 0.00f; // 0.0 = disabled
- float presence_penalty = 0.00f; // 0.0 = disabled
- int32_t mirostat = 0; // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
- float mirostat_tau = 5.00f; // target entropy
- float mirostat_eta = 0.10f; // learning rate
-
- std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
-
- // Classifier-Free Guidance
- // https://arxiv.org/abs/2306.17806
- std::string cfg_negative_prompt; // string to help guidance
- float cfg_scale = 1.f; // How strong is guidance
-
- std::string model = "models/7B/ggml-model-f16.gguf"; // model path
- std::string model_draft = ""; // draft model for speculative decoding
- std::string model_alias = "unknown"; // model alias
- std::string prompt = "";
- std::string prompt_file = ""; // store the external prompt file name
- std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
- std::string input_prefix = ""; // string to prefix user inputs with
- std::string input_suffix = ""; // string to suffix user inputs with
- std::string grammar = ""; // optional BNF-like grammar to constrain sampling
- std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
- std::string logdir = ""; // directory in which to save YAML log files
-
- std::vector<std::tuple<std::string, float>> lora_adapter; // lora adapter path with user defined scale
- std::string lora_base = ""; // base model path for the lora adapter
-
- int ppl_stride = 0; // stride for perplexity calculations. If left at 0, the pre-existing approach will be used.
- int ppl_output_type = 0; // = 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line
- // (which is more convenient to use for plotting)
- //
- bool hellaswag = false; // compute HellaSwag score over random tasks from datafile supplied in prompt
- size_t hellaswag_tasks = 400; // number of tasks to use when computing the HellaSwag score
-
- bool mul_mat_q = true; // if true, use mul_mat_q kernels instead of cuBLAS
- bool memory_f16 = true; // use f16 instead of f32 for memory kv
- bool random_prompt = false; // do not randomize prompt if none provided
- bool use_color = false; // use color to distinguish generations and inputs
- bool interactive = false; // interactive mode
- bool prompt_cache_all = false; // save user input and generations to prompt cache
- bool prompt_cache_ro = false; // open the prompt cache read-only and do not update it
-
- bool embedding = false; // get only sentence embedding
- bool escape = false; // escape "\n", "\r", "\t", "\'", "\"", and "\\"
- bool interactive_first = false; // wait for user input immediately
- bool multiline_input = false; // reverse the usage of `\`
- bool simple_io = false; // improves compatibility with subprocesses and limited consoles
- bool cont_batching = false; // insert new sequences for decoding on-the-fly
-
- bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix
- bool ignore_eos = false; // ignore generated EOS tokens
- bool instruct = false; // instruction mode (used for Alpaca models)
- bool penalize_nl = true; // consider newlines as a repeatable token
- bool logits_all = false; // return logits for all tokens in the batch
- bool use_mmap = true; // use mmap for faster loads
- bool use_mlock = false; // use mlock to keep model in memory
- bool numa = false; // attempt optimizations that help on some NUMA systems
- bool verbose_prompt = false; // print prompt tokens before generation
- bool infill = false; // use infill mode
-};
-
-bool gpt_params_parse(int argc, char ** argv, gpt_params & params);
-
-void gpt_print_usage(int argc, char ** argv, const gpt_params & params);
-
-std::string get_system_info(const gpt_params & params);
-
-std::string gpt_random_prompt(std::mt19937 & rng);
-
-void process_escapes(std::string& input);
-
-//
-// Model utils
-//
-
-std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(gpt_params & params);
-struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & params);
-struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params);
-
-//
-// Vocab utils
-//
-
-// tokenizes a string into a vector of tokens
-// should work similar to Python's `tokenizer.encode`
-std::vector<llama_token> llama_tokenize(
- const struct llama_context * ctx,
- const std::string & text,
- bool add_bos);
-
-std::vector<llama_token> llama_tokenize(
- const struct llama_model * model,
- const std::string & text,
- bool add_bos);
-
-// tokenizes a token into a piece
-// should work similar to Python's `tokenizer.id_to_piece`
-std::string llama_token_to_piece(
- const struct llama_context * ctx,
- llama_token token);
-
-// TODO: these should be moved in llama.h C-style API under single `llama_detokenize` function
-// that takes into account the tokenizer type and decides how to handle the leading space
-//
-// detokenizes a vector of tokens into a string
-// should work similar to Python's `tokenizer.decode`
-// removes the leading space from the first non-BOS token
-std::string llama_detokenize_spm(
- llama_context * ctx,
- const std::vector<llama_token> & tokens);
-
-// detokenizes a vector of tokens into a string
-// should work similar to Python's `tokenizer.decode`
-std::string llama_detokenize_bpe(
- llama_context * ctx,
- const std::vector<llama_token> & tokens);
-
-//
-// Sampling utils
-//
-
-// this is a common sampling function used across the examples for convenience
-// it can serve as a starting point for implementing your own sampling function
-//
-// required:
-// - ctx: context to use for sampling
-// - params: sampling parameters
-//
-// optional:
-// - ctx_guidance: context to use for classifier-free guidance, ignore if NULL
-// - grammar: grammar to use for sampling, ignore if NULL
-// - last_tokens: needed for repetition penalty, ignore if empty
-// - idx: sample from llama_get_logits_ith(ctx, idx)
-//
-// returns:
-// - token: sampled token
-// - candidates: vector of candidate tokens
-//
-llama_token llama_sample_token(
- struct llama_context * ctx,
- struct llama_context * ctx_guidance,
- struct llama_grammar * grammar,
- const struct gpt_params & params,
- const std::vector<llama_token> & last_tokens,
- std::vector<llama_token_data> & candidates,
- int idx = 0);
-
-//
-// YAML utils
-//
-
-bool create_directory_with_parents(const std::string & path);
-void dump_vector_float_yaml(FILE * stream, const char * prop_name, const std::vector<float> & data);
-void dump_vector_int_yaml(FILE * stream, const char * prop_name, const std::vector<int> & data);
-void dump_string_yaml_multiline(FILE * stream, const char * prop_name, const char * data);
-std::string get_sortable_timestamp();
-
-void dump_non_result_info_yaml(
- FILE * stream, const gpt_params & params, const llama_context * lctx,
- const std::string & timestamp, const std::vector<llama_token> & prompt_tokens, const char * model_desc);
diff --git a/spaces/Illumotion/Koboldcpp/otherarch/ggml_v2-opencl-legacy.c b/spaces/Illumotion/Koboldcpp/otherarch/ggml_v2-opencl-legacy.c
deleted file mode 100644
index 50ade6de38b8fe695f661c26d0ea0efbe6125673..0000000000000000000000000000000000000000
--- a/spaces/Illumotion/Koboldcpp/otherarch/ggml_v2-opencl-legacy.c
+++ /dev/null
@@ -1,427 +0,0 @@
-#include "ggml_v2-opencl-legacy.h"
-
-#define CL_TARGET_OPENCL_VERSION 110
-#include <clblast_c.h>
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#include "ggml_v2.h"
-
-#define MULTILINE_QUOTE(...) #__VA_ARGS__
-const char * clblast_dequant_legacy = MULTILINE_QUOTE(
-
-struct block_q4_0
-{
- float d;
- uchar qs[16];
-};
-
-__kernel void dequantize_row_q4_0(__global struct block_q4_0* blocks, __global float* result) {
- const uint i = get_global_id(0) / 32;
- const uint l = get_local_id(0);
-
- const float d = blocks[i].d;
-
- const uchar vi = blocks[i].qs[l];
-
- const uint index = i*32 + l*2;
- result[index + 0] = ((vi & 0xf) - 8)*d;
- result[index + 1] = ((vi >> 4) - 8)*d;
-}
-
-struct block_q4_1
-{
- float d;
- float m;
- uchar qs[16];
-};
-
-__kernel void dequantize_row_q4_1(__global struct block_q4_1* blocks, __global float* result) {
- const uint i = get_global_id(0) / 32;
- const uint l = get_local_id(0);
-
- const float d = blocks[i].d;
- const float m = blocks[i].m;
-
- const uchar vi = blocks[i].qs[l];
-
- const uint index = i*32 + l*2;
- result[index + 0] = (vi & 0xf) * d + m;
- result[index + 1] = (vi >> 4) * d + m;
-}
-
-struct block_q4_2
-{
- ushort d;
- uchar qs[8];
-};
-
-__kernel void dequantize_row_q4_2(__global struct block_q4_2* blocks, __global float* result) {
- const uint i = get_global_id(0) / 16;
- const uint l = get_local_id(0);
-
- const float d = vload_half(0, (__global half*) &blocks[i].d);
-
- const uchar vi = blocks[i].qs[l];
-
- const uint index = i*16 + l*2;
- result[index + 0] = ((vi & 0xf) - 8)*d;
- result[index + 1] = ((vi >> 4) - 8)*d;
-}
-
-struct block_q4_3
-{
- ushort d;
- ushort m;
- uchar qs[8];
-};
-
-__kernel void dequantize_row_q4_3(__global struct block_q4_3* blocks, __global float* result) {
- const uint i = get_global_id(0) / 16;
- const uint l = get_local_id(0);
-
- const float d = vload_half(0, (__global half*) &(blocks[i].d));
- const float m = vload_half(0, (__global half*) &(blocks[i].m));
-
- const uchar vi = blocks[i].qs[l];
-
- const uint index = i*16 + l*2;
- result[index + 0] = (vi & 0xf) * d + m;
- result[index + 1] = (vi >> 4) * d + m;
-}
-
-struct block_q5_0
-{
- float d;
- uint qh;
- uchar qs[16];
-};
-
-__kernel void dequantize_row_q5_0(__global struct block_q5_0* blocks, __global float* result) {
- const uint i = get_global_id(0) / 32;
- const uint l = get_local_id(0);
-
- const float d = blocks[i].d;
-
- const uchar vi = blocks[i].qs[l];
-
- const uint l2 = l * 2;
-
- const uchar vh0 = ((blocks[i].qh & (1 << (l2 + 0))) >> (l2 + 0)) << 4;
- const uchar vh1 = ((blocks[i].qh & (1 << (l2 + 1))) >> (l2 + 1)) << 4;
-
- const uint index = i*32 + l2;
- result[index + 0] = (((vi & 0xf) | vh0) - 16)*d;
- result[index + 1] = (((vi >> 4) | vh1) - 16)*d;
-}
-
-struct block_q5_1
-{
- ushort d;
- ushort m;
- uint qh;
- uchar qs[16];
-};
-
-__kernel void dequantize_row_q5_1(__global struct block_q5_1* blocks, __global float* result) {
- const uint i = get_global_id(0) / 32;
- const uint l = get_local_id(0);
-
- const float d = vload_half(0, (__global half*) &blocks[i].d);
- const float m = vload_half(0, (__global half*) &blocks[i].m);
-
- const uchar vi = blocks[i].qs[l];
-
- const uint l2 = l * 2;
-
- const uchar vh0 = ((blocks[i].qh & (1 << (l2 + 0))) >> (l2 + 0)) << 4;
- const uchar vh1 = ((blocks[i].qh & (1 << (l2 + 1))) >> (l2 + 1)) << 4;
-
- const uint index = i*32 + l2;
- result[index + 0] = ((vi & 0xf) | vh0)*d + m;
- result[index + 1] = ((vi >> 4) | vh1)*d + m;
-}
-
-struct block_q8_0
-{
- float d;
- char qs[32];
-};
-
-__kernel void dequantize_row_q8_0(__global struct block_q8_0* blocks, __global float* result) {
- const uint i = get_global_id(0) / 32;
- const uint l = get_local_id(0);
-
- result[i*32 + l] = blocks[i].qs[l] * blocks[i].d;
-}
-
-);
-
-#define CL_CHECK(err, name) \
- do { \
- cl_int err_ = (err); \
- if (err_ != CL_SUCCESS) { \
- fprintf(stderr, "OpenCL %s error %d at %s:%d\n", name, err_, __FILE__, __LINE__); \
- fprintf(stderr, "You may be out of VRAM. Please check if you have enough.\n"); \
- exit(1); \
- } \
- } while (0)
-
-#define QK5_0 32
-typedef struct {
- ggml_v2_fp16_t d; // delta
- uint8_t qh[4]; // 5-th bit of quants
- uint8_t qs[QK5_0 / 2]; // nibbles / quants
-} block_q5_0;
-
-
-typedef struct {
- float d; // delta
- uint32_t qh; // 5-th bit of quants
- uint8_t qs[QK5_0 / 2]; // nibbles / quants
-} cl_block_q5_0;
-
-static cl_platform_id platform;
-static cl_device_id device;
-static cl_context context;
-static cl_command_queue queue;
-static cl_program program;
-static cl_kernel kernel_q4_0, kernel_q4_1, kernel_q4_2, kernel_q4_3, kernel_q5_0, kernel_q5_1, kernel_q8_0;
-static cl_mem cl_buffer_a, cl_buffer_qb, cl_buffer_b, cl_buffer_c;
-static size_t cl_size_a = 0, cl_size_qb = 0, cl_size_b = 0, cl_size_c = 0;
-
-static cl_program build_program_from_source(cl_context ctx, cl_device_id dev, const char* program_buffer) {
- cl_program p;
- char *program_log;
- size_t program_size, log_size;
- int err;
-
- program_size = strlen(program_buffer);
-
- p = clCreateProgramWithSource(ctx, 1, (const char**)&program_buffer, &program_size, &err);
- if(err < 0) {
- fprintf(stderr, "OpenCL error creating program");
- exit(1);
- }
-
- err = clBuildProgram(p, 0, NULL, NULL, NULL, NULL);
- if(err < 0) {
-
- clGetProgramBuildInfo(p, dev, CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size);
- program_log = (char*) malloc(log_size + 1);
- program_log[log_size] = '\0';
- clGetProgramBuildInfo(p, dev, CL_PROGRAM_BUILD_LOG, log_size + 1, program_log, NULL);
- printf("%s\n", program_log);
- free(program_log);
- exit(1);
- }
-
- return p;
-}
-
-void ggml_v2_cl_init_legacy(void) {
- cl_int err = 0;
- char * GGML_V2_CLBLAST_PLATFORM = getenv("GGML_OPENCL_PLATFORM");
- char * GGML_V2_CLBLAST_DEVICE = getenv("GGML_OPENCL_DEVICE");
- int plat_num = (GGML_V2_CLBLAST_PLATFORM == NULL ? 0 : atoi(GGML_V2_CLBLAST_PLATFORM));
- int dev_num = (GGML_V2_CLBLAST_DEVICE == NULL ? 0 : atoi(GGML_V2_CLBLAST_DEVICE));
- printf("\nInitializing LEGACY CLBlast (First Run)...");
- printf("\nAttempting to use: Platform=%d, Device=%d (If invalid, program will crash)\n",plat_num,dev_num);
- cl_uint num_platforms;
- clGetPlatformIDs(0, NULL, &num_platforms);
- cl_platform_id* platforms = (cl_platform_id*)malloc(num_platforms*sizeof(cl_platform_id));
- clGetPlatformIDs(num_platforms, platforms, NULL);
- platform = platforms[plat_num];
- char platform_buffer[1024];
- clGetPlatformInfo(platform, CL_PLATFORM_NAME, sizeof(platform_buffer), &platform_buffer, NULL);
- cl_uint num_devices;
- clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, 0, NULL, &num_devices);
- cl_device_id* devices = (cl_device_id*)malloc(num_devices*sizeof(cl_device_id));
- clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, num_devices, devices, NULL);
- device = devices[dev_num];
- char device_buffer[1024];
- clGetDeviceInfo(device, CL_DEVICE_NAME, sizeof(device_buffer), &device_buffer, NULL);
- printf("Using Platform: %s Device: %s\n", platform_buffer, device_buffer);
- context = clCreateContext(NULL, 1, &device, NULL, NULL, &err);
- CL_CHECK(err, "clCreateContext");
- queue = clCreateCommandQueue(context, device, CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, &err);
- CL_CHECK(err, "clCreateCommandQueue");
-
- free(platforms);
- free(devices);
-
- program = build_program_from_source(context, device, clblast_dequant_legacy);
-
- // Prepare dequantize kernels
- kernel_q4_0 = clCreateKernel(program, "dequantize_row_q4_0", &err);
- CL_CHECK(err, "clCreateKernel");
- kernel_q4_1 = clCreateKernel(program, "dequantize_row_q4_1", &err);
- CL_CHECK(err, "clCreateKernel");
- kernel_q4_2 = clCreateKernel(program, "dequantize_row_q4_2", &err);
- CL_CHECK(err, "clCreateKernel");
- kernel_q4_3 = clCreateKernel(program, "dequantize_row_q4_3", &err);
- CL_CHECK(err, "clCreateKernel");
- kernel_q5_0 = clCreateKernel(program, "dequantize_row_q5_0", &err);
- CL_CHECK(err, "clCreateKernel");
- kernel_q5_1 = clCreateKernel(program, "dequantize_row_q5_1", &err);
- CL_CHECK(err, "clCreateKernel");
- kernel_q8_0 = clCreateKernel(program, "dequantize_row_q8_0", &err);
- CL_CHECK(err, "clCreateKernel");
-}
-
-static void ggml_v2_cl_malloc(size_t req_size, size_t* cur_size, cl_mem_flags flags, cl_mem* buf) {
- if (req_size <= *cur_size) {
- return;
- }
-
- // Reallocate buffer with enough space
- if (*cur_size > 0) {
- clReleaseMemObject(*buf);
- }
- cl_int err;
- *buf = clCreateBuffer(context, flags, req_size, NULL, &err);
- *cur_size = req_size;
- CL_CHECK(err, "clCreateBuffer");
-}
-
-void ggml_v2_cl_sgemm_wrapper_legacy(
- const enum ggml_v2_blas_order order, const enum ggml_v2_blas_op trans_a, const enum ggml_v2_blas_op trans_b,
- const int m, const int n, const int k,
- const float alpha, const void *host_a, const int lda,
- const float *host_b, const int ldb, const float beta,
- float *host_c, const int ldc, const int btype) {
- cl_int err = 0;
-
- cl_kernel kernel;
- size_t global = n * k, local, size_qb;
- bool dequant;
- cl_block_q5_0* cl_host_b;
-
- switch (btype) {
- case GGML_V2_TYPE_F32:
- dequant = false;
- break;
- case GGML_V2_TYPE_Q4_0:
- dequant = true;
- kernel = kernel_q4_0;
- local = 16;
- size_qb = global * (sizeof(float) + local) / 32;
- break;
- case GGML_V2_TYPE_Q4_1:
- dequant = true;
- kernel = kernel_q4_1;
- local = 16;
- size_qb = global * (sizeof(float) * 2 + local) / 32;
- break;
- case GGML_V2_TYPE_Q4_2:
- dequant = true;
- kernel = kernel_q4_2;
- local = 8;
- size_qb = global * (sizeof(ggml_v2_fp16_t) + local) / 16;
- break;
- case GGML_V2_TYPE_Q4_3:
- dequant = true;
- kernel = kernel_q4_3;
- local = 8;
- size_qb = global * (sizeof(short) * 2 + local) / 16;
- break;
- case GGML_V2_TYPE_Q5_0:
- dequant = true;
- kernel = kernel_q5_0;
- local = 16;
- // For some reason OpenCL seems to be incapable of working with structs of size 22.
- // 20 and 24 bytes are fine. Workaround to do the fp16 to fp32 step on CPU...
- // TODO Find the reason, fix and remove workaround.
- const block_q5_0* b = (const block_q5_0*) host_b;
- cl_host_b = (cl_block_q5_0*) malloc(sizeof(cl_block_q5_0) * global / 32);
- for (size_t i = 0; i < global / 32; i++) {
- cl_host_b[i].d = ggml_v2_fp16_to_fp32(b[i].d);
- memcpy(&cl_host_b[i].qh, b[i].qh, sizeof(uint32_t));
- memcpy(&cl_host_b[i].qs, b[i].qs, QK5_0 / 2);
- }
- host_b = (const float*) cl_host_b;
- size_qb = global * (sizeof(float) + sizeof(uint32_t) + local) / 32;
- break;
- case GGML_V2_TYPE_Q5_1:
- dequant = true;
- kernel = kernel_q5_1;
- local = 16;
- size_qb = global * (sizeof(ggml_v2_fp16_t) * 2 + sizeof(uint32_t) + local) / 32;
- break;
- case GGML_V2_TYPE_Q8_0:
- dequant = true;
- kernel = kernel_q8_0;
- local = 32;
- size_qb = global * (sizeof(float) + local) / 32;
- break;
- default:
- fprintf(stderr, "Error: Unsupported OpenCL btype %d\n", btype);
- abort();
- }
-
- const size_t size_a = m * k * sizeof(float);
- const size_t size_b = n * k * sizeof(float);
- const size_t size_c = m * n * sizeof(float);
-
- // Prepare buffers
- ggml_v2_cl_malloc(size_a, &cl_size_a, CL_MEM_READ_ONLY, &cl_buffer_a);
- if (dequant) {
- ggml_v2_cl_malloc(size_qb, &cl_size_qb, CL_MEM_READ_ONLY, &cl_buffer_qb);
- }
- ggml_v2_cl_malloc(size_b, &cl_size_b, CL_MEM_READ_WRITE, &cl_buffer_b);
- ggml_v2_cl_malloc(size_c, &cl_size_c, CL_MEM_WRITE_ONLY, &cl_buffer_c);
-
- cl_event ev_a, ev_qb, ev_b;
-
- if (dequant) {
- err = clSetKernelArg(kernel, 0, sizeof(cl_mem), &cl_buffer_qb);
- err |= clSetKernelArg(kernel, 1, sizeof(cl_mem), &cl_buffer_b);
- CL_CHECK(err, "clSetKernelArg");
- err = clEnqueueWriteBuffer(queue, cl_buffer_qb, CL_FALSE, 0, size_qb, host_b, 0, NULL, &ev_qb);
- CL_CHECK(err, "clEnqueueWriteBuffer qb");
- } else {
- err = clEnqueueWriteBuffer(queue, cl_buffer_b, CL_FALSE, 0, size_b, host_b, 0, NULL, &ev_b);
- CL_CHECK(err, "clEnqueueWriteBuffer b");
- }
-
- err = clEnqueueWriteBuffer(queue, cl_buffer_a, CL_FALSE, 0, size_a, host_a, 0, NULL, &ev_a);
- CL_CHECK(err, "clEnqueueWriteBuffer a");
- if (dequant) {
- err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global, &local, 1, &ev_qb, &ev_b);
- CL_CHECK(err, "clEnqueueNDRangeKernel");
- clReleaseEvent(ev_qb);
- }
- clWaitForEvents(1, &ev_a);
- clWaitForEvents(1, &ev_b);
- clReleaseEvent(ev_a);
- clReleaseEvent(ev_b);
-
- cl_event ev_sgemm;
- CLBlastStatusCode status = CLBlastSgemm((CLBlastLayout)order,
- (CLBlastTranspose)trans_a, (CLBlastTranspose)trans_b,
- m, n, k,
- alpha,
- cl_buffer_a, 0, lda,
- cl_buffer_b, 0, ldb,
- beta,
- cl_buffer_c, 0, ldc,
- &queue, &ev_sgemm);
-
- if (status != CLBlastSuccess) {
- fprintf(stderr, "Error: CLBlast SGEMM %d\n", status);
- abort();
- }
-
- cl_event ev_c;
- clEnqueueReadBuffer(queue, cl_buffer_c, CL_TRUE, 0, size_c, host_c, 1, &ev_sgemm, &ev_c);
-
- // Wait for completion
- clWaitForEvents(1, &ev_c);
- clReleaseEvent(ev_sgemm);
- clReleaseEvent(ev_c);
- if (btype == GGML_V2_TYPE_Q5_0) {
- free((void*) cl_host_b);
- }
-}
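For reference, the per-block arithmetic in the dequantize kernels above is easy to reproduce on the CPU. The sketch below is a rough NumPy illustration of the q4_1 case (float scale `d`, float offset `m`, 16 packed bytes expanding to 32 values); the function name and the example values are illustrative only, not part of the deleted source.

```python
import numpy as np

def dequantize_q4_1_block(d: float, m: float, qs: np.ndarray) -> np.ndarray:
    """Expand one q4_1 block (16 packed bytes) into 32 floats.

    Mirrors dequantize_row_q4_1 above: value = nibble * d + m,
    with the low nibble written before the high nibble of each byte.
    """
    assert qs.shape == (16,) and qs.dtype == np.uint8
    out = np.empty(32, dtype=np.float32)
    out[0::2] = (qs & 0x0F) * d + m   # low nibbles -> even output indices
    out[1::2] = (qs >> 4) * d + m     # high nibbles -> odd output indices
    return out

# A block whose nibbles are all 5 dequantizes to 5*d + m everywhere.
block = np.full(16, 0x55, dtype=np.uint8)
print(dequantize_q4_1_block(d=0.1, m=1.0, qs=block)[:4])  # [1.5 1.5 1.5 1.5]
```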
diff --git a/spaces/JFoz/dog-controlnet/app.py b/spaces/JFoz/dog-controlnet/app.py
deleted file mode 100644
index 9fc05070bde7fc38b8068d3ab57baf342db6b395..0000000000000000000000000000000000000000
--- a/spaces/JFoz/dog-controlnet/app.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import gradio as gr
-import jax
-import jax.numpy as jnp
-import numpy as np
-from flax.jax_utils import replicate
-from flax.training.common_utils import shard
-from PIL import Image
-from diffusers import FlaxStableDiffusionControlNetPipeline, FlaxControlNetModel
-import cv2
-
-def create_key(seed=0):
- return jax.random.PRNGKey(seed)
-
-controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
- "JFoz/dog-cat-pose", dtype=jnp.bfloat16
-)
-pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
- "runwayml/stable-diffusion-v1-5", controlnet=controlnet, revision="flax", dtype=jnp.bfloat16
-)
-
-def infer(prompts, negative_prompts, image):
- params["controlnet"] = controlnet_params
-
- num_samples = 1 #jax.device_count()
- rng = create_key(0)
- rng = jax.random.split(rng, jax.device_count())
- im = image
- image = Image.fromarray(im)
-
- prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
- negative_prompt_ids = pipe.prepare_text_inputs([negative_prompts] * num_samples)
- processed_image = pipe.prepare_image_inputs([image] * num_samples)
-
- p_params = replicate(params)
- prompt_ids = shard(prompt_ids)
- negative_prompt_ids = shard(negative_prompt_ids)
- processed_image = shard(processed_image)
-
- output = pipe(
- prompt_ids=prompt_ids,
- image=processed_image,
- params=p_params,
- prng_seed=rng,
- num_inference_steps=50,
- neg_prompt_ids=negative_prompt_ids,
- jit=True,
- ).images
-
- output_images = pipe.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:])))
- return output_images
-
-#gr.Interface(infer, inputs=["text", "text", "image"], outputs="gallery").launch()
-
-title = "Animal Pose Control Net"
-description = "This is a demo of Animal Pose ControlNet, which is a model trained on runwayml/stable-diffusion-v1-5 with new type of conditioning."
-
-#with gr.Blocks(theme=gr.themes.Default(font=[gr.themes.GoogleFont("Inconsolata"), "Arial", "sans-serif"])) as demo:
- #gr.Markdown(
- # """
- # Animal Pose Control Net
- # This is a demo of Animal Pose Control Net, which is a model trained on runwayml/stable-diffusion-v1-5 with new type of conditioning.
- #""")
-
-theme = gr.themes.Default(primary_hue="green").set(
- button_primary_background_fill="*primary_200",
- button_primary_background_fill_hover="*primary_300",
-)
-
-gr.Interface(fn = infer, inputs = ["text", "text", "image"], outputs = "gallery",
- title = title, description = description, theme='gradio/soft',
- examples=[["a Labrador crossing the road", "low quality", "pose_256.jpg"]]
-).launch()
-
-
-gr.Markdown(
- """
- * [Dataset](https://huggingface.co/datasets/JFoz/dog-poses-controlnet-dataset)
- * [Diffusers model](), [Web UI model](https://huggingface.co/JFoz/dog-pose)
-    * [Training Report](https://wandb.ai/john-fozard/dog-cat-pose/runs/kmwcvae5)
- """)
diff --git a/spaces/Jamkonams/AutoGPT/autogpt/memory/no_memory.py b/spaces/Jamkonams/AutoGPT/autogpt/memory/no_memory.py
deleted file mode 100644
index 0371e96ae89f5eb88dae019a66351a229596ed7a..0000000000000000000000000000000000000000
--- a/spaces/Jamkonams/AutoGPT/autogpt/memory/no_memory.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""A class that does not store any data. This is the default memory provider."""
-from __future__ import annotations
-
-from typing import Any
-
-from autogpt.memory.base import MemoryProviderSingleton
-
-
-class NoMemory(MemoryProviderSingleton):
- """
- A class that does not store any data. This is the default memory provider.
- """
-
- def __init__(self, cfg):
- """
- Initializes the NoMemory provider.
-
- Args:
- cfg: The config object.
-
- Returns: None
- """
- pass
-
- def add(self, data: str) -> str:
- """
- Adds a data point to the memory. No action is taken in NoMemory.
-
- Args:
- data: The data to add.
-
- Returns: An empty string.
- """
- return ""
-
- def get(self, data: str) -> list[Any] | None:
- """
- Gets the data from the memory that is most relevant to the given data.
- NoMemory always returns None.
-
- Args:
- data: The data to compare to.
-
- Returns: None
- """
- return None
-
- def clear(self) -> str:
- """
- Clears the memory. No action is taken in NoMemory.
-
- Returns: An empty string.
- """
- return ""
-
- def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
- """
- Returns all the data in the memory that is relevant to the given data.
- NoMemory always returns None.
-
- Args:
- data: The data to compare to.
- num_relevant: The number of relevant data to return.
-
- Returns: None
- """
- return None
-
- def get_stats(self):
- """
- Returns: An empty dictionary as there are no stats in NoMemory.
- """
- return {}
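A minimal sketch of how this provider behaves when driven directly; it assumes the deleted Auto-GPT package is importable and that a bare config object is acceptable (NoMemory ignores it):

```python
from types import SimpleNamespace

from autogpt.memory.no_memory import NoMemory  # module deleted in this diff

cfg = SimpleNamespace()        # NoMemory ignores the config entirely
memory = NoMemory(cfg)

assert memory.add("some fact") == ""            # nothing is stored
assert memory.get("some fact") is None          # nothing is retrieved
assert memory.get_relevant("query", 3) is None
assert memory.get_stats() == {}
assert memory.clear() == ""
```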
diff --git a/spaces/JcRolling/cartoon-converter/README.md b/spaces/JcRolling/cartoon-converter/README.md
deleted file mode 100644
index ad757e8c29a07a7915e1a62f7ea5836edf77468e..0000000000000000000000000000000000000000
--- a/spaces/JcRolling/cartoon-converter/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: cartoon-converter
-emoji: 😁
-colorFrom: purple
-colorTo: pink
-sdk: gradio
-sdk_version: 3.3.1
-app_file: app.py
-pinned: false
-author: xin chen
-duplicated_from: JcRolling/AnimeGANv3
----
\ No newline at end of file
diff --git a/spaces/Joeythemonster/prompt-extend/app.py b/spaces/Joeythemonster/prompt-extend/app.py
deleted file mode 100644
index 48a29d9619c45a3a223944fb144e3e9f76af907e..0000000000000000000000000000000000000000
--- a/spaces/Joeythemonster/prompt-extend/app.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from transformers import pipeline
-import gradio as gr
-
-pipe = pipeline('text-generation', model='daspartho/prompt-extend')
-
-def extend_prompt(prompt):
- return pipe(prompt+',', num_return_sequences=1)[0]["generated_text"]
-
-examples = [
- ['elon musk as thor'],
- ["giant dragon flying in the sky"],
- ['psychedelic liquids space'],
- ["a coconut laying on the beach"],
- ["peaceful village landscape"],
- ]
-
-iface = gr.Interface(
- description = "Enter a main idea for a prompt, and the model will attempt to add suitable style cues.",
- article = "Github
",
- fn=extend_prompt,
- inputs=gr.Text(label="Type the prompt here"),
- outputs=gr.TextArea(label='Extended prompt'),
- examples=examples,
- title="Prompt Extend"
- )
-
-iface.launch()
\ No newline at end of file
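The same checkpoint can be exercised without the Gradio wrapper. A small sketch, assuming only `transformers` is installed and reusing the `daspartho/prompt-extend` model id from the file above:

```python
from transformers import pipeline

# Same checkpoint the deleted app loads; downloads the model on first use.
pipe = pipeline("text-generation", model="daspartho/prompt-extend")

def extend_prompt(prompt: str) -> str:
    # The app appends a comma so the model continues with style cues.
    return pipe(prompt + ",", num_return_sequences=1)[0]["generated_text"]

if __name__ == "__main__":
    print(extend_prompt("giant dragon flying in the sky"))
```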
diff --git a/spaces/Justin-Choo/Diffusion50XX/app.py b/spaces/Justin-Choo/Diffusion50XX/app.py
deleted file mode 100644
index 7e64c438002c341713616d7115b3cd2c7f7d5d01..0000000000000000000000000000000000000000
--- a/spaces/Justin-Choo/Diffusion50XX/app.py
+++ /dev/null
@@ -1,297 +0,0 @@
-import gradio as gr
-import os
-import sys
-from pathlib import Path
-
-models = [
- {"name": "7 Pa", "url": "AIARTCHAN/7pa"},
- {"name": "A Certain Model", "url": "JosephusCheung/ACertainModel"},
- {"name": "A Certain Thing", "url": "JosephusCheung/ACertainThing"},
- {"name": "A Certainity", "url": "JosephusCheung/ACertainty"},
- {"name": "Abyss Hell Hero", "url": "AIARTCHAN/AbyssHellHero"},
- {"name": "Abyss Maple 3", "url": "AIARTCHAN/AbyssMapleVer3"},
- {"name": "Abyss Orange Mix 2", "url": "WarriorMama777/AbyssOrangeMix2"},
- {"name": "Abyss Orange Mix", "url": "WarriorMama777/AbyssOrangeMix"},
- {"name": "AbyssHell 3", "url": "AIARTCHAN/AbyssHellVer3"},
- {"name": "Alice in Diffusion Land", "url": "Guizmus/SDArt_AliceInDiffusionLand"},
- {"name": "All 526 Animated", "url": "stablediffusionapi/all-526-animated"},
- {"name": "All 526", "url": "stablediffusionapi/all-526"},
- {"name": "Alloy Mix", "url": "p1atdev/alloy-mix"},
- {"name": "Alt Clip", "url": "BAAI/AltCLIP"},
- {"name": "Ambient Mix", "url": "OedoSoldier/ambientmix"},
- {"name": "AmiIReal", "url": "stablediffusionapi/amireal"},
- {"name": "Analog Diffusion", "url": "wavymulder/Analog-Diffusion"},
- {"name": "Anidosmix 3", "url": "AIARTCHAN/anidosmixV2"},
- {"name": "Anime Kawai Diffusion", "url": "Ojimi/anime-kawai-diffusion"},
- {"name": "Anime Screencap", "url": "Conflictx/AnimeScreencap"},
- {"name": "Animix", "url": "OedoSoldier/animix"},
- {"name": "Anireal 3D V2", "url": "circulus/sd-anireal-3d-v2"},
- {"name": "Any Pastel", "url": "m4gnett/any-pastel"},
- {"name": "AnyLORA", "url": "kubanemil/AnyLORA"},
- {"name": "Anything 2.1", "url": "swl-models/anything-v2.1"},
- {"name": "Anything 3.0 Light", "url": "mm00/anything-v3.0-light"},
- {"name": "Anything 3.0", "url": "Linaqruf/anything-v3.0"},
- {"name": "Anything 3.1", "url": "cag/anything-v3-1"},
- {"name": "Anything 3X", "url": "iZELX1/Anything-V3-X"},
- {"name": "Anything 5.0", "url": "stablediffusionapi/anything-v5"},
- {"name": "Anything Else 4", "url": "stablediffusionapi/anythingelse-v4"},
- {"name": "Anything Else 5", "url": "stablediffusionapi/anything-v5"},
- {"name": "Anything Midjourney 4.1", "url": "Joeythemonster/anything-midjourney-v-4-1"},
- {"name": "Arcane Diffusion", "url": "nitrosocke/Arcane-Diffusion"},
- {"name": "Archer Diffusion", "url": "nitrosocke/archer-diffusion"},
- {"name": "Asian Mix", "url": "D1b4l4p/AsianMix"},
- {"name": "Aurora", "url": "SweetLuna/Aurora"},
- {"name": "Balloon Art", "url": "Fictiverse/Stable_Diffusion_BalloonArt_Model"},
- {"name": "Blood Orange Mix", "url": "WarriorMama777/BloodOrangeMix"},
- {"name": "Blue pencil", "url": "bluepen5805/blue_pencil"},
- {"name": "CamelliaMix 2.5D","url": "stablediffusionapi/camelliamix25d"},
- {"name": "CamelliaMix Line","url": "stablediffusionapi/camelliamixline"},
- {"name": "Candy Punk", "url": "Conflictx/CandyPunk"},
- {"name": "Cetusmix", "url": "stablediffusionapi/cetusmix"},
- {"name": "CGI Animation", "url": "Conflictx/CGI_Animation"},
- {"name": "Chaos and Order", "url": "Guizmus/SDArt_ChaosAndOrder768"},
- {"name": "Chem Punk", "url": "Conflictx/Chempunk"},
- {"name": "Chikmix", "url": "stablediffusionapi/chikmix"},
- {"name": "Chillout App Factory","url": "stablediffusionapi/chillout-app-factory"},
- {"name": "Chilloutclara", "url": "Fred99774/chilloutvlara"},
- {"name": "Circulus 2.8", "url": "circulus/sd-photoreal-v2.8"},
- {"name": "Circulus Photoreal V2", "url": "circulus/sd-photoreal-real-v2"},
- {"name": "Circulus Semi Real 2", "url": "circulus/sd-photoreal-semi-v2"},
- {"name": "Clam", "url": "Johnhex/Clam"},
- {"name": "Clarity", "url": "Schisim/Clarity"},
- {"name": "Classic Anime", "url": "nitrosocke/classic-anim-diffusion"},
- {"name": "Claudfuen 1", "url": "claudfuen/photorealistic-fuen-v1"},
- {"name": "Collage Diffusion", "url": "wavymulder/collage-diffusion"},
- {"name": "Colorful 4.5", "url": "Manseo/Colorful-v4.5-Plus"},
- {"name": "Comic Diffusion", "url": "ogkalu/Comic-Diffusion"},
- {"name": "Complex-Lineart", "url": "Conflictx/Complex-Lineart"},
- {"name": "Cool Japan Diffusion 2.1.2", "url": "aipicasso/cool-japan-diffusion-2-1-2"},
- {"name": "Cool Japan Diffusion", "url": "aipicasso/cool-japan-diffusion-2-1-2"},
- {"name": "Cosmic Babes", "url": "stablediffusionapi/cosmic-babes"},
- {"name": "Cosmic Horros 768", "url": "Guizmus/SDArt_cosmichorrors768"},
- {"name": "Cosmic Horros", "url": "Guizmus/SDArt_cosmichorrors"},
- {"name": "Counterfeit 1.0", "url": "gsdf/counterfeit-v1.0"},
- {"name": "Counterfeit 2", "url": "gsdf/Counterfeit-V2.0"},
- {"name": "Counterfeit 2.0", "url": "gsdf/Counterfeit-V2.0"},
- {"name": "Counterfeit 3.0", "url": "stablediffusionapi/counterfeit-v30"},
- {"name": "CyberPunk Anime", "url": "DGSpitzer/Cyberpunk-Anime-Diffusion"},
- {"name": "Cyberrealistic", "url": "stablediffusionapi/cyberrealistic"},
- {"name": "Dark Sushi Mix", "url": "stablediffusionapi/dark-sushi-mix"},
- {"name": "Dash Sushi 25d", "url": "stablediffusionapi/dark-sushi-25d"},
- {"name": "Deliberate", "url": "Masagin/Deliberate"},
- {"name": "DGSpitzer", "url": "DGSpitzer/DGSpitzer-Art-Diffusion"},
- {"name": "Double Exposure Diffusion", "url": "joachimsallstrom/Double-Exposure-Diffusion"},
- {"name": "Dreamful 2", "url": "Hius/DreamFul-V2"},
- {"name": "DreamAnything", "url": "Yntec/DreamAnything"},
- {"name": "Dreamlike Anime", "url": "dreamlike-art/dreamlike-anime-1.0"},
- {"name": "Dreamlike Art", "url": "dreamlike-art/dreamlike-diffusion-1.0"},
- {"name": "Dreamlike Diffusion", "url": "dreamlike-art/dreamlike-diffusion-1.0"},
- {"name": "Dreamlike Photoreal", "url": "dreamlike-art/dreamlike-photoreal-2.0"},
- {"name": "Dreamshaper", "url": "Lykon/DreamShaper"},
- {"name": "DucHaiten Anime", "url": "DucHaiten/DucHaitenAnime"},
- {"name": "DucHaiten Art", "url": "DucHaiten/DucHaitenAIart"},
- {"name": "DucHaiten ClassicAnime", "url": "DucHaiten/DH_ClassicAnime"},
- {"name": "DucHaiten DreamWorld", "url": "DucHaiten/DucHaitenDreamWorld"},
- {"name": "DucHaiten Journey", "url": "DucHaiten/DucHaitenJourney"},
- {"name": "DucHaiten StyleLikeMe", "url": "DucHaiten/DucHaiten-StyleLikeMe"},
- {"name": "DucHaiten SuperCute", "url": "DucHaiten/DucHaitenSuperCute"},
- {"name": "Dungeons and Diffusion", "url": "0xJustin/Dungeons-and-Diffusion"},
- {"name": "Eerie Orange Mix", "url": "WarriorMama777/EerieOrangeMix"},
- {"name": "Eimis Anime Diffusion", "url": "eimiss/EimisAnimeDiffusion_1.0v"},
- {"name": "Elden Ring", "url": "nitrosocke/elden-ring-diffusion"},
- {"name": "Epic Diffusion 1.1", "url": "johnslegers/epic-diffusion-v1.1"},
- {"name": "Epic Diffusion", "url": "johnslegers/epic-diffusion"},
- {"name": "EpicMix Realism", "url": "Duskfallcrew/EpicMix_Realism"},
- {"name": "FantaStel", "url": "0RisingStar0/FantaStel"},
- {"name": "Fantasy Mix", "url": "theintuitiveye/FantasyMix"},
- {"name": "FeverDream", "url": "Cosmo-Hug/FeverDream"},
- {"name": "Fluid Art", "url": "Fictiverse/Stable_Diffusion_FluidArt_Model"},
- {"name": "Future Diffusion", "url": "nitrosocke/Future-Diffusion"},
- {"name": "Gakki Mix 768", "url": "Sa1i/gakki-mix-768"},
- {"name": "Ghibli Diffusion", "url": "nitrosocke/Ghibli-Diffusion"},
- {"name": "Girl New 1", "url": "Fred99774/girlnew1"},
- {"name": "Gorynich Mix", "url": "JackAnon/GorynichMix"},
- {"name": "GorynichMix", "url": "JackAnon/GorynichMix"},
- {"name": "GrapeFruit", "url": "iZELX1/Grapefruit"},
- {"name": "Grapefruit", "url": "stablediffusionapi/grapefruit-hentai-mo"},
- {"name": "Grimoeresigils", "url": "ECarbenia/grimoiresigils"},
- {"name": "GTA5 Artwork Diffusion", "url": "ItsJayQz/GTA5_Artwork_Diffusion"},
- {"name": "GuoFeng 3", "url": "xiaolxl/GuoFeng3"},
- {"name": "HARDBlend", "url": "theintuitiveye/HARDblend"},
- {"name": "HassanBlend 1.4", "url": "hassanblend/hassanblend1.4"},
- {"name": "HassanBlend 1.5.1.2", "url": "hassanblend/HassanBlend1.5.1.2"},
- {"name": "High Rise Mix 1", "url": "0RisingStar0/HighRiseMixV1"},
- {"name": "High Rise Mix 2", "url": "0RisingStar0/HighRiseMixV2"},
- {"name": "HimawariMixs", "url": "natsusakiyomi/HimawariMixs"},
- {"name": "Hrrzg Style 768px", "url": "TheLastBen/hrrzg-style-768px"},
- {"name": "Icomix 2", "url": "stablediffusionapi/icomix-2"},
- {"name": "InkPunk Diffusion", "url": "Envvi/Inkpunk-Diffusion"},
- {"name": "JWST Deep Space Diffusion", "url": "dallinmackay/JWST-Deep-Space-diffusion"},
- {"name": "Kipaki Egyptian Sci Fi", "url": "Conflictx/Kipaki-EgyptianSciFi"},
- {"name": "Lit 6B", "url": "hakurei/lit-6B"},
- {"name": "LiveArca Mix", "url": "0RisingStar0/LiveArcaMix"},
- {"name": "Lomo Diffusion", "url": "wavymulder/lomo-diffusion"},
- {"name": "Luna Diffusion", "url": "proximasanfinetuning/luna-diffusion"},
- {"name": "Majic Mix Fantasy", "url": "stablediffusionapi/majicmixfantasy"},
- {"name": "Majic Mix Realistic", "url": "stablediffusionapi/majicmixrealistic"},
- {"name": "Majic Mix Sombre", "url": "stablediffusionapi/majicmixsombre"},
- {"name": "Mama Orange Mixs", "url": "WarriorMama777/OrangeMixs"},
- {"name": "Marvel WhatIf Diffusion", "url": "ItsJayQz/Marvel_WhatIf_Diffusion"},
- {"name": "Meina Alter", "url": "stablediffusionapi/meinaalter"},
- {"name": "Meina Pastel", "url": "stablediffusionapi/meinapastel"},
- {"name": "MeinaMix 7", "url": "Nacholmo/meinamixv7-diffusers"},
- {"name": "MF-Base", "url": "MyneFactory/MF-Base"},
- {"name": "Microscopic Model", "url": "Fictiverse/Stable_Diffusion_Microscopic_model"},
- {"name": "Midjourney 4.0", "url": "flax/midjourney-v4-diffusion"},
- {"name": "Midjourney 4.1", "url": "Joeythemonster/anything-midjourney-v-4-1"},
- {"name": "Mix Pro V4", "url": "AIARTCHAN/MIX-Pro-V4"},
- {"name": "Model Shoot", "url": "wavymulder/modelshoot"},
- {"name": "Mo-Di Diffusion", "url": "nitrosocke/mo-di-diffusion"},
- {"name": "MoistMix 1", "url": "MoistMix/MoistMixV1"},
- {"name": "Naruto Diffuser", "url": "lambdalabs/sd-naruto-diffusers"},
- {"name": "Never Ending Dream 2", "url": "luongphamit/NeverEnding-Dream2"},
- {"name": "NeverEnding-Dream", "url": "Lykon/NeverEnding-Dream"},
- {"name": "Nitro Diffusion", "url": "nitrosocke/Nitro-Diffusion"},
- {"name": "NotSoXJB", "url": "AerinK/NotSoXJB-Mix-1"},
- {"name": "Openjourney 4", "url": "prompthero/openjourney-v4"},
- {"name": "Openjourney V2", "url": "prompthero/openjourney-v2"},
- {"name": "Openjourney", "url": "prompthero/openjourney"},
- {"name": "OpenNiji", "url": "Korakoe/OpenNiji"},
- {"name": "Papercut", "url": "Fictiverse/Stable_Diffusion_PaperCut_Model"},
- {"name": "Picasso Diffusion 1.1", "url": "aipicasso/picasso-diffusion-1-1"},
- {"name": "Plat Diffusion", "url": "p1atdev/plat-diffusion"},
- {"name": "Pokemon Diffuser", "url": "lambdalabs/sd-pokemon-diffusers"},
- {"name": "Portrait Plus", "url": "wavymulder/portraitplus"},
- {"name": "Protogen 2.2", "url": "darkstorm2150/Protogen_v2.2_Official_Release"},
- {"name": "Protogen Infinity", "url": "darkstorm2150/Protogen_Infinity_Official_Release"},
- {"name": "Protogen X 3.4", "url": "darkstorm2150/Protogen_x3.4_Official_Release"},
- {"name": "Protogen X 5.8", "url": "darkstorm2150/Protogen_x5.8_Official_Release"},
- {"name": "QuinceMix", "url": "Hemlok/QuinceMix"},
- {"name": "Realistic Vision 1.4", "url": "SG161222/Realistic_Vision_V1.4"},
- {"name": "Redshift 768", "url": "nitrosocke/redshift-diffusion-768"},
- {"name": "Redshift Diffusion 768", "url": "nitrosocke/redshift-diffusion-768"},
- {"name": "Redshift Diffusion", "url": "nitrosocke/redshift-diffusion"},
- {"name": "Replicant V2", "url": "gsdf/Replicant-V2.0"},
- {"name": "Replicant-V1.0", "url": "gsdf/Replicant-V1.0"},
- {"name": "Rev Anim", "url": "stablediffusionapi/rev-anim"},
- {"name": "Rev Animated", "url": "coreml/coreml-ReV-Animated"},
- {"name": "Rev Animated", "url": "LottePeisch/RevAnimated-Diffusers"},
- {"name": "Robo Diffusion 3 Base", "url": "nousr/robo-diffusion-2-base"},
- {"name": "Robo Diffusion", "url": "nousr/robo-diffusion"},
- {"name": "Salt_mix", "url": "AsAHuman/Salt_mix"},
- {"name": "Seek Art Mega", "url": "coreco/seek.art_MEGA"},
- {"name": "Semi Real Mix", "url": "robotjung/SemiRealMix"},
- {"name": "SEmix", "url": "Deyo/SEmix"},
- {"name": "Something V 2.2","url": "NoCrypt/SomethingV2_2"},
- {"name": "Something V2","url": "NoCrypt/SomethingV2"},
- {"name": "Something", "url": "Guizmus/SDArt_something"},
- {"name": "Spider Verse diffusion", "url": "nitrosocke/spider-verse-diffusion"},
- {"name": "SpyBG", "url": "stablediffusionapi/spybg"},
- {"name": "Stable Diffusion 1.4","url": "CompVis/stable-diffusion-v1-4"},
- {"name": "Stable Diffusion 1.5","url": "runwayml/stable-diffusion-v1-5"},
- {"name": "Stable Diffusion 2.1 Base","url": "stabilityai/stable-diffusion-2-1-base"},
- {"name": "Stable Diffusion 2.1 Unclip","url": "stabilityai/stable-diffusion-2-1-unclip"},
- {"name": "Stable Diffusion 2.1","url": "stabilityai/stable-diffusion-2-1"},
- {"name": "Subtly", "url": "ddPn08/subtly"},
- {"name": "Synthwave Punk 2", "url": "ItsJayQz/SynthwavePunk-v2"},
- {"name": "The Ally", "url": "stablediffusionapi/the-ally"},
- {"name": "Three Delicacy wonto", "url": "stablediffusionapi/three-delicacy-wonto"},
- {"name": "Three Delicacy", "url": "stablediffusionapi/three-delicacy"},
- {"name": "Timeless Diffusion", "url": "wavymulder/timeless-diffusion"},
- {"name": "TMND mix", "url": "stablediffusionapi/tmnd-mix"},
- {"name": "Tron Legacy Diffusion", "url": "dallinmackay/Tron-Legacy-diffusion"},
- {"name": "UltraSkin", "url": "VegaKH/Ultraskin"},
- {"name": "Valorant Diffusion", "url": "ItsJayQz/Valorant_Diffusion"},
- {"name": "Van Gogh Diffusion", "url": "dallinmackay/Van-Gogh-diffusion"},
- {"name": "Vectorartz Diffusion", "url": "coder119/Vectorartz_Diffusion"},
- {"name": "Vintedois 1.0", "url": "22h/vintedois-diffusion-v0-1"},
- {"name": "Vintedois 2.0", "url": "22h/vintedois-diffusion-v0-2"},
- {"name": "Vivid Watercolors", "url": "Evel/VividWatercolors"},
- {"name": "VoxelArt", "url": "Fictiverse/Stable_Diffusion_VoxelArt_Model"},
- {"name": "Waifu Diffusion", "url": "hakurei/waifu-diffusion"},
- {"name": "Wavyfusion", "url": "wavymulder/wavyfusion"},
- {"name": "West Magic", "url": "xiaolxl/WestMagic"},
- {"name": "X Mix", "url": "les-chien/X-mix"},
-]
-
-current_model = models[0]
-
-text_gen = gr.Interface.load("spaces/daspartho/prompt-extend")
-
-models2 = []
-for model in models:
- model_url = f"models/{model['url']}"
- loaded_model = gr.Interface.load(model_url, live=True, preprocess=True)
- models2.append(loaded_model)
-
-
-def text_it(inputs, text_gen=text_gen):
- return text_gen(inputs)
-
-
-def set_model(current_model_index):
- global current_model
- current_model = models[current_model_index]
- return gr.update(value=f"{current_model['name']}")
-
-
-def send_it(inputs, model_choice):
- proc = models2[model_choice]
- return proc(inputs)
-
-
-with gr.Blocks() as myface:
- gr.HTML(
-
- )
-
- with gr.Row():
- with gr.Row():
- input_text = gr.Textbox(label="Prompt idea", placeholder="", lines=1)
- # Model selection dropdown
- model_name1 = gr.Dropdown(
- label="Choose Model",
- choices=[m["name"] for m in models],
- type="index",
- value=current_model["name"],
- interactive=True,
- )
- with gr.Row():
- see_prompts = gr.Button("Generate Prompts")
- run = gr.Button("Generate Images", variant="primary")
-
- with gr.Row():
- output1 = gr.Image(label="")
- output2 = gr.Image(label="")
- output3 = gr.Image(label="")
- with gr.Row():
- magic1 = gr.Textbox(label="Generated Prompt", lines=2)
- magic2 = gr.Textbox(label="Generated Prompt", lines=2)
- magic3 = gr.Textbox(label="Generated Prompt", lines=2)
- with gr.Row():
- output4 = gr.Image(label="")
- output5 = gr.Image(label="")
- output6 = gr.Image(label="")
- with gr.Row():
- magic4 = gr.Textbox(label="Generated Prompt", lines=2)
- magic5 = gr.Textbox(label="Generated Prompt", lines=2)
- magic6 = gr.Textbox(label="Generated Prompt", lines=2)
-
- model_name1.change(set_model, inputs=model_name1, outputs=[output1, output2, output3, output4, output5, output6])
-
- run.click(send_it, inputs=[magic1, model_name1], outputs=[output1])
- run.click(send_it, inputs=[magic2, model_name1], outputs=[output2])
- run.click(send_it, inputs=[magic3, model_name1], outputs=[output3])
- run.click(send_it, inputs=[magic4, model_name1], outputs=[output4])
- run.click(send_it, inputs=[magic5, model_name1], outputs=[output5])
- run.click(send_it, inputs=[magic6, model_name1], outputs=[output6])
-
- see_prompts.click(text_it, inputs=[input_text], outputs=[magic1])
- see_prompts.click(text_it, inputs=[input_text], outputs=[magic2])
- see_prompts.click(text_it, inputs=[input_text], outputs=[magic3])
- see_prompts.click(text_it, inputs=[input_text], outputs=[magic4])
- see_prompts.click(text_it, inputs=[input_text], outputs=[magic5])
- see_prompts.click(text_it, inputs=[input_text], outputs=[magic6])
-
-myface.queue(concurrency_count=200)
-myface.launch(inline=True, show_api=False, max_threads=400)
\ No newline at end of file
diff --git a/spaces/KPCGD/bingo/src/components/turn-counter.tsx b/spaces/KPCGD/bingo/src/components/turn-counter.tsx
deleted file mode 100644
index 08a9e488f044802a8600f4d195b106567c35aab4..0000000000000000000000000000000000000000
--- a/spaces/KPCGD/bingo/src/components/turn-counter.tsx
+++ /dev/null
@@ -1,23 +0,0 @@
-import React from 'react'
-import { Throttling } from '@/lib/bots/bing/types'
-
-export interface TurnCounterProps {
- throttling?: Throttling
-}
-
-export function TurnCounter({ throttling }: TurnCounterProps) {
- if (!throttling) {
- return null
- }
-
-  return (
-    <div>
-      <span>{throttling.numUserMessagesInConversation}</span>
-      <span> 共 </span>
-      <span>{throttling.maxNumUserMessagesInConversation}</span>
-    </div>
-  )
-}
diff --git a/spaces/KaguraNana/XiaokunChatGPT/app.py b/spaces/KaguraNana/XiaokunChatGPT/app.py
deleted file mode 100644
index f2397dc293899e19cb7bcc9afe7152de4bad8ea3..0000000000000000000000000000000000000000
--- a/spaces/KaguraNana/XiaokunChatGPT/app.py
+++ /dev/null
@@ -1,343 +0,0 @@
-import json
-import gradio as gr
-# import openai
-import os
-import sys
-import traceback
-import requests
-# import markdown
-import csv
-
-my_api_key = "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" # 在这里输入你的 API 密钥
-HIDE_MY_KEY = False # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True
-
-initial_prompt = "You are a helpful assistant."
-API_URL = "https://api.openai.com/v1/chat/completions"
-HISTORY_DIR = "history"
-TEMPLATES_DIR = "templates"
-
-
-
-#if we are running in Docker
-if os.environ.get('dockerrun') == 'yes':
- dockerflag = True
-else:
- dockerflag = False
-
-if dockerflag:
- my_api_key = os.environ.get('my_api_key')
- if my_api_key == "empty":
- print("Please give a api key!")
- sys.exit(1)
- #auth
- username = os.environ.get('USERNAME')
- password = os.environ.get('PASSWORD')
- if isinstance(username, type(None)) or isinstance(password, type(None)):
- authflag = False
- else:
- authflag = True
-
-
-def parse_text(text):
- lines = text.split("\n")
- lines = [line for line in lines if line != ""]
- count = 0
- firstline = False
- for i, line in enumerate(lines):
- if "```" in line:
- count += 1
- items = line.split('`')
- if count % 2 == 1:
-                lines[i] = f'<pre><code class="{items[-1]}">'
-                firstline = True
-            else:
-                lines[i] = f'</code></pre>'
- else:
- if i > 0:
- if count % 2 == 1:
- line = line.replace("&", "&")
- line = line.replace("\"", "`\"`")
- line = line.replace("\'", "`\'`")
- line = line.replace("<", "<")
- line = line.replace(">", ">")
- line = line.replace(" ", " ")
- line = line.replace("*", "*")
- line = line.replace("_", "_")
- line = line.replace("#", "#")
- line = line.replace("-", "-")
- line = line.replace(".", ".")
- line = line.replace("!", "!")
- line = line.replace("(", "(")
- line = line.replace(")", ")")
- lines[i] = " "+line
- text = "".join(lines)
- return text
-
-def predict(inputs, top_p, temperature, openai_api_key, chatbot=[], history=[], system_prompt=initial_prompt, retry=False, summary=False): # repetition_penalty, top_k
-
- print(f"chatbot 1: {chatbot}")
-
- headers = {
- "Content-Type": "application/json",
- "Authorization": f"Bearer {openai_api_key}"
- }
-
- chat_counter = len(history) // 2
-
- print(f"chat_counter - {chat_counter}")
-
- messages = [compose_system(system_prompt)]
- if chat_counter:
- for data in chatbot:
- temp1 = {}
- temp1["role"] = "user"
- temp1["content"] = data[0]
- temp2 = {}
- temp2["role"] = "assistant"
- temp2["content"] = data[1]
- if temp1["content"] != "":
- messages.append(temp1)
- messages.append(temp2)
- else:
- messages[-1]['content'] = temp2['content']
- if retry and chat_counter:
- messages.pop()
- elif summary:
- messages.append(compose_user(
- "请帮我总结一下上述对话的内容,实现减少字数的同时,保证对话的质量。在总结中不要加入这一句话。"))
- history = ["我们刚刚聊了什么?"]
- else:
- temp3 = {}
- temp3["role"] = "user"
- temp3["content"] = inputs
- messages.append(temp3)
- chat_counter += 1
- # messages
- payload = {
- "model": "gpt-3.5-turbo",
- "messages": messages, # [{"role": "user", "content": f"{inputs}"}],
- "temperature": temperature, # 1.0,
- "top_p": top_p, # 1.0,
- "n": 1,
- "stream": True,
- "presence_penalty": 0,
- "frequency_penalty": 0,
- }
-
- if not summary:
- history.append(inputs)
- print(f"payload is - {payload}")
- # make a POST request to the API endpoint using the requests.post method, passing in stream=True
- response = requests.post(API_URL, headers=headers,
- json=payload, stream=True)
- #response = requests.post(API_URL, headers=headers, json=payload, stream=True)
-
- token_counter = 0
- partial_words = ""
-
- counter = 0
- chatbot.append((history[-1], ""))
- for chunk in response.iter_lines():
- if counter == 0:
- counter += 1
- continue
- counter += 1
- # check whether each line is non-empty
- if chunk:
- # decode each line as response data is in bytes
- try:
- if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
- break
- except Exception as e:
- chatbot.pop()
- chatbot.append((history[-1], f"☹️发生了错误 返回值:{response.text} 异常:{e}"))
- history.pop()
- yield chatbot, history
- break
- #print(json.loads(chunk.decode()[6:])['choices'][0]["delta"] ["content"])
- partial_words = partial_words + \
- json.loads(chunk.decode()[6:])[
- 'choices'][0]["delta"]["content"]
- if token_counter == 0:
- history.append(" " + partial_words)
- else:
- history[-1] = parse_text(partial_words)
- chatbot[-1] = (history[-2], history[-1])
- # chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ] # convert to tuples of list
- token_counter += 1
- # resembles {chatbot: chat, state: history}
- yield chatbot, history
-
-
-
-def delete_last_conversation(chatbot, history):
- chatbot.pop()
- history.pop()
- history.pop()
- return chatbot, history
-
-def save_chat_history(filename, system, history, chatbot):
- if filename == "":
- return
- if not filename.endswith(".json"):
- filename += ".json"
- os.makedirs(HISTORY_DIR, exist_ok=True)
- json_s = {"system": system, "history": history, "chatbot": chatbot}
- with open(os.path.join(HISTORY_DIR, filename), "w") as f:
- json.dump(json_s, f)
-
-
-def load_chat_history(filename):
- with open(os.path.join(HISTORY_DIR, filename), "r") as f:
- json_s = json.load(f)
- return filename, json_s["system"], json_s["history"], json_s["chatbot"]
-
-
-def get_file_names(dir, plain=False, filetype=".json"):
- # find all json files in the current directory and return their names
- try:
- files = [f for f in os.listdir(dir) if f.endswith(filetype)]
- except FileNotFoundError:
- files = []
- if plain:
- return files
- else:
- return gr.Dropdown.update(choices=files)
-
-def get_history_names(plain=False):
- return get_file_names(HISTORY_DIR, plain)
-
-def load_template(filename):
- lines = []
- with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as csvfile:
- reader = csv.reader(csvfile)
- lines = list(reader)
- lines = lines[1:]
- return {row[0]:row[1] for row in lines}, gr.Dropdown.update(choices=[row[0] for row in lines])
-
-def get_template_names(plain=False):
- return get_file_names(TEMPLATES_DIR, plain, filetype=".csv")
-
-def reset_state():
- return [], []
-
-
-def compose_system(system_prompt):
- return {"role": "system", "content": system_prompt}
-
-
-def compose_user(user_input):
- return {"role": "user", "content": user_input}
-
-
-def reset_textbox():
- return gr.update(value='')
-
-title = """Xiaokun's ChatGPT 🚀 """
-description = """
-
-由Xiaokun Qian at GitHub 开发
-
-此App使用 `gpt-3.5-turbo` 大语言模型
-
-"""
-with gr.Blocks() as demo:
- gr.HTML(title)
- gr.HTML(''' 强烈建议点击上面的按钮复制一份这个Space,在你自己的Space里运行,响应更迅速、也更安全👆 ''')
- keyTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入你的OpenAI API-key...",
- value=my_api_key, label="API Key", type="password", visible=not HIDE_MY_KEY).style(container=True)
- chatbot = gr.Chatbot() # .style(color_map=("#1D51EE", "#585A5B"))
- history = gr.State([])
- promptTemplates = gr.State({})
- TRUECOMSTANT = gr.State(True)
- FALSECONSTANT = gr.State(False)
- topic = gr.State("未命名对话历史记录")
-
- with gr.Row():
- with gr.Column(scale=12):
- txt = gr.Textbox(show_label=False, placeholder="在这里输入").style(
- container=False)
- with gr.Column(min_width=50, scale=1):
- submitBtn = gr.Button("🚀", variant="primary")
- with gr.Row():
- emptyBtn = gr.Button("🧹 新的对话")
- retryBtn = gr.Button("🔄 重新生成")
- delLastBtn = gr.Button("🗑️ 删除上条对话")
- reduceTokenBtn = gr.Button("♻️ 总结对话")
- systemPromptTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入System Prompt...",
- label="System prompt", value=initial_prompt).style(container=True)
- with gr.Accordion(label="加载Prompt模板", open=False):
- with gr.Column():
- with gr.Row():
- with gr.Column(scale=6):
- templateFileSelectDropdown = gr.Dropdown(label="选择Prompt模板集合文件(.csv)", choices=get_template_names(plain=True), multiselect=False)
- with gr.Column(scale=1):
- templateRefreshBtn = gr.Button("🔄 刷新")
- templaeFileReadBtn = gr.Button("📂 读入模板")
- with gr.Row():
- with gr.Column(scale=6):
- templateSelectDropdown = gr.Dropdown(label="从Prompt模板中加载", choices=[], multiselect=False)
- with gr.Column(scale=1):
- templateApplyBtn = gr.Button("⬇️ 应用")
- with gr.Accordion(label="保存/加载对话历史记录(在文本框中输入文件名,点击“保存对话”按钮,历史记录文件会被存储到Python文件旁边)", open=False):
- with gr.Column():
- with gr.Row():
- with gr.Column(scale=6):
- saveFileName = gr.Textbox(
- show_label=True, placeholder=f"在这里输入保存的文件名...", label="设置保存文件名", value="对话历史记录").style(container=True)
- with gr.Column(scale=1):
- saveBtn = gr.Button("💾 保存对话")
- with gr.Row():
- with gr.Column(scale=6):
- historyFileSelectDropdown = gr.Dropdown(label="从列表中加载对话", choices=get_history_names(plain=True), multiselect=False)
- with gr.Column(scale=1):
- historyRefreshBtn = gr.Button("🔄 刷新")
- historyReadBtn = gr.Button("📂 读入对话")
- #inputs, top_p, temperature, top_k, repetition_penalty
- with gr.Accordion("参数", open=False):
- top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.05,
- interactive=True, label="Top-p (nucleus sampling)",)
- temperature = gr.Slider(minimum=-0, maximum=5.0, value=1.0,
- step=0.1, interactive=True, label="Temperature",)
- #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
- #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
- gr.Markdown(description)
-
-
- txt.submit(predict, [txt, top_p, temperature, keyTxt,
- chatbot, history, systemPromptTxt], [chatbot, history])
- txt.submit(reset_textbox, [], [txt])
- submitBtn.click(predict, [txt, top_p, temperature, keyTxt, chatbot,
- history, systemPromptTxt], [chatbot, history], show_progress=True)
- submitBtn.click(reset_textbox, [], [txt])
- emptyBtn.click(reset_state, outputs=[chatbot, history])
- retryBtn.click(predict, [txt, top_p, temperature, keyTxt, chatbot, history,
- systemPromptTxt, TRUECOMSTANT], [chatbot, history], show_progress=True)
- delLastBtn.click(delete_last_conversation, [chatbot, history], [
- chatbot, history], show_progress=True)
- reduceTokenBtn.click(predict, [txt, top_p, temperature, keyTxt, chatbot, history,
- systemPromptTxt, FALSECONSTANT, TRUECOMSTANT], [chatbot, history], show_progress=True)
- saveBtn.click(save_chat_history, [
- saveFileName, systemPromptTxt, history, chatbot], None, show_progress=True)
- saveBtn.click(get_history_names, None, [historyFileSelectDropdown])
- historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown])
- historyReadBtn.click(load_chat_history, [historyFileSelectDropdown], [saveFileName, systemPromptTxt, history, chatbot], show_progress=True)
- templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])
- templaeFileReadBtn.click(load_template, [templateFileSelectDropdown], [promptTemplates, templateSelectDropdown], show_progress=True)
- templateApplyBtn.click(lambda x, y: x[y], [promptTemplates, templateSelectDropdown], [systemPromptTxt], show_progress=True)
-
-print("Xiaokun的温馨提示:访问 http://localhost:7860 查看界面")
-# By default a local server is started, reachable directly via IP, and no public share link is created
-demo.title = "Xiaokun's ChatGPT 🚀"
-
-#if running in Docker
-if dockerflag:
- if authflag:
- demo.queue().launch(server_name="0.0.0.0", server_port=7860,auth=(username, password))
- else:
- demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False)
-#if not running in Docker
-else:
-    demo.queue().launch(share=False) # change to share=True to create a public share link
-    #demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False) # custom port
-    #demo.queue().launch(server_name="0.0.0.0", server_port=7860,auth=("在这里填写用户名", "在这里填写密码")) # set a username and password
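The streaming loop inside `predict` above consumes server-sent events from the Chat Completions endpoint one `data:` line at a time (hence the `chunk.decode()[6:]` slice). A stripped-down sketch of that parsing, assuming the API key is supplied via the `OPENAI_API_KEY` environment variable rather than hard-coded:

```python
import json
import os
import requests

API_URL = "https://api.openai.com/v1/chat/completions"

def stream_chat(prompt: str) -> str:
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
    }
    payload = {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": prompt}],
        "stream": True,
    }
    answer = ""
    with requests.post(API_URL, headers=headers, json=payload, stream=True) as resp:
        for raw in resp.iter_lines():
            if not raw:
                continue
            line = raw.decode()
            # Each event looks like 'data: {...}'; the stream ends with 'data: [DONE]'.
            if not line.startswith("data: ") or line.endswith("[DONE]"):
                continue
            delta = json.loads(line[len("data: "):])["choices"][0]["delta"]
            answer += delta.get("content", "")   # deltas arrive token by token
    return answer
```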
diff --git a/spaces/KonradSzafer/HF-QA-Demo/data/scrapers/stack_overflow_scraper.py b/spaces/KonradSzafer/HF-QA-Demo/data/scrapers/stack_overflow_scraper.py
deleted file mode 100644
index 003b139b52206043ef74b079b46c8cfa44fc66cf..0000000000000000000000000000000000000000
--- a/spaces/KonradSzafer/HF-QA-Demo/data/scrapers/stack_overflow_scraper.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import re
-import csv
-import time
-import requests
-from typing import List
-import pandas as pd
-from tqdm import tqdm
-from bs4 import BeautifulSoup
-
-
-def scrape_question_with_answers(question_url: str) -> List[str]:
- url = 'https://stackoverflow.com/' + question_url
- response = requests.get(url)
- soup = BeautifulSoup(response.content, 'html.parser')
-
- title = soup.find('title').text.replace(' - Stack Overflow', '')
- question_div = soup.find('div', {'class': 'postcell post-layout--right'})
- question = question_div.find('p').text
- answers_div = soup.find('div', {'class': 'answercell post-layout--right'})
- answer = answers_div.find('div', {'class': 's-prose js-post-body'}).text
- return [title, question, answer, url]
-
-
-def scrape_questions_page(url: str, min_votes: int, min_answers: int) -> List[List[str]]:
- response = requests.get(url)
- soup = BeautifulSoup(response.content, 'html.parser')
- posts_summaries = soup.find_all('div', {'class':'s-post-summary js-post-summary'})
-
- qa_data = []
- for summary in posts_summaries:
- stats_div = summary.find('div', {'class': 's-post-summary--stats'})
- vote_div = stats_div.find('div', {
- 'class': 's-post-summary--stats-item s-post-summary--stats-item__emphasized',
- 'title': re.compile(r'^Score of \d+$')})
- if vote_div:
- vote_number = int(vote_div.find('span', {'class': 's-post-summary--stats-item-number'}).text)
- else:
- vote_number = 0
- answer_div = stats_div.find('div', {
- 'class': 's-post-summary--stats-item',
- 'title': re.compile(r'^\d+ answers$')})
- if answer_div:
- answer_number = int(answer_div.find('span', {'class': 's-post-summary--stats-item-number'}).text)
- else:
- answer_number = 0
-
- question_href = summary.find('a', {'class': 's-link'})['href']
- if vote_number >= min_votes and answer_number >= min_answers:
- try:
- qa_data.append(scrape_question_with_answers(question_href))
- except Exception as error:
- print(error)
-
- time.sleep(1.5)
- return qa_data
-
-
-def crawl_and_save_qa(
- filename: str,
- base_url: str,
- start_page: int,
- n_pages: int=10,
- min_votes: int=1,
- min_answers: int=1
-):
- with open(filename, 'a', newline='') as f:
- writer = csv.writer(f)
- if start_page == 1:
- writer.writerow(['title', 'question', 'answer', 'url'])
- for page_num in tqdm(range(start_page, start_page+n_pages)):
- page_data = scrape_questions_page(
- base_url.format(page_num),
- min_votes,
- min_answers
- )
- if page_data:
- for qa_data in page_data:
- writer.writerow(qa_data)
-
-
-if __name__ == '__main__':
- filename = '../datasets/stackoverflow_linux.csv'
- url = 'https://stackoverflow.com/questions/tagged/linux?tab=votes&page={}&pagesize=15'
- crawl_and_save_qa(
- filename=filename,
- base_url=url,
- start_page=21,
- n_pages=10,
- min_votes=1,
- min_answers=1
- )
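A usage sketch for the helpers above with a different tag; the module name and output path are assumptions, and Stack Overflow's rate limits still apply (the scraper sleeps 1.5 s per question for that reason):

```python
from stack_overflow_scraper import crawl_and_save_qa  # module name assumed

url = "https://stackoverflow.com/questions/tagged/python?tab=votes&page={}&pagesize=15"
crawl_and_save_qa(
    filename="stackoverflow_python.csv",  # hypothetical output path
    base_url=url,       # "{}" is filled with the page number
    start_page=1,       # page 1 also writes the CSV header row
    n_pages=5,
    min_votes=5,        # keep only well-received questions
    min_answers=1,
)
```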
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/detectors/single_stage.py b/spaces/KyanChen/RSPrompter/mmdet/models/detectors/single_stage.py
deleted file mode 100644
index 06c074085967bbc9040d93e5eb446b67a006087e..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/detectors/single_stage.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from typing import List, Tuple, Union
-
-from torch import Tensor
-
-from mmdet.registry import MODELS
-from mmdet.structures import OptSampleList, SampleList
-from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
-from .base import BaseDetector
-
-
-@MODELS.register_module()
-class SingleStageDetector(BaseDetector):
- """Base class for single-stage detectors.
-
- Single-stage detectors directly and densely predict bounding boxes on the
- output features of the backbone+neck.
- """
-
- def __init__(self,
- backbone: ConfigType,
- neck: OptConfigType = None,
- bbox_head: OptConfigType = None,
- train_cfg: OptConfigType = None,
- test_cfg: OptConfigType = None,
- data_preprocessor: OptConfigType = None,
- init_cfg: OptMultiConfig = None) -> None:
- super().__init__(
- data_preprocessor=data_preprocessor, init_cfg=init_cfg)
- self.backbone = MODELS.build(backbone)
- if neck is not None:
- self.neck = MODELS.build(neck)
- bbox_head.update(train_cfg=train_cfg)
- bbox_head.update(test_cfg=test_cfg)
- self.bbox_head = MODELS.build(bbox_head)
- self.train_cfg = train_cfg
- self.test_cfg = test_cfg
-
- def _load_from_state_dict(self, state_dict: dict, prefix: str,
- local_metadata: dict, strict: bool,
- missing_keys: Union[List[str], str],
- unexpected_keys: Union[List[str], str],
- error_msgs: Union[List[str], str]) -> None:
- """Exchange bbox_head key to rpn_head key when loading two-stage
- weights into single-stage model."""
- bbox_head_prefix = prefix + '.bbox_head' if prefix else 'bbox_head'
- bbox_head_keys = [
- k for k in state_dict.keys() if k.startswith(bbox_head_prefix)
- ]
- rpn_head_prefix = prefix + '.rpn_head' if prefix else 'rpn_head'
- rpn_head_keys = [
- k for k in state_dict.keys() if k.startswith(rpn_head_prefix)
- ]
- if len(bbox_head_keys) == 0 and len(rpn_head_keys) != 0:
- for rpn_head_key in rpn_head_keys:
- bbox_head_key = bbox_head_prefix + \
- rpn_head_key[len(rpn_head_prefix):]
- state_dict[bbox_head_key] = state_dict.pop(rpn_head_key)
- super()._load_from_state_dict(state_dict, prefix, local_metadata,
- strict, missing_keys, unexpected_keys,
- error_msgs)
-
- def loss(self, batch_inputs: Tensor,
- batch_data_samples: SampleList) -> Union[dict, list]:
- """Calculate losses from a batch of inputs and data samples.
-
- Args:
- batch_inputs (Tensor): Input images of shape (N, C, H, W).
- These should usually be mean centered and std scaled.
- batch_data_samples (list[:obj:`DetDataSample`]): The batch
- data samples. It usually includes information such
- as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
-
- Returns:
- dict: A dictionary of loss components.
- """
- x = self.extract_feat(batch_inputs)
- losses = self.bbox_head.loss(x, batch_data_samples)
- return losses
-
- def predict(self,
- batch_inputs: Tensor,
- batch_data_samples: SampleList,
- rescale: bool = True) -> SampleList:
- """Predict results from a batch of inputs and data samples with post-
- processing.
-
- Args:
- batch_inputs (Tensor): Inputs with shape (N, C, H, W).
- batch_data_samples (List[:obj:`DetDataSample`]): The Data
- Samples. It usually includes information such as
- `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
- rescale (bool): Whether to rescale the results.
- Defaults to True.
-
- Returns:
- list[:obj:`DetDataSample`]: Detection results of the
- input images. Each DetDataSample usually contain
- 'pred_instances'. And the ``pred_instances`` usually
- contains following keys.
-
- - scores (Tensor): Classification scores, has a shape
- (num_instance, )
- - labels (Tensor): Labels of bboxes, has a shape
- (num_instances, ).
- - bboxes (Tensor): Has a shape (num_instances, 4),
- the last dimension 4 arrange as (x1, y1, x2, y2).
- """
- x = self.extract_feat(batch_inputs)
- results_list = self.bbox_head.predict(
- x, batch_data_samples, rescale=rescale)
- batch_data_samples = self.add_pred_to_datasample(
- batch_data_samples, results_list)
- return batch_data_samples
-
- def _forward(
- self,
- batch_inputs: Tensor,
- batch_data_samples: OptSampleList = None) -> Tuple[List[Tensor]]:
- """Network forward process. Usually includes backbone, neck and head
- forward without any post-processing.
-
- Args:
- batch_inputs (Tensor): Inputs with shape (N, C, H, W).
- batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
- the meta information of each image and corresponding
- annotations.
-
- Returns:
- tuple[list]: A tuple of features from ``bbox_head`` forward.
- """
- x = self.extract_feat(batch_inputs)
- results = self.bbox_head.forward(x)
- return results
-
- def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:
- """Extract features.
-
- Args:
- batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).
-
- Returns:
- tuple[Tensor]: Multi-level features that may have
- different resolutions.
- """
- x = self.backbone(batch_inputs)
- if self.with_neck:
- x = self.neck(x)
- return x
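The `_load_from_state_dict` override above only rewrites checkpoint keys: if a state dict contains `rpn_head.*` entries but no `bbox_head.*` entries, the RPN weights are renamed so they can seed the single-stage head. A standalone sketch of that rewrite on a plain dict (the function name is illustrative):

```python
def remap_rpn_to_bbox_head(state_dict: dict, prefix: str = "") -> dict:
    """Mimic the key rewrite in SingleStageDetector._load_from_state_dict."""
    bbox_prefix = prefix + ".bbox_head" if prefix else "bbox_head"
    rpn_prefix = prefix + ".rpn_head" if prefix else "rpn_head"
    bbox_keys = [k for k in state_dict if k.startswith(bbox_prefix)]
    rpn_keys = [k for k in state_dict if k.startswith(rpn_prefix)]
    if not bbox_keys and rpn_keys:  # only remap pure RPN checkpoints
        for k in rpn_keys:
            state_dict[bbox_prefix + k[len(rpn_prefix):]] = state_dict.pop(k)
    return state_dict


ckpt = {"rpn_head.conv.weight": 1, "rpn_head.conv.bias": 2, "backbone.stem": 3}
print(remap_rpn_to_bbox_head(ckpt))
# {'backbone.stem': 3, 'bbox_head.conv.weight': 1, 'bbox_head.conv.bias': 2}
```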
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/layers/transformer/__init__.py b/spaces/KyanChen/RSPrompter/mmdet/models/layers/transformer/__init__.py
deleted file mode 100644
index 0d70f845f8f48d1cabaab63ee33d65569d28a13e..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/layers/transformer/__init__.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from .conditional_detr_layers import (ConditionalDetrTransformerDecoder,
- ConditionalDetrTransformerDecoderLayer)
-from .dab_detr_layers import (DABDetrTransformerDecoder,
- DABDetrTransformerDecoderLayer,
- DABDetrTransformerEncoder)
-from .deformable_detr_layers import (DeformableDetrTransformerDecoder,
- DeformableDetrTransformerDecoderLayer,
- DeformableDetrTransformerEncoder,
- DeformableDetrTransformerEncoderLayer)
-from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
- DetrTransformerEncoder, DetrTransformerEncoderLayer)
-from .dino_layers import CdnQueryGenerator, DinoTransformerDecoder
-from .mask2former_layers import (Mask2FormerTransformerDecoder,
- Mask2FormerTransformerDecoderLayer,
- Mask2FormerTransformerEncoder)
-from .utils import (MLP, AdaptivePadding, ConditionalAttention, DynamicConv,
- PatchEmbed, PatchMerging, coordinate_to_encoding,
- inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
-
-__all__ = [
- 'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed',
- 'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP',
- 'DetrTransformerEncoder', 'DetrTransformerDecoder',
- 'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
- 'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
- 'DeformableDetrTransformerEncoderLayer',
- 'DeformableDetrTransformerDecoderLayer', 'coordinate_to_encoding',
- 'ConditionalAttention', 'DABDetrTransformerDecoderLayer',
- 'DABDetrTransformerDecoder', 'DABDetrTransformerEncoder',
- 'ConditionalDetrTransformerDecoder',
- 'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
- 'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
- 'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder'
-]
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/losses/mse_loss.py b/spaces/KyanChen/RSPrompter/mmdet/models/losses/mse_loss.py
deleted file mode 100644
index 6048218ad36a8105e7fa182f40fae93ef7c9268f..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/losses/mse_loss.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from typing import Optional
-
-import torch.nn as nn
-import torch.nn.functional as F
-from torch import Tensor
-
-from mmdet.registry import MODELS
-from .utils import weighted_loss
-
-
-@weighted_loss
-def mse_loss(pred: Tensor, target: Tensor) -> Tensor:
- """A Wrapper of MSE loss.
- Args:
- pred (Tensor): The prediction.
- target (Tensor): The learning target of the prediction.
-
- Returns:
- Tensor: loss Tensor
- """
- return F.mse_loss(pred, target, reduction='none')
-
-
-@MODELS.register_module()
-class MSELoss(nn.Module):
- """MSELoss.
-
- Args:
- reduction (str, optional): The method that reduces the loss to a
- scalar. Options are "none", "mean" and "sum".
- loss_weight (float, optional): The weight of the loss. Defaults to 1.0
- """
-
- def __init__(self,
- reduction: str = 'mean',
- loss_weight: float = 1.0) -> None:
- super().__init__()
- self.reduction = reduction
- self.loss_weight = loss_weight
-
- def forward(self,
- pred: Tensor,
- target: Tensor,
- weight: Optional[Tensor] = None,
- avg_factor: Optional[int] = None,
- reduction_override: Optional[str] = None) -> Tensor:
- """Forward function of loss.
-
- Args:
- pred (Tensor): The prediction.
- target (Tensor): The learning target of the prediction.
- weight (Tensor, optional): Weight of the loss for each
- prediction. Defaults to None.
- avg_factor (int, optional): Average factor that is used to average
- the loss. Defaults to None.
- reduction_override (str, optional): The reduction method used to
- override the original reduction method of the loss.
- Defaults to None.
-
- Returns:
- Tensor: The calculated loss.
- """
- assert reduction_override in (None, 'none', 'mean', 'sum')
- reduction = (
- reduction_override if reduction_override else self.reduction)
- loss = self.loss_weight * mse_loss(
- pred, target, weight, reduction=reduction, avg_factor=avg_factor)
- return loss
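
For readers skimming this deleted module: the @weighted_loss decorator it relies on (defined in the sibling utils.py, not shown here) conventionally wraps an elementwise loss with weight, reduction, and avg_factor handling. A minimal, self-contained sketch of that convention, assuming the usual mmdet semantics rather than quoting the real decorator:

import torch
import torch.nn.functional as F

def weighted_reduce(loss, weight=None, reduction='mean', avg_factor=None):
    # Hypothetical stand-in for what @weighted_loss adds around mse_loss:
    # elementwise weighting, then 'none'/'sum'/'mean' reduction, with an
    # optional explicit normaliser (avg_factor) for the 'mean' case.
    if weight is not None:
        loss = loss * weight
    if reduction == 'none':
        return loss
    if reduction == 'sum':
        return loss.sum()
    return loss.mean() if avg_factor is None else loss.sum() / avg_factor

pred, target = torch.randn(4, 2), torch.zeros(4, 2)
elementwise = F.mse_loss(pred, target, reduction='none')
print(weighted_reduce(elementwise, reduction='mean'))
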
diff --git a/spaces/LP-art/Bing/README.md b/spaces/LP-art/Bing/README.md
deleted file mode 100644
index 210c16330516dde0629598ca32b56875544b5da7..0000000000000000000000000000000000000000
--- a/spaces/LP-art/Bing/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Bing
-emoji: 🏃
-colorFrom: yellow
-colorTo: yellow
-sdk: docker
-pinned: false
-license: mit
-app_port: 8080
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/LinkSoul/LLaSM/static/css/styles.css b/spaces/LinkSoul/LLaSM/static/css/styles.css
deleted file mode 100644
index 560dfcde21b0fcd93dfad0bf6e77d0faf12c5b53..0000000000000000000000000000000000000000
--- a/spaces/LinkSoul/LLaSM/static/css/styles.css
+++ /dev/null
@@ -1,215 +0,0 @@
-#avatar-video, #talking-vid {
- justify-content: center;
- align-items: center;
- display: flex;
- height: 320px;
- margin-top: 12px;
-}
-video {
- border-top-right-radius: 3rem;
- border-top-left-radius: 3rem;
- width: 100%;
- margin-bottom: 20px;
- margin-top: 120px;
- z-index: -1;
-}
-.btn {
- border-radius: 1.5rem !important;
- z-index: 2;
- /* background-color: #9e9e9e !important; */
- border:none;
-}
-
-.iceConnectionState-connected,
-.iceConnectionState-completed,
-.peerConnectionState-connected,
-#ice-gathering-status-label,
-.ice-status-label,
-.signalingState-stable,
-.streamingState-empty {
- color: green;
-}
-#video-select {
- box-shadow: 0 0 2rem rgba(0,0,0,.14)!important;
- border-radius: 0.7rem;
- padding: 12px;
- text-align: end !important;
-}
-.video-select {
- position: absolute;
- padding-top: 5px;
-}
-
-#user-text {
- position: absolute;
- width: 100%;
- z-index: 1;
- box-shadow: 0 0 2rem rgba(0,0,0,.14)!important;
- border-radius: 1.5rem;
- border: 1px;
-}
-#chat-window {
- box-shadow: 0 0 2rem rgba(0,0,0,.14)!important;
- border: none;
- border-radius: 1.5rem;
-}
-.col-md-12 {
- box-shadow: 0 0 2rem rgba(0,0,0,.14)!important;
- border-radius: 3rem;
- padding: 20px;
- padding-bottom: 80px;
- width: 750px;
- /* width: 485px; */
-}
-.input-group-append {
- right: 10px;
- position: absolute;
- z-index: 2;
-}
-
-.input-group-append {
- transition: width 0.8s ease-in-out;
- border-radius: 1.5rem !important;
-}
-
-.expanded {
- width: 100%;
- background-image: url('./images/record_waveform.gif');
- background-position: center;
- height: 59px;
- top: 2px;
- left: 0px;
- text-align: end;
- cursor: pointer;
-}
-
-.expanded button {
- margin-top: -3px;
- margin-right: 3px;
-}
-
-
-.btn-secondary {
- background-color: #198754 !important;
-}
-.input-group .btn {
- position: relative;
- z-index: 2;
- width: 50px;
- border-radius: 8px !important;
-}
-#info {
- text-align: center !important;
- border-radius: 3rem;
- font-size: 14px;
-}
-#info a {
- color: darkred;
- text-decoration: underline;
- }
-
- .final {
- color: black;
- padding-right: 3px;
- }
- .interim {
- color: gray;
- }
- .select-avatar {
- margin-top: 130px !important;
- }
- #results {
- font-size: 14px;
- font-weight: bold;
- border: 1.4px solid #ddd;
- padding: 15px;
- text-align: left;
- overflow-y: scroll;
- height: 400px;
- margin: 0 0 20px 0;
- border-radius: 0.7rem;
- }
- /* #llasaLoading {
- font-size: 14px;
- font-weight: bold;
- border: 1.4px solid #ddd;
- padding: 15px;
- text-align: left;
- overflow-y: scroll;
- height: 400px;
- margin: 0 0 20px 0;
- border-radius: 0.7rem;
- justify-content: center;
- } */
- .btn-success {
- background: #9e9e9e !important;
- }
-
- .sent-message {
- margin-left: 37px !important;
- }
- #start_button {
- border: 0;
- background-color:transparent;
- padding: 0;
- cursor: pointer;
- }
- #delete_button {
- border: 0;
- background-color:transparent;
- padding: 0;
- cursor: pointer;
- }
- .small {
- background-color: #d1e7dd !important;
- font-size: 14px;
- color: black !important;
- width: fit-content;
- }
- .time {
- text-align: center !important;
- }
- #start_img, #delete_img, #send_text_img {
- width: 30px;
- height: 30px;
- }
- #send_button {
- border: 0;
- background-color: transparent;
- padding: 0;
- }
- #status {
- font-size: 8px;
- color: #cacecccc;
- }
-
- .btn-primary, .btn-danger {
- width: 100px;
- margin: auto;
- }
- .alert {
- padding:0.5rem !important ;
- }
- select {
- padding: 5px 5px;
- }
- #select_dialect {
- width: 80px;
- }
- #select_language {
- width: 60px
- }
- @media screen and (max-width: 767px) {
- #select_dialect {
- position: absolute;
- right: 0;
- }
- }
-
- @media screen and (min-width: 768px) {
- select {
- margin-right: 10px;
- }
- }
-
-
\ No newline at end of file
diff --git a/spaces/MBZ/LoRA-DreamBooth-Training-UI/README.md b/spaces/MBZ/LoRA-DreamBooth-Training-UI/README.md
deleted file mode 100644
index b61f96a3f0f5df541bd4e0dfba3a468ceb1c54e9..0000000000000000000000000000000000000000
--- a/spaces/MBZ/LoRA-DreamBooth-Training-UI/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: LoRA DreamBooth Training UI
-emoji: ⚡
-colorFrom: red
-colorTo: purple
-sdk: gradio
-sdk_version: 3.16.2
-python_version: 3.10.9
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: lora-library/LoRA-DreamBooth-Training-UI
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/MISATO-dataset/Adaptability_protein_dynamics/README.md b/spaces/MISATO-dataset/Adaptability_protein_dynamics/README.md
deleted file mode 100644
index c287093d3f9e438ba0c57575af18a8c416296077..0000000000000000000000000000000000000000
--- a/spaces/MISATO-dataset/Adaptability_protein_dynamics/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Adaptability protein dynamics
-emoji: 🔥
-colorFrom: indigo
-colorTo: red
-sdk: docker
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/MVV/3dTopDenoising/models/SAP/__init__.py b/spaces/MVV/3dTopDenoising/models/SAP/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Marshalls/testmtd/training/options/test_options.py b/spaces/Marshalls/testmtd/training/options/test_options.py
deleted file mode 100644
index 23cd4cbc3eb48b0a9c84a4ae9f0f9e0f3e9fc31c..0000000000000000000000000000000000000000
--- a/spaces/Marshalls/testmtd/training/options/test_options.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from .base_options import BaseOptions
-
-
-class TestOptions(BaseOptions):
- def __init__(self):
- super(TestOptions, self).__init__()
- parser = self.parser
- parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.')
- parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
- parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
- # Dropout and Batchnorm have different behavior during training and test.
- parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
-
- parser.set_defaults(model='test')
- # To avoid cropping, the loadSize should be the same as fineSize
- parser.set_defaults(load_size=parser.get_default('fine_size'))
- self.is_train = False
- self.parser = parser
-
-
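
TestOptions subclasses a BaseOptions argparse wrapper that is not part of this diff; the sketch below reconstructs the pattern with a hypothetical stand-in BaseOptions, so any attribute or flag beyond those in the deleted file is an assumption.

import argparse

class BaseOptions:
    # Hypothetical stand-in for the project's BaseOptions, kept minimal.
    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument('--fine_size', type=int, default=256)
        self.parser.add_argument('--load_size', type=int, default=286)
        self.is_train = True

    def parse(self, args=None):
        return self.parser.parse_args(args)

class TestOptions(BaseOptions):
    def __init__(self):
        super().__init__()
        self.parser.add_argument('--num_test', type=int, default=50)
        # To avoid cropping, pin load_size to the fine_size default,
        # mirroring the set_defaults call in the deleted file.
        self.parser.set_defaults(load_size=self.parser.get_default('fine_size'))
        self.is_train = False

print(TestOptions().parse([]))
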
diff --git a/spaces/MirageML/sjc/guided_diffusion/unet.py b/spaces/MirageML/sjc/guided_diffusion/unet.py
deleted file mode 100644
index 96b46930006b7c9e49948d31568474824195cf8f..0000000000000000000000000000000000000000
--- a/spaces/MirageML/sjc/guided_diffusion/unet.py
+++ /dev/null
@@ -1,894 +0,0 @@
-from abc import abstractmethod
-
-import math
-
-import numpy as np
-import torch as th
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .fp16_util import convert_module_to_f16, convert_module_to_f32
-from .nn import (
- checkpoint,
- conv_nd,
- linear,
- avg_pool_nd,
- zero_module,
- normalization,
- timestep_embedding,
-)
-
-
-class AttentionPool2d(nn.Module):
- """
- Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
- """
-
- def __init__(
- self,
- spacial_dim: int,
- embed_dim: int,
- num_heads_channels: int,
- output_dim: int = None,
- ):
- super().__init__()
- self.positional_embedding = nn.Parameter(
- th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
- )
- self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
- self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
- self.num_heads = embed_dim // num_heads_channels
- self.attention = QKVAttention(self.num_heads)
-
- def forward(self, x):
- b, c, *_spatial = x.shape
- x = x.reshape(b, c, -1) # NC(HW)
- x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
- x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
- x = self.qkv_proj(x)
- x = self.attention(x)
- x = self.c_proj(x)
- return x[:, :, 0]
-
-
-class TimestepBlock(nn.Module):
- """
- Any module where forward() takes timestep embeddings as a second argument.
- """
-
- @abstractmethod
- def forward(self, x, emb):
- """
- Apply the module to `x` given `emb` timestep embeddings.
- """
-
-
-class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
- """
- A sequential module that passes timestep embeddings to the children that
- support it as an extra input.
- """
-
- def forward(self, x, emb):
- for layer in self:
- if isinstance(layer, TimestepBlock):
- x = layer(x, emb)
- else:
- x = layer(x)
- return x
-
-
-class Upsample(nn.Module):
- """
- An upsampling layer with an optional convolution.
-
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- upsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- if use_conv:
- self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- if self.dims == 3:
- x = F.interpolate(
- x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
- )
- else:
- x = F.interpolate(x, scale_factor=2, mode="nearest")
- if self.use_conv:
- x = self.conv(x)
- return x
-
-
-class Downsample(nn.Module):
- """
- A downsampling layer with an optional convolution.
-
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- downsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- stride = 2 if dims != 3 else (1, 2, 2)
- if use_conv:
- self.op = conv_nd(
- dims, self.channels, self.out_channels, 3, stride=stride, padding=1
- )
- else:
- assert self.channels == self.out_channels
- self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- return self.op(x)
-
-
-class ResBlock(TimestepBlock):
- """
- A residual block that can optionally change the number of channels.
-
- :param channels: the number of input channels.
- :param emb_channels: the number of timestep embedding channels.
- :param dropout: the rate of dropout.
- :param out_channels: if specified, the number of out channels.
- :param use_conv: if True and out_channels is specified, use a spatial
- convolution instead of a smaller 1x1 convolution to change the
- channels in the skip connection.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param use_checkpoint: if True, use gradient checkpointing on this module.
- :param up: if True, use this block for upsampling.
- :param down: if True, use this block for downsampling.
- """
-
- def __init__(
- self,
- channels,
- emb_channels,
- dropout,
- out_channels=None,
- use_conv=False,
- use_scale_shift_norm=False,
- dims=2,
- use_checkpoint=False,
- up=False,
- down=False,
- ):
- super().__init__()
- self.channels = channels
- self.emb_channels = emb_channels
- self.dropout = dropout
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.use_checkpoint = use_checkpoint
- self.use_scale_shift_norm = use_scale_shift_norm
-
- self.in_layers = nn.Sequential(
- normalization(channels),
- nn.SiLU(),
- conv_nd(dims, channels, self.out_channels, 3, padding=1),
- )
-
- self.updown = up or down
-
- if up:
- self.h_upd = Upsample(channels, False, dims)
- self.x_upd = Upsample(channels, False, dims)
- elif down:
- self.h_upd = Downsample(channels, False, dims)
- self.x_upd = Downsample(channels, False, dims)
- else:
- self.h_upd = self.x_upd = nn.Identity()
-
- self.emb_layers = nn.Sequential(
- nn.SiLU(),
- linear(
- emb_channels,
- 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
- ),
- )
- self.out_layers = nn.Sequential(
- normalization(self.out_channels),
- nn.SiLU(),
- nn.Dropout(p=dropout),
- zero_module(
- conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
- ),
- )
-
- if self.out_channels == channels:
- self.skip_connection = nn.Identity()
- elif use_conv:
- self.skip_connection = conv_nd(
- dims, channels, self.out_channels, 3, padding=1
- )
- else:
- self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
-
- def forward(self, x, emb):
- """
- Apply the block to a Tensor, conditioned on a timestep embedding.
-
- :param x: an [N x C x ...] Tensor of features.
- :param emb: an [N x emb_channels] Tensor of timestep embeddings.
- :return: an [N x C x ...] Tensor of outputs.
- """
- return checkpoint(
- self._forward, (x, emb), self.parameters(), self.use_checkpoint
- )
-
- def _forward(self, x, emb):
- if self.updown:
- in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
- h = in_rest(x)
- h = self.h_upd(h)
- x = self.x_upd(x)
- h = in_conv(h)
- else:
- h = self.in_layers(x)
- emb_out = self.emb_layers(emb).type(h.dtype)
- while len(emb_out.shape) < len(h.shape):
- emb_out = emb_out[..., None]
- if self.use_scale_shift_norm:
- out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
- scale, shift = th.chunk(emb_out, 2, dim=1)
- h = out_norm(h) * (1 + scale) + shift
- h = out_rest(h)
- else:
- h = h + emb_out
- h = self.out_layers(h)
- return self.skip_connection(x) + h
-
-
-class AttentionBlock(nn.Module):
- """
- An attention block that allows spatial positions to attend to each other.
-
- Originally ported from here, but adapted to the N-d case.
- https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
- """
-
- def __init__(
- self,
- channels,
- num_heads=1,
- num_head_channels=-1,
- use_checkpoint=False,
- use_new_attention_order=False,
- ):
- super().__init__()
- self.channels = channels
- if num_head_channels == -1:
- self.num_heads = num_heads
- else:
- assert (
- channels % num_head_channels == 0
- ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
- self.num_heads = channels // num_head_channels
- self.use_checkpoint = use_checkpoint
- self.norm = normalization(channels)
- self.qkv = conv_nd(1, channels, channels * 3, 1)
- if use_new_attention_order:
- # split qkv before split heads
- self.attention = QKVAttention(self.num_heads)
- else:
- # split heads before split qkv
- self.attention = QKVAttentionLegacy(self.num_heads)
-
- self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
-
- def forward(self, x):
- return checkpoint(self._forward, (x,), self.parameters(), True)
-
- def _forward(self, x):
- b, c, *spatial = x.shape
- x = x.reshape(b, c, -1)
- qkv = self.qkv(self.norm(x))
- h = self.attention(qkv)
- h = self.proj_out(h)
- return (x + h).reshape(b, c, *spatial)
-
-
-def count_flops_attn(model, _x, y):
- """
- A counter for the `thop` package to count the operations in an
- attention operation.
- Meant to be used like:
- macs, params = thop.profile(
- model,
- inputs=(inputs, timestamps),
- custom_ops={QKVAttention: QKVAttention.count_flops},
- )
- """
- b, c, *spatial = y[0].shape
- num_spatial = int(np.prod(spatial))
- # We perform two matmuls with the same number of ops.
- # The first computes the weight matrix, the second computes
- # the combination of the value vectors.
- matmul_ops = 2 * b * (num_spatial ** 2) * c
- model.total_ops += th.DoubleTensor([matmul_ops])
-
-
-class QKVAttentionLegacy(nn.Module):
- """
- A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
- """
-
- def __init__(self, n_heads):
- super().__init__()
- self.n_heads = n_heads
-
- def forward(self, qkv):
- """
- Apply QKV attention.
-
- :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
- :return: an [N x (H * C) x T] tensor after attention.
- """
- bs, width, length = qkv.shape
- assert width % (3 * self.n_heads) == 0
- ch = width // (3 * self.n_heads)
- q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
- scale = 1 / math.sqrt(math.sqrt(ch))
- weight = th.einsum(
- "bct,bcs->bts", q * scale, k * scale
- ) # More stable with f16 than dividing afterwards
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
- a = th.einsum("bts,bcs->bct", weight, v)
- return a.reshape(bs, -1, length)
-
- @staticmethod
- def count_flops(model, _x, y):
- return count_flops_attn(model, _x, y)
-
-
-class QKVAttention(nn.Module):
- """
- A module which performs QKV attention and splits in a different order.
- """
-
- def __init__(self, n_heads):
- super().__init__()
- self.n_heads = n_heads
-
- def forward(self, qkv):
- """
- Apply QKV attention.
-
- :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
- :return: an [N x (H * C) x T] tensor after attention.
- """
- bs, width, length = qkv.shape
- assert width % (3 * self.n_heads) == 0
- ch = width // (3 * self.n_heads)
- q, k, v = qkv.chunk(3, dim=1)
- scale = 1 / math.sqrt(math.sqrt(ch))
- weight = th.einsum(
- "bct,bcs->bts",
- (q * scale).view(bs * self.n_heads, ch, length),
- (k * scale).view(bs * self.n_heads, ch, length),
- ) # More stable with f16 than dividing afterwards
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
- a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
- return a.reshape(bs, -1, length)
-
- @staticmethod
- def count_flops(model, _x, y):
- return count_flops_attn(model, _x, y)
-
-
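
Both attention modules above scale q and k by 1/sqrt(sqrt(ch)) before the einsum instead of dividing the product afterwards, which the inline comments note is friendlier to fp16. A tiny standalone illustration of why the two are equivalent:

import math
import torch

bs_heads, ch, length = 4, 32, 10
q = torch.randn(bs_heads, ch, length)
k = torch.randn(bs_heads, ch, length)
scale = 1 / math.sqrt(math.sqrt(ch))
# Pre-scaling each operand by ch**-0.25 ...
pre = torch.einsum("bct,bcs->bts", q * scale, k * scale)
# ... equals the usual post-hoc division by sqrt(ch), up to float error.
post = torch.einsum("bct,bcs->bts", q, k) / math.sqrt(ch)
assert torch.allclose(pre, post, atol=1e-5)
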
-class UNetModel(nn.Module):
- """
- The full UNet model with attention and timestep embedding.
-
- :param in_channels: channels in the input Tensor.
- :param model_channels: base channel count for the model.
- :param out_channels: channels in the output Tensor.
- :param num_res_blocks: number of residual blocks per downsample.
- :param attention_resolutions: a collection of downsample rates at which
- attention will take place. May be a set, list, or tuple.
- For example, if this contains 4, then at 4x downsampling, attention
- will be used.
- :param dropout: the dropout probability.
- :param channel_mult: channel multiplier for each level of the UNet.
- :param conv_resample: if True, use learned convolutions for upsampling and
- downsampling.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param num_classes: if specified (as an int), then this model will be
- class-conditional with `num_classes` classes.
- :param use_checkpoint: use gradient checkpointing to reduce memory usage.
- :param num_heads: the number of attention heads in each attention layer.
- :param num_head_channels: if specified, ignore num_heads and instead use
- a fixed channel width per attention head.
- :param num_heads_upsample: works with num_heads to set a different number
- of heads for upsampling. Deprecated.
- :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
- :param resblock_updown: use residual blocks for up/downsampling.
- :param use_new_attention_order: use a different attention pattern for potentially
- increased efficiency.
- """
-
- def __init__(
- self,
- image_size,
- in_channels,
- model_channels,
- out_channels,
- num_res_blocks,
- attention_resolutions,
- dropout=0,
- channel_mult=(1, 2, 4, 8),
- conv_resample=True,
- dims=2,
- num_classes=None,
- use_checkpoint=False,
- use_fp16=False,
- num_heads=1,
- num_head_channels=-1,
- num_heads_upsample=-1,
- use_scale_shift_norm=False,
- resblock_updown=False,
- use_new_attention_order=False,
- ):
- super().__init__()
-
- if num_heads_upsample == -1:
- num_heads_upsample = num_heads
-
- self.image_size = image_size
- self.in_channels = in_channels
- self.model_channels = model_channels
- self.out_channels = out_channels
- self.num_res_blocks = num_res_blocks
- self.attention_resolutions = attention_resolutions
- self.dropout = dropout
- self.channel_mult = channel_mult
- self.conv_resample = conv_resample
- self.num_classes = num_classes
- self.use_checkpoint = use_checkpoint
- self.dtype = th.float16 if use_fp16 else th.float32
- self.num_heads = num_heads
- self.num_head_channels = num_head_channels
- self.num_heads_upsample = num_heads_upsample
-
- time_embed_dim = model_channels * 4
- self.time_embed = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
-
- if self.num_classes is not None:
- self.label_emb = nn.Embedding(num_classes, time_embed_dim)
-
- ch = input_ch = int(channel_mult[0] * model_channels)
- self.input_blocks = nn.ModuleList(
- [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
- )
- self._feature_size = ch
- input_block_chans = [ch]
- ds = 1
- for level, mult in enumerate(channel_mult):
- for _ in range(num_res_blocks):
- layers = [
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=int(mult * model_channels),
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = int(mult * model_channels)
- if ds in attention_resolutions:
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- )
- )
- self.input_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
- input_block_chans.append(ch)
- if level != len(channel_mult) - 1:
- out_ch = ch
- self.input_blocks.append(
- TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- down=True,
- )
- if resblock_updown
- else Downsample(
- ch, conv_resample, dims=dims, out_channels=out_ch
- )
- )
- )
- ch = out_ch
- input_block_chans.append(ch)
- ds *= 2
- self._feature_size += ch
-
- self.middle_block = TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- ),
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- )
- self._feature_size += ch
-
- self.output_blocks = nn.ModuleList([])
- for level, mult in list(enumerate(channel_mult))[::-1]:
- for i in range(num_res_blocks + 1):
- ich = input_block_chans.pop()
- layers = [
- ResBlock(
- ch + ich,
- time_embed_dim,
- dropout,
- out_channels=int(model_channels * mult),
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = int(model_channels * mult)
- if ds in attention_resolutions:
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads_upsample,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- )
- )
- if level and i == num_res_blocks:
- out_ch = ch
- layers.append(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- up=True,
- )
- if resblock_updown
- else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
- )
- ds //= 2
- self.output_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
-
- self.out = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)),
- )
-
- def convert_to_fp16(self):
- """
- Convert the torso of the model to float16.
- """
- self.input_blocks.apply(convert_module_to_f16)
- self.middle_block.apply(convert_module_to_f16)
- self.output_blocks.apply(convert_module_to_f16)
-
- def convert_to_fp32(self):
- """
- Convert the torso of the model to float32.
- """
- self.input_blocks.apply(convert_module_to_f32)
- self.middle_block.apply(convert_module_to_f32)
- self.output_blocks.apply(convert_module_to_f32)
-
- def forward(self, x, timesteps, y=None):
- """
- Apply the model to an input batch.
-
- :param x: an [N x C x ...] Tensor of inputs.
- :param timesteps: a 1-D batch of timesteps.
- :param y: an [N] Tensor of labels, if class-conditional.
- :return: an [N x C x ...] Tensor of outputs.
- """
- assert (y is not None) == (
- self.num_classes is not None
- ), "must specify y if and only if the model is class-conditional"
-
- hs = []
- emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
-
- if self.num_classes is not None:
- assert y.shape == (x.shape[0],)
- emb = emb + self.label_emb(y)
-
- h = x.type(self.dtype)
- for module in self.input_blocks:
- h = module(h, emb)
- hs.append(h)
- h = self.middle_block(h, emb)
- for module in self.output_blocks:
- h = th.cat([h, hs.pop()], dim=1)
- h = module(h, emb)
- h = h.type(x.dtype)
- return self.out(h)
-
-
-class SuperResModel(UNetModel):
- """
- A UNetModel that performs super-resolution.
-
- Expects an extra kwarg `low_res` to condition on a low-resolution image.
- """
-
- def __init__(self, image_size, in_channels, *args, **kwargs):
- super().__init__(image_size, in_channels * 2, *args, **kwargs)
-
- def forward(self, x, timesteps, low_res=None, **kwargs):
- _, _, new_height, new_width = x.shape
- upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
- x = th.cat([x, upsampled], dim=1)
- return super().forward(x, timesteps, **kwargs)
-
-
-class EncoderUNetModel(nn.Module):
- """
- The half UNet model with attention and timestep embedding.
-
- For usage, see UNet.
- """
-
- def __init__(
- self,
- image_size,
- in_channels,
- model_channels,
- out_channels,
- num_res_blocks,
- attention_resolutions,
- dropout=0,
- channel_mult=(1, 2, 4, 8),
- conv_resample=True,
- dims=2,
- use_checkpoint=False,
- use_fp16=False,
- num_heads=1,
- num_head_channels=-1,
- num_heads_upsample=-1,
- use_scale_shift_norm=False,
- resblock_updown=False,
- use_new_attention_order=False,
- pool="adaptive",
- ):
- super().__init__()
-
- if num_heads_upsample == -1:
- num_heads_upsample = num_heads
-
- self.in_channels = in_channels
- self.model_channels = model_channels
- self.out_channels = out_channels
- self.num_res_blocks = num_res_blocks
- self.attention_resolutions = attention_resolutions
- self.dropout = dropout
- self.channel_mult = channel_mult
- self.conv_resample = conv_resample
- self.use_checkpoint = use_checkpoint
- self.dtype = th.float16 if use_fp16 else th.float32
- self.num_heads = num_heads
- self.num_head_channels = num_head_channels
- self.num_heads_upsample = num_heads_upsample
-
- time_embed_dim = model_channels * 4
- self.time_embed = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
-
- ch = int(channel_mult[0] * model_channels)
- self.input_blocks = nn.ModuleList(
- [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
- )
- self._feature_size = ch
- input_block_chans = [ch]
- ds = 1
- for level, mult in enumerate(channel_mult):
- for _ in range(num_res_blocks):
- layers = [
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=int(mult * model_channels),
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = int(mult * model_channels)
- if ds in attention_resolutions:
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- )
- )
- self.input_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
- input_block_chans.append(ch)
- if level != len(channel_mult) - 1:
- out_ch = ch
- self.input_blocks.append(
- TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- down=True,
- )
- if resblock_updown
- else Downsample(
- ch, conv_resample, dims=dims, out_channels=out_ch
- )
- )
- )
- ch = out_ch
- input_block_chans.append(ch)
- ds *= 2
- self._feature_size += ch
-
- self.middle_block = TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- ),
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- )
- self._feature_size += ch
- self.pool = pool
- if pool == "adaptive":
- self.out = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- nn.AdaptiveAvgPool2d((1, 1)),
- zero_module(conv_nd(dims, ch, out_channels, 1)),
- nn.Flatten(),
- )
- elif pool == "attention":
- assert num_head_channels != -1
- self.out = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- AttentionPool2d(
- (image_size // ds), ch, num_head_channels, out_channels
- ),
- )
- elif pool == "spatial":
- self.out = nn.Sequential(
- nn.Linear(self._feature_size, 2048),
- nn.ReLU(),
- nn.Linear(2048, self.out_channels),
- )
- elif pool == "spatial_v2":
- self.out = nn.Sequential(
- nn.Linear(self._feature_size, 2048),
- normalization(2048),
- nn.SiLU(),
- nn.Linear(2048, self.out_channels),
- )
- else:
- raise NotImplementedError(f"Unexpected {pool} pooling")
-
- def convert_to_fp16(self):
- """
- Convert the torso of the model to float16.
- """
- self.input_blocks.apply(convert_module_to_f16)
- self.middle_block.apply(convert_module_to_f16)
-
- def convert_to_fp32(self):
- """
- Convert the torso of the model to float32.
- """
- self.input_blocks.apply(convert_module_to_f32)
- self.middle_block.apply(convert_module_to_f32)
-
- def forward(self, x, timesteps):
- """
- Apply the model to an input batch.
-
- :param x: an [N x C x ...] Tensor of inputs.
- :param timesteps: a 1-D batch of timesteps.
- :return: an [N x K] Tensor of outputs.
- """
- emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
-
- results = []
- h = x.type(self.dtype)
- for module in self.input_blocks:
- h = module(h, emb)
- if self.pool.startswith("spatial"):
- results.append(h.type(x.dtype).mean(dim=(2, 3)))
- h = self.middle_block(h, emb)
- if self.pool.startswith("spatial"):
- results.append(h.type(x.dtype).mean(dim=(2, 3)))
- h = th.cat(results, axis=-1)
- return self.out(h)
- else:
- h = h.type(x.dtype)
- return self.out(h)
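
As a pointer for readers, the core conditioning trick in ResBlock (with use_scale_shift_norm=True) is the FiLM-style update h = norm(h) * (1 + scale) + shift. A self-contained sketch of just that step, with nn.GroupNorm standing in for the normalization() helper imported from .nn:

import torch
import torch.nn as nn

channels, emb_channels = 64, 256
norm = nn.GroupNorm(32, channels)          # stand-in for normalization(channels)
emb_layers = nn.Sequential(nn.SiLU(), nn.Linear(emb_channels, 2 * channels))

h = torch.randn(2, channels, 16, 16)       # feature map
emb = torch.randn(2, emb_channels)         # timestep embedding

emb_out = emb_layers(emb)[..., None, None]            # [N, 2C, 1, 1]
scale, shift = torch.chunk(emb_out, 2, dim=1)         # [N, C, 1, 1] each
h = norm(h) * (1 + scale) + shift                     # scale-shift conditioning
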
diff --git a/spaces/NATSpeech/DiffSpeech/tasks/tts/fs.py b/spaces/NATSpeech/DiffSpeech/tasks/tts/fs.py
deleted file mode 100644
index f280bb922da6e8e714680700ba8d23114d4951f2..0000000000000000000000000000000000000000
--- a/spaces/NATSpeech/DiffSpeech/tasks/tts/fs.py
+++ /dev/null
@@ -1,184 +0,0 @@
-import torch
-import torch.distributions
-import torch.nn.functional as F
-import torch.optim
-import torch.utils.data
-
-from modules.tts.fs import FastSpeech
-from tasks.tts.dataset_utils import FastSpeechWordDataset
-from tasks.tts.speech_base import SpeechBaseTask
-from utils.audio.align import mel2token_to_dur
-from utils.audio.pitch.utils import denorm_f0
-from utils.commons.hparams import hparams
-
-
-class FastSpeechTask(SpeechBaseTask):
- def __init__(self):
- super().__init__()
- self.dataset_cls = FastSpeechWordDataset
- self.sil_ph = self.token_encoder.sil_phonemes()
-
- def build_tts_model(self):
- dict_size = len(self.token_encoder)
- self.model = FastSpeech(dict_size, hparams)
-
- def run_model(self, sample, infer=False, *args, **kwargs):
- txt_tokens = sample['txt_tokens'] # [B, T_t]
- spk_embed = sample.get('spk_embed')
- spk_id = sample.get('spk_ids')
- if not infer:
- target = sample['mels'] # [B, T_s, 80]
- mel2ph = sample['mel2ph'] # [B, T_s]
- f0 = sample.get('f0')
- uv = sample.get('uv')
- output = self.model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id,
- f0=f0, uv=uv, infer=False)
- losses = {}
- self.add_mel_loss(output['mel_out'], target, losses)
- self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses)
- if hparams['use_pitch_embed']:
- self.add_pitch_loss(output, sample, losses)
- return losses, output
- else:
- use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur'])
- use_gt_f0 = kwargs.get('infer_use_gt_f0', hparams['use_gt_f0'])
- mel2ph, uv, f0 = None, None, None
- if use_gt_dur:
- mel2ph = sample['mel2ph']
- if use_gt_f0:
- f0 = sample['f0']
- uv = sample['uv']
- output = self.model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id,
- f0=f0, uv=uv, infer=True)
- return output
-
- def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, losses=None):
- """
-
- :param dur_pred: [B, T], float, log scale
- :param mel2ph: [B, T]
- :param txt_tokens: [B, T]
- :param losses:
- :return:
- """
- B, T = txt_tokens.shape
- nonpadding = (txt_tokens != 0).float()
- dur_gt = mel2token_to_dur(mel2ph, T).float() * nonpadding
- is_sil = torch.zeros_like(txt_tokens).bool()
- for p in self.sil_ph:
- is_sil = is_sil | (txt_tokens == self.token_encoder.encode(p)[0])
- is_sil = is_sil.float() # [B, T_txt]
- losses['pdur'] = F.mse_loss((dur_pred + 1).log(), (dur_gt + 1).log(), reduction='none')
- losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum()
- losses['pdur'] = losses['pdur'] * hparams['lambda_ph_dur']
- # use linear scale for sentence and word duration
- if hparams['lambda_word_dur'] > 0:
- word_id = (is_sil.cumsum(-1) * (1 - is_sil)).long()
- word_dur_p = dur_pred.new_zeros([B, word_id.max() + 1]).scatter_add(1, word_id, dur_pred)[:, 1:]
- word_dur_g = dur_gt.new_zeros([B, word_id.max() + 1]).scatter_add(1, word_id, dur_gt)[:, 1:]
- wdur_loss = F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), reduction='none')
- word_nonpadding = (word_dur_g > 0).float()
- wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum()
- losses['wdur'] = wdur_loss * hparams['lambda_word_dur']
- if hparams['lambda_sent_dur'] > 0:
- sent_dur_p = dur_pred.sum(-1)
- sent_dur_g = dur_gt.sum(-1)
- sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean')
- losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur']
-
- def add_pitch_loss(self, output, sample, losses):
- mel2ph = sample['mel2ph'] # [B, T_s]
- f0 = sample['f0']
- uv = sample['uv']
- nonpadding = (mel2ph != 0).float() if hparams['pitch_type'] == 'frame' \
- else (sample['txt_tokens'] != 0).float()
- p_pred = output['pitch_pred']
- assert p_pred[..., 0].shape == f0.shape
- if hparams['use_uv'] and hparams['pitch_type'] == 'frame':
- assert p_pred[..., 1].shape == uv.shape, (p_pred.shape, uv.shape)
- losses['uv'] = (F.binary_cross_entropy_with_logits(
- p_pred[:, :, 1], uv, reduction='none') * nonpadding).sum() \
- / nonpadding.sum() * hparams['lambda_uv']
- nonpadding = nonpadding * (uv == 0).float()
- f0_pred = p_pred[:, :, 0]
- losses['f0'] = (F.l1_loss(f0_pred, f0, reduction='none') * nonpadding).sum() \
- / nonpadding.sum() * hparams['lambda_f0']
-
- def save_valid_result(self, sample, batch_idx, model_out):
- sr = hparams['audio_sample_rate']
- f0_gt = None
- mel_out = model_out['mel_out']
- if sample.get('f0') is not None:
- f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu())
- self.plot_mel(batch_idx, sample['mels'], mel_out, f0s=f0_gt)
- if self.global_step > 0:
- wav_pred = self.vocoder.spec2wav(mel_out[0].cpu(), f0=f0_gt)
- self.logger.add_audio(f'wav_val_{batch_idx}', wav_pred, self.global_step, sr)
- # with gt duration
- model_out = self.run_model(sample, infer=True, infer_use_gt_dur=True)
- dur_info = self.get_plot_dur_info(sample, model_out)
- del dur_info['dur_pred']
- wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt)
- self.logger.add_audio(f'wav_gdur_{batch_idx}', wav_pred, self.global_step, sr)
- self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_gdur_{batch_idx}',
- dur_info=dur_info, f0s=f0_gt)
-
- # with pred duration
- if not hparams['use_gt_dur']:
- model_out = self.run_model(sample, infer=True, infer_use_gt_dur=False)
- dur_info = self.get_plot_dur_info(sample, model_out)
- self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_pdur_{batch_idx}',
- dur_info=dur_info, f0s=f0_gt)
- wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt)
- self.logger.add_audio(f'wav_pdur_{batch_idx}', wav_pred, self.global_step, sr)
- # gt wav
- if self.global_step <= hparams['valid_infer_interval']:
- mel_gt = sample['mels'][0].cpu()
- wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt)
- self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, sr)
-
- def get_plot_dur_info(self, sample, model_out):
- T_txt = sample['txt_tokens'].shape[1]
- dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0]
- dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt
- txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy())
- txt = txt.split(" ")
- return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt}
-
- def test_step(self, sample, batch_idx):
- """
-
- :param sample:
- :param batch_idx:
- :return:
- """
- assert sample['txt_tokens'].shape[0] == 1, 'only supports batch_size=1 in inference'
- outputs = self.run_model(sample, infer=True)
- text = sample['text'][0]
- item_name = sample['item_name'][0]
- tokens = sample['txt_tokens'][0].cpu().numpy()
- mel_gt = sample['mels'][0].cpu().numpy()
- mel_pred = outputs['mel_out'][0].cpu().numpy()
- mel2ph = sample['mel2ph'][0].cpu().numpy()
- mel2ph_pred = outputs['mel2ph'][0].cpu().numpy()
- str_phs = self.token_encoder.decode(tokens, strip_padding=True)
- base_fn = f'[{batch_idx:06d}][{item_name.replace("%", "_")}][%s]'
- if text is not None:
- base_fn += text.replace(":", "$3A")[:80]
- base_fn = base_fn.replace(' ', '_')
- gen_dir = self.gen_dir
- wav_pred = self.vocoder.spec2wav(mel_pred)
- self.saving_result_pool.add_job(self.save_result, args=[
- wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred])
- if hparams['save_gt']:
- wav_gt = self.vocoder.spec2wav(mel_gt)
- self.saving_result_pool.add_job(self.save_result, args=[
- wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph])
- print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}")
- return {
- 'item_name': item_name,
- 'text': text,
- 'ph_tokens': self.token_encoder.decode(tokens.tolist()),
- 'wav_fn_pred': base_fn % 'P',
- 'wav_fn_gt': base_fn % 'G',
- }
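
The least obvious step in add_dur_loss above is pooling predicted phone durations into word durations with scatter_add, using silence tokens as word boundaries. A toy, self-contained reproduction of just that aggregation (the tensors are made up, not the task's real inputs):

import torch

# Phone-level durations and a silence mask marking word boundaries.
dur_pred = torch.tensor([[0.0, 0.2, 0.3, 0.0, 0.5, 0.4]])   # [B, T]
is_sil   = torch.tensor([[1.,  0.,  0.,  1.,  0.,  0. ]])   # 1 at boundary tokens
# Each non-silence token gets the id of the word it belongs to.
word_id = (is_sil.cumsum(-1) * (1 - is_sil)).long()          # [[0, 1, 1, 0, 2, 2]]
# Sum durations per word; column 0 collects silence tokens and is dropped.
word_dur = dur_pred.new_zeros(1, int(word_id.max()) + 1) \
                   .scatter_add(1, word_id, dur_pred)[:, 1:]
print(word_dur)   # tensor([[0.5000, 0.9000]])
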
diff --git a/spaces/Nesip/meta-llama-Llama-2-70b-chat-hf/README.md b/spaces/Nesip/meta-llama-Llama-2-70b-chat-hf/README.md
deleted file mode 100644
index 2e7bbd855dee205a7294a9e77d5f009bdf819a25..0000000000000000000000000000000000000000
--- a/spaces/Nesip/meta-llama-Llama-2-70b-chat-hf/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Meta Llama Llama 2 70b Chat Hf
-emoji: 🔥
-colorFrom: red
-colorTo: gray
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/wav2vec/wav2vec2.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/wav2vec/wav2vec2.py
deleted file mode 100644
index 714fd3ab50443b8d15715b1cf5abd4eb517298c4..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/wav2vec/wav2vec2.py
+++ /dev/null
@@ -1,1016 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-from dataclasses import dataclass, field
-from typing import List, Tuple
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from fairseq import utils
-from fairseq.data.data_utils import compute_mask_indices
-from fairseq.dataclass import ChoiceEnum, FairseqDataclass
-from fairseq.models import BaseFairseqModel, register_model
-from fairseq.modules import (
- Fp32GroupNorm,
- Fp32LayerNorm,
- GradMultiply,
- GumbelVectorQuantizer,
- LayerNorm,
- MultiheadAttention,
- SamePad,
- TransposeLast,
-)
-from fairseq.modules.transformer_sentence_encoder import init_bert_params
-from fairseq.utils import buffered_arange, index_put, is_xla_tensor
-
-
-EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"])
-MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"])
-
-
-@dataclass
-class Wav2Vec2Config(FairseqDataclass):
- extractor_mode: EXTRACTOR_MODE_CHOICES = field(
- default="default",
- metadata={
- "help": "mode for feature extractor. default has a single group norm with d "
- "groups in the first conv block, whereas layer_norm has layer norms in "
- "every block (meant to use with normalize=True)"
- },
- )
- encoder_layers: int = field(
- default=12, metadata={"help": "num encoder layers in the transformer"}
- )
- encoder_embed_dim: int = field(
- default=768, metadata={"help": "encoder embedding dimension"}
- )
- encoder_ffn_embed_dim: int = field(
- default=3072, metadata={"help": "encoder embedding dimension for FFN"}
- )
- encoder_attention_heads: int = field(
- default=12, metadata={"help": "num encoder attention heads"}
- )
- activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
- default="gelu", metadata={"help": "activation function to use"}
- )
-
- # dropouts
- dropout: float = field(
- default=0.1, metadata={"help": "dropout probability for the transformer"}
- )
- attention_dropout: float = field(
- default=0.1, metadata={"help": "dropout probability for attention weights"}
- )
- activation_dropout: float = field(
- default=0.0, metadata={"help": "dropout probability after activation in FFN"}
- )
- encoder_layerdrop: float = field(
- default=0.0, metadata={"help": "probability of dropping a tarnsformer layer"}
- )
- dropout_input: float = field(
- default=0.0,
- metadata={"help": "dropout to apply to the input (after feat extr)"},
- )
- dropout_features: float = field(
- default=0.0,
- metadata={"help": "dropout to apply to the features (after feat extr)"},
- )
-
- final_dim: int = field(
- default=0,
- metadata={
- "help": "project final representations and targets to this many dimensions."
- "set to encoder_embed_dim is <= 0"
- },
- )
- layer_norm_first: bool = field(
- default=False, metadata={"help": "apply layernorm first in the transformer"}
- )
- conv_feature_layers: str = field(
- default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]",
- metadata={
- "help": "string describing convolutional feature extraction layers in form of a python list that contains "
- "[(dim, kernel_size, stride), ...]"
- },
- )
- conv_bias: bool = field(
- default=False, metadata={"help": "include bias in conv encoder"}
- )
- logit_temp: float = field(
- default=0.1, metadata={"help": "temperature to divide logits by"}
- )
- quantize_targets: bool = field(
- default=False, metadata={"help": "use quantized targets"}
- )
- quantize_input: bool = field(
- default=False, metadata={"help": "use quantized inputs"}
- )
- same_quantizer: bool = field(
- default=False, metadata={"help": "use same quantizer for inputs and targets"}
- )
- target_glu: bool = field(
- default=False, metadata={"help": "adds projection + glu to targets"}
- )
- feature_grad_mult: float = field(
- default=1.0, metadata={"help": "multiply feature extractor var grads by this"}
- )
- quantizer_depth: int = field(
- default=1,
- metadata={"help": "number of quantizer layers"},
- )
- quantizer_factor: int = field(
- default=3,
- metadata={
- "help": "dimensionality increase for inner quantizer layers (if depth > 1)"
- },
- )
- latent_vars: int = field(
- default=320,
- metadata={"help": "number of latent variables V in each group of the codebook"},
- )
- latent_groups: int = field(
- default=2,
- metadata={"help": "number of groups G of latent variables in the codebook"},
- )
- latent_dim: int = field(
- default=0,
- metadata={
- "help": "if > 0, uses this dimensionality for latent variables. "
- "otherwise uses final_dim / latent_groups"
- },
- )
-
- # masking
- mask_length: int = field(default=10, metadata={"help": "mask length"})
- mask_prob: float = field(
- default=0.65, metadata={"help": "probability of replacing a token with mask"}
- )
- mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
- default="static", metadata={"help": "how to choose mask length"}
- )
- mask_other: float = field(
- default=0,
- metadata={
- "help": "secondary mask argument (used for more complex distributions), "
- "see help in compute_mask_indices"
- },
- )
- no_mask_overlap: bool = field(
- default=False, metadata={"help": "whether to allow masks to overlap"}
- )
- mask_min_space: int = field(
- default=1,
- metadata={"help": "min space between spans (if no overlap is enabled)"},
- )
-
- # channel masking
- mask_channel_length: int = field(
- default=10, metadata={"help": "length of the mask for features (channels)"}
- )
- mask_channel_prob: float = field(
- default=0.0, metadata={"help": "probability of replacing a feature with 0"}
- )
- mask_channel_before: bool = False
- mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
- default="static",
- metadata={"help": "how to choose mask length for channel masking"},
- )
- mask_channel_other: float = field(
- default=0,
- metadata={
- "help": "secondary mask argument (used for more complex distributions), "
- "see help in compute_mask_indicesh"
- },
- )
- no_mask_channel_overlap: bool = field(
- default=False, metadata={"help": "whether to allow channel masks to overlap"}
- )
- mask_channel_min_space: int = field(
- default=1,
- metadata={"help": "min space between spans (if no overlap is enabled)"},
- )
-
- # negative selection
- num_negatives: int = field(
- default=100,
- metadata={"help": "number of negative examples from the same sample"},
- )
- negatives_from_everywhere: bool = field(
- default=False,
- metadata={"help": "sample negatives from everywhere, not just masked states"},
- )
- cross_sample_negatives: int = field(
- default=0, metadata={"help": "number of negative examples from the any sample"}
- )
- codebook_negatives: int = field(
- default=0, metadata={"help": "number of negative examples codebook"}
- )
-
- # positional embeddings
- conv_pos: int = field(
- default=128,
- metadata={"help": "number of filters for convolutional positional embeddings"},
- )
- conv_pos_groups: int = field(
- default=16,
- metadata={"help": "number of groups for convolutional positional embedding"},
- )
-
- latent_temp: Tuple[float, float, float] = field(
- default=(2, 0.5, 0.999995),
- metadata={
- "help": "temperature for latent variable sampling. "
- "can be tuple of 3 values (start, end, decay)"
- },
- )
-
-
-@register_model("wav2vec2", dataclass=Wav2Vec2Config)
-class Wav2Vec2Model(BaseFairseqModel):
- def __init__(self, cfg: Wav2Vec2Config):
- super().__init__()
- self.cfg = cfg
-
- feature_enc_layers = eval(cfg.conv_feature_layers)
- self.embed = feature_enc_layers[-1][0]
-
- self.feature_extractor = ConvFeatureExtractionModel(
- conv_layers=feature_enc_layers,
- dropout=0.0,
- mode=cfg.extractor_mode,
- conv_bias=cfg.conv_bias,
- )
-
- self.post_extract_proj = (
- nn.Linear(self.embed, cfg.encoder_embed_dim)
- if self.embed != cfg.encoder_embed_dim and not cfg.quantize_input
- else None
- )
-
- self.mask_prob = cfg.mask_prob
- self.mask_selection = cfg.mask_selection
- self.mask_other = cfg.mask_other
- self.mask_length = cfg.mask_length
- self.no_mask_overlap = cfg.no_mask_overlap
- self.mask_min_space = cfg.mask_min_space
-
- self.mask_channel_prob = cfg.mask_channel_prob
- self.mask_channel_before = cfg.mask_channel_before
- self.mask_channel_selection = cfg.mask_channel_selection
- self.mask_channel_other = cfg.mask_channel_other
- self.mask_channel_length = cfg.mask_channel_length
- self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
- self.mask_channel_min_space = cfg.mask_channel_min_space
-
- self.dropout_input = nn.Dropout(cfg.dropout_input)
- self.dropout_features = nn.Dropout(cfg.dropout_features)
-
- self.feature_grad_mult = cfg.feature_grad_mult
-
- self.quantizer = None
- self.input_quantizer = None
-
- self.n_negatives = cfg.num_negatives
- self.cross_sample_negatives = cfg.cross_sample_negatives
- self.codebook_negatives = cfg.codebook_negatives
- self.negatives_from_everywhere = cfg.negatives_from_everywhere
-
- self.logit_temp = cfg.logit_temp
-
- final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim
-
- if cfg.quantize_targets:
- vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else final_dim
- self.quantizer = GumbelVectorQuantizer(
- dim=self.embed,
- num_vars=cfg.latent_vars,
- temp=cfg.latent_temp,
- groups=cfg.latent_groups,
- combine_groups=False,
- vq_dim=vq_dim,
- time_first=True,
- weight_proj_depth=cfg.quantizer_depth,
- weight_proj_factor=cfg.quantizer_factor,
- )
- self.project_q = nn.Linear(vq_dim, final_dim)
- else:
- self.project_q = nn.Linear(self.embed, final_dim)
-
- if cfg.quantize_input:
- if cfg.same_quantizer and self.quantizer is not None:
- vq_dim = final_dim
- self.input_quantizer = self.quantizer
- else:
- vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else cfg.encoder_embed_dim
- self.input_quantizer = GumbelVectorQuantizer(
- dim=self.embed,
- num_vars=cfg.latent_vars,
- temp=cfg.latent_temp,
- groups=cfg.latent_groups,
- combine_groups=False,
- vq_dim=vq_dim,
- time_first=True,
- weight_proj_depth=cfg.quantizer_depth,
- weight_proj_factor=cfg.quantizer_factor,
- )
- self.project_inp = nn.Linear(vq_dim, cfg.encoder_embed_dim)
-
- self.mask_emb = nn.Parameter(
- torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
- )
-
- self.encoder = TransformerEncoder(cfg)
- self.layer_norm = LayerNorm(self.embed)
-
- self.target_glu = None
- if cfg.target_glu:
- self.target_glu = nn.Sequential(
- nn.Linear(final_dim, final_dim * 2), nn.GLU()
- )
-
- self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim)
-
- def upgrade_state_dict_named(self, state_dict, name):
- super().upgrade_state_dict_named(state_dict, name)
- """Upgrade a (possibly old) state dict for new versions of fairseq."""
- return state_dict
-
- @classmethod
- def build_model(cls, cfg: Wav2Vec2Config, task=None):
- """Build a new model instance."""
-
- return cls(cfg)
-
- def apply_mask(
- self,
- x,
- padding_mask,
- mask_indices=None,
- mask_channel_indices=None,
- ):
- B, T, C = x.shape
-
- if self.mask_channel_prob > 0 and self.mask_channel_before:
- mask_channel_indices = compute_mask_indices(
- (B, C),
- None,
- self.mask_channel_prob,
- self.mask_channel_length,
- self.mask_channel_selection,
- self.mask_channel_other,
- no_overlap=self.no_mask_channel_overlap,
- min_space=self.mask_channel_min_space,
- )
- mask_channel_indices = (
- torch.from_numpy(mask_channel_indices)
- .to(x.device)
- .unsqueeze(1)
- .expand(-1, T, -1)
- )
- x[mask_channel_indices] = 0
-
- if self.mask_prob > 0:
- if mask_indices is None:
- mask_indices = compute_mask_indices(
- (B, T),
- padding_mask,
- self.mask_prob,
- self.mask_length,
- self.mask_selection,
- self.mask_other,
- min_masks=2,
- no_overlap=self.no_mask_overlap,
- min_space=self.mask_min_space,
- )
- mask_indices = torch.from_numpy(mask_indices).to(x.device)
- x = index_put(x, mask_indices, self.mask_emb)
- else:
- mask_indices = None
-
- if self.mask_channel_prob > 0 and not self.mask_channel_before:
- if mask_channel_indices is None:
- mask_channel_indices = compute_mask_indices(
- (B, C),
- None,
- self.mask_channel_prob,
- self.mask_channel_length,
- self.mask_channel_selection,
- self.mask_channel_other,
- no_overlap=self.no_mask_channel_overlap,
- min_space=self.mask_channel_min_space,
- )
- mask_channel_indices = (
- torch.from_numpy(mask_channel_indices)
- .to(x.device)
- .unsqueeze(1)
- .expand(-1, T, -1)
- )
- x = index_put(x, mask_channel_indices, 0)
-
- return x, mask_indices
-
- def sample_negatives(self, y, num, padding_count=None):
-
- if self.n_negatives == 0 and self.cross_sample_negatives == 0:
- return y.new(0)
-
- bsz, tsz, fsz = y.shape
- y = y.view(-1, fsz) # BTC => (BxT)C
-
- # FIXME: what happens if padding_count is specified?
- cross_high = tsz * bsz
- high = tsz - (padding_count or 0)
- with torch.no_grad():
- assert high > 1, f"{bsz,tsz,fsz}"
-
- if self.n_negatives > 0:
- tszs = (
- buffered_arange(num)
- .unsqueeze(-1)
- .expand(-1, self.n_negatives)
- .flatten()
- )
-
- neg_idxs = torch.randint(
- low=0, high=high - 1, size=(bsz, self.n_negatives * num)
- )
- neg_idxs[neg_idxs >= tszs] += 1
-
- if self.cross_sample_negatives > 0:
- tszs = (
- buffered_arange(num)
- .unsqueeze(-1)
- .expand(-1, self.cross_sample_negatives)
- .flatten()
- )
-
- cross_neg_idxs = torch.randint(
- low=0,
- high=cross_high - 1,
- size=(bsz, self.cross_sample_negatives * num),
- )
- cross_neg_idxs[cross_neg_idxs >= tszs] += 1
-
- if self.n_negatives > 0:
- for i in range(1, bsz):
- neg_idxs[i] += i * high
- else:
- neg_idxs = cross_neg_idxs
-
- if self.cross_sample_negatives > 0 and self.n_negatives > 0:
- neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1)
-
- negs = y[neg_idxs.view(-1)]
- negs = negs.view(
- bsz, num, self.n_negatives + self.cross_sample_negatives, fsz
- ).permute(
- 2, 0, 1, 3
- ) # to NxBxTxC
- return negs, neg_idxs
-
- def compute_preds(self, x, y, negatives):
-
- neg_is_pos = (y == negatives).all(-1)
- y = y.unsqueeze(0)
- targets = torch.cat([y, negatives], dim=0)
-
- logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x)
-
- logits = logits / self.logit_temp
-
- if is_xla_tensor(logits) or neg_is_pos.any():
- fillval = -float(2 ** 30)
- if not hasattr(self, "_inftensor"):
- self._inftensor = (
- torch.tensor(fillval).to(x.device)
- if is_xla_tensor(logits)
- else float("-inf")
- )
- logits[1:] = index_put(logits[1:], neg_is_pos, self._inftensor)
-
- return logits
-
- def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
- """
- Computes the output length of the convolutional layers
- """
-
- def _conv_out_length(input_length, kernel_size, stride):
- return torch.floor((input_length - kernel_size) / stride + 1)
-
- conv_cfg_list = eval(self.cfg.conv_feature_layers)
-
- for i in range(len(conv_cfg_list)):
- input_lengths = _conv_out_length(
- input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2]
- )
-
- return input_lengths.to(torch.long)
-
- def forward(
- self,
- source,
- padding_mask=None,
- mask=True,
- features_only=False,
- layer=None,
- mask_indices=None,
- mask_channel_indices=None,
- padding_count=None,
- ):
-
- if self.feature_grad_mult > 0:
- features = self.feature_extractor(source)
- if self.feature_grad_mult != 1.0:
- features = GradMultiply.apply(features, self.feature_grad_mult)
- else:
- with torch.no_grad():
- features = self.feature_extractor(source)
-
- features_pen = features.float().pow(2).mean()
-
- features = features.transpose(1, 2)
- features = self.layer_norm(features)
- unmasked_features = features.clone()
-
- if padding_mask is not None and padding_mask.any():
- input_lengths = (1 - padding_mask.long()).sum(-1)
- # apply conv formula to get real output_lengths
- output_lengths = self._get_feat_extract_output_lengths(input_lengths)
-
- padding_mask = torch.zeros(
- features.shape[:2], dtype=features.dtype, device=features.device
- )
-
- # these two operations make sure that all values
- # before the output lengths indices are attended to
- padding_mask[
- (
- torch.arange(padding_mask.shape[0], device=padding_mask.device),
- output_lengths - 1,
- )
- ] = 1
- padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool()
- else:
- padding_mask = None
-
- if self.post_extract_proj is not None:
- features = self.post_extract_proj(features)
-
- features = self.dropout_input(features)
- unmasked_features = self.dropout_features(unmasked_features)
-
- num_vars = None
- code_ppl = None
- prob_ppl = None
- curr_temp = None
-
- if self.input_quantizer:
- q = self.input_quantizer(features, produce_targets=False)
- features = q["x"]
- num_vars = q["num_vars"]
- code_ppl = q["code_perplexity"]
- prob_ppl = q["prob_perplexity"]
- curr_temp = q["temp"]
- features = self.project_inp(features)
-
- if mask:
- x, mask_indices = self.apply_mask(
- features,
- padding_mask,
- mask_indices=mask_indices,
- mask_channel_indices=mask_channel_indices,
- )
- if not is_xla_tensor(x) and mask_indices is not None:
- # tpu-comment: reducing the size in a dynamic way causes
- # too many recompilations on xla.
- y = unmasked_features[mask_indices].view(
- unmasked_features.size(0), -1, unmasked_features.size(-1)
- )
- else:
- y = unmasked_features
- else:
- x = features
- y = unmasked_features
- mask_indices = None
-
- x, layer_results = self.encoder(x, padding_mask=padding_mask, layer=layer)
-
- if features_only:
- return {
- "x": x,
- "padding_mask": padding_mask,
- "features": unmasked_features,
- "layer_results": layer_results,
- }
-
- if self.quantizer:
- q = self.quantizer(y, produce_targets=False)
- y = q["x"]
- num_vars = q["num_vars"]
- code_ppl = q["code_perplexity"]
- prob_ppl = q["prob_perplexity"]
- curr_temp = q["temp"]
-
- y = self.project_q(y)
-
- if self.negatives_from_everywhere:
- neg_cands = self.quantizer(unmasked_features, produce_targets=False)[
- "x"
- ]
- negs, _ = self.sample_negatives(
- neg_cands,
- y.size(1),
- padding_count=padding_count,
- )
- negs = self.project_q(negs)
-
- else:
- negs, _ = self.sample_negatives(
- y,
- y.size(1),
- padding_count=padding_count,
- )
-
- if self.codebook_negatives > 0:
- cb_negs = self.quantizer.sample_from_codebook(
- y.size(0) * y.size(1), self.codebook_negatives
- )
- cb_negs = cb_negs.view(
- self.codebook_negatives, y.size(0), y.size(1), -1
- ) # order doesn't matter
- cb_negs = self.project_q(cb_negs)
- negs = torch.cat([negs, cb_negs], dim=0)
- else:
- y = self.project_q(y)
-
- if self.negatives_from_everywhere:
- negs, _ = self.sample_negatives(
- unmasked_features,
- y.size(1),
- padding_count=padding_count,
- )
- negs = self.project_q(negs)
- else:
- negs, _ = self.sample_negatives(
- y,
- y.size(1),
- padding_count=padding_count,
- )
-
- if not is_xla_tensor(x):
- # tpu-comment: reducing the size in a dynamic way causes
- # too many recompilations on xla.
- x = x[mask_indices].view(x.size(0), -1, x.size(-1))
-
- if self.target_glu:
- y = self.target_glu(y)
- negs = self.target_glu(negs)
-
- x = self.final_proj(x)
- x = self.compute_preds(x, y, negs)
-
- result = {
- "x": x,
- "padding_mask": padding_mask,
- "features_pen": features_pen,
- }
-
- if prob_ppl is not None:
- result["prob_perplexity"] = prob_ppl
- result["code_perplexity"] = code_ppl
- result["num_vars"] = num_vars
- result["temp"] = curr_temp
-
- return result
-
- def quantize(self, x):
- assert self.quantizer is not None
- x = self.feature_extractor(x)
- x = x.transpose(1, 2)
- x = self.layer_norm(x)
- return self.quantizer.forward_idx(x)
-
- def extract_features(self, source, padding_mask, mask=False, layer=None):
- res = self.forward(
- source, padding_mask, mask=mask, features_only=True, layer=layer
- )
- return res
-
- def get_logits(self, net_output):
- logits = net_output["x"]
- logits = logits.transpose(0, 2)
- logits = logits.reshape(-1, logits.size(-1))
- return logits
-
- def get_targets(self, sample, net_output, expand_steps=True):
- x = net_output["x"]
- return x.new_zeros(x.size(1) * x.size(2), dtype=torch.long)
-
- def get_extra_losses(self, net_output):
- pen = []
-
- if "prob_perplexity" in net_output:
- pen.append(
- (net_output["num_vars"] - net_output["prob_perplexity"])
- / net_output["num_vars"]
- )
-
- if "features_pen" in net_output:
- pen.append(net_output["features_pen"])
-
- return pen
-
- def remove_pretraining_modules(self):
- self.quantizer = None
- self.project_q = None
- self.target_glu = None
- self.final_proj = None
-
-
-class ConvFeatureExtractionModel(nn.Module):
- def __init__(
- self,
- conv_layers: List[Tuple[int, int, int]],
- dropout: float = 0.0,
- mode: str = "default",
- conv_bias: bool = False,
- ):
- super().__init__()
-
- assert mode in {"default", "layer_norm"}
-
- def block(
- n_in,
- n_out,
- k,
- stride,
- is_layer_norm=False,
- is_group_norm=False,
- conv_bias=False,
- ):
- def make_conv():
- conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
- nn.init.kaiming_normal_(conv.weight)
- return conv
-
- assert (
- is_layer_norm and is_group_norm
- ) == False, "layer norm and group norm are exclusive"
-
- if is_layer_norm:
- return nn.Sequential(
- make_conv(),
- nn.Dropout(p=dropout),
- nn.Sequential(
- TransposeLast(),
- Fp32LayerNorm(dim, elementwise_affine=True),
- TransposeLast(),
- ),
- nn.GELU(),
- )
- elif is_group_norm:
- return nn.Sequential(
- make_conv(),
- nn.Dropout(p=dropout),
- Fp32GroupNorm(dim, dim, affine=True),
- nn.GELU(),
- )
- else:
- return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())
-
- in_d = 1
- self.conv_layers = nn.ModuleList()
- for i, cl in enumerate(conv_layers):
- assert len(cl) == 3, "invalid conv definition: " + str(cl)
- (dim, k, stride) = cl
-
- self.conv_layers.append(
- block(
- in_d,
- dim,
- k,
- stride,
- is_layer_norm=mode == "layer_norm",
- is_group_norm=mode == "default" and i == 0,
- conv_bias=conv_bias,
- )
- )
- in_d = dim
-
- def forward(self, x):
-
- # BxT -> BxCxT
- x = x.unsqueeze(1)
-
- for conv in self.conv_layers:
- x = conv(x)
-
- return x
-
-
-class TransformerEncoder(nn.Module):
- def __init__(self, args):
- super().__init__()
-
- self.dropout = args.dropout
- self.embedding_dim = args.encoder_embed_dim
-
- self.pos_conv = nn.Conv1d(
- self.embedding_dim,
- self.embedding_dim,
- kernel_size=args.conv_pos,
- padding=args.conv_pos // 2,
- groups=args.conv_pos_groups,
- )
- dropout = 0
- std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))
- nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
- nn.init.constant_(self.pos_conv.bias, 0)
-
- self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
- self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
-
- self.layers = nn.ModuleList(
- [
- TransformerSentenceEncoderLayer(
- embedding_dim=self.embedding_dim,
- ffn_embedding_dim=args.encoder_ffn_embed_dim,
- num_attention_heads=args.encoder_attention_heads,
- dropout=self.dropout,
- attention_dropout=args.attention_dropout,
- activation_dropout=args.activation_dropout,
- activation_fn=args.activation_fn,
- layer_norm_first=args.layer_norm_first,
- )
- for _ in range(args.encoder_layers)
- ]
- )
-
- self.layer_norm_first = args.layer_norm_first
- self.layer_norm = LayerNorm(self.embedding_dim)
- self.layerdrop = args.encoder_layerdrop
-
- self.apply(init_bert_params)
-
- def forward(self, x, padding_mask=None, layer=None):
- x, layer_results = self.extract_features(x, padding_mask, layer)
-
- if self.layer_norm_first and layer is None:
- x = self.layer_norm(x)
-
- return x, layer_results
-
- def extract_features(self, x, padding_mask=None, tgt_layer=None):
-
- if padding_mask is not None:
- x = index_put(x, padding_mask, 0)
-
- x_conv = self.pos_conv(x.transpose(1, 2))
- x_conv = x_conv.transpose(1, 2)
- x = x + x_conv
-
- if not self.layer_norm_first:
- x = self.layer_norm(x)
-
- x = F.dropout(x, p=self.dropout, training=self.training)
-
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- layer_results = []
- r = None
- for i, layer in enumerate(self.layers):
- dropout_probability = np.random.random()
- if not self.training or (dropout_probability > self.layerdrop):
- x, z = layer(x, self_attn_padding_mask=padding_mask, need_weights=False)
- if tgt_layer is not None:
- layer_results.append((x, z))
- if i == tgt_layer:
- r = x
- break
-
- if r is not None:
- x = r
-
- # T x B x C -> B x T x C
- x = x.transpose(0, 1)
-
- return x, layer_results
-
- def max_positions(self):
- """Maximum output length supported by the encoder."""
- return self.args.max_positions
-
- def upgrade_state_dict_named(self, state_dict, name):
- """Upgrade a (possibly old) state dict for new versions of fairseq."""
- return state_dict
-
-
-class TransformerSentenceEncoderLayer(nn.Module):
- """
- Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
- models.
- """
-
- def __init__(
- self,
- embedding_dim: float = 768,
- ffn_embedding_dim: float = 3072,
- num_attention_heads: float = 8,
- dropout: float = 0.1,
- attention_dropout: float = 0.1,
- activation_dropout: float = 0.1,
- activation_fn: str = "relu",
- layer_norm_first: bool = False,
- ) -> None:
-
- super().__init__()
- # Initialize parameters
- self.embedding_dim = embedding_dim
- self.dropout = dropout
- self.activation_dropout = activation_dropout
-
- # Initialize blocks
- self.activation_fn = utils.get_activation_fn(activation_fn)
- self.self_attn = MultiheadAttention(
- self.embedding_dim,
- num_attention_heads,
- dropout=attention_dropout,
- self_attention=True,
- )
-
- self.dropout1 = nn.Dropout(dropout)
- self.dropout2 = nn.Dropout(self.activation_dropout)
- self.dropout3 = nn.Dropout(dropout)
-
- self.layer_norm_first = layer_norm_first
-
- # layer norm associated with the self attention layer
- self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
- self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
- self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
-
- # layer norm associated with the position wise feed-forward NN
- self.final_layer_norm = LayerNorm(self.embedding_dim)
-
- def forward(
- self,
- x: torch.Tensor,
- self_attn_mask: torch.Tensor = None,
- self_attn_padding_mask: torch.Tensor = None,
- need_weights: bool = False,
- att_args=None,
- ):
- """
- LayerNorm is applied either before or after the self-attention/ffn
- modules similar to the original Transformer implementation.
- """
- residual = x
-
- if self.layer_norm_first:
- x = self.self_attn_layer_norm(x)
- x, attn = self.self_attn(
- query=x,
- key=x,
- value=x,
- key_padding_mask=self_attn_padding_mask,
- attn_mask=self_attn_mask,
- )
- x = self.dropout1(x)
- x = residual + x
-
- residual = x
- x = self.final_layer_norm(x)
- x = self.activation_fn(self.fc1(x))
- x = self.dropout2(x)
- x = self.fc2(x)
- x = self.dropout3(x)
- x = residual + x
- else:
- x, attn = self.self_attn(
- query=x,
- key=x,
- value=x,
- key_padding_mask=self_attn_padding_mask,
- )
-
- x = self.dropout1(x)
- x = residual + x
-
- x = self.self_attn_layer_norm(x)
-
- residual = x
- x = self.activation_fn(self.fc1(x))
- x = self.dropout2(x)
- x = self.fc2(x)
- x = self.dropout3(x)
- x = residual + x
- x = self.final_layer_norm(x)
-
- return x, attn
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/README.md b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/README.md
deleted file mode 100644
index 253c8af2516580bbc33e8ecc8efe4f7a526d7142..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/README.md
+++ /dev/null
@@ -1,376 +0,0 @@
-# wav2vec 2.0
-
-wav2vec 2.0 learns speech representations on unlabeled data as described in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations (Baevski et al., 2020)](https://arxiv.org/abs/2006.11477).
-
-We also learned speech representations in multiple languages in [Unsupervised Cross-lingual Representation Learning for Speech Recognition (Conneau et al., 2020)](https://arxiv.org/abs/2006.13979).
-
-We also combined wav2vec 2.0 with self-training in [Self-training and Pre-training are Complementary for Speech Recognition (Xu et al., 2020)](https://arxiv.org/abs/2010.11430).
-
-We combined speech data from multiple domains in [Robust wav2vec 2.0: Analyzing Domain Shift in Self-Supervised Pre-Training (Hsu et al., 2021)](https://arxiv.org/abs/2104.01027).
-
-## Pre-trained models
-
-Model | Finetuning split | Dataset | Model
-|---|---|---|---
-Wav2Vec 2.0 Base | No finetuning | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small.pt)
-Wav2Vec 2.0 Base | 10 minutes | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small_10m.pt)
-Wav2Vec 2.0 Base | 100 hours | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small_100h.pt)
-Wav2Vec 2.0 Base | 960 hours | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small_960h.pt)
-Wav2Vec 2.0 Large | No finetuning | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/libri960_big.pt)
-Wav2Vec 2.0 Large | 10 minutes | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_big_10m.pt)
-Wav2Vec 2.0 Large | 100 hours | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_big_100h.pt)
-Wav2Vec 2.0 Large | 960 hours | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_big_960h.pt)
-Wav2Vec 2.0 Large (LV-60)* | No finetuning | [Libri-Light](https://github.com/facebookresearch/libri-light) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_new.pt)
-Wav2Vec 2.0 Large (LV-60)* | 10 minutes | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_10m_new.pt)
-Wav2Vec 2.0 Large (LV-60)* | 100 hours | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_100h_new.pt)
-Wav2Vec 2.0 Large (LV-60)* | 960 hours | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec2_vox_960h_new.pt)
-Wav2Vec 2.0 Large (LV-60) + Self Training * | 10 minutes | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_10m_pl.pt)
-Wav2Vec 2.0 Large (LV-60) + Self Training * | 100 hours | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_100h_pl.pt)
-Wav2Vec 2.0 Large (LV-60) + Self Training * | 960 hours | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_960h_pl.pt)
-Wav2Vec 2.0 Large (LV-60 + CV + SWBD + FSH) ** | No finetuning | [Libri-Light](https://github.com/facebookresearch/libri-light) + [CommonVoice](https://commonvoice.mozilla.org/en/languages) + [Switchboard](https://catalog.ldc.upenn.edu/LDC97S62) + [Fisher](https://catalog.ldc.upenn.edu/LDC2004T19) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/w2v_large_lv_fsh_swbd_cv.pt)
-Wav2Vec 2.0 Large (LV-60 + CV + SWBD + FSH) ** | 960 hours Librispeech | [Libri-Light](https://github.com/facebookresearch/libri-light) + [CommonVoice](https://commonvoice.mozilla.org/en/languages) + [Switchboard](https://catalog.ldc.upenn.edu/LDC97S62) + [Fisher](https://catalog.ldc.upenn.edu/LDC2004T19) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/w2v_large_lv_fsh_swbd_cv_ftls960.pt)
-Wav2Vec 2.0 Large (LV-60 + CV + SWBD + FSH) ** | 300 hours Switchboard | [Libri-Light](https://github.com/facebookresearch/libri-light) + [CommonVoice](https://commonvoice.mozilla.org/en/languages) + [Switchboard](https://catalog.ldc.upenn.edu/LDC97S62) + [Fisher](https://catalog.ldc.upenn.edu/LDC2004T19) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/w2v_large_lv_fsh_swbd_cv_ftsb300.pt)
-
-\* updated (Oct. 24, 2020)\
-** updated (Jul. 8, 2021)
-
-We also release multilingual pre-trained wav2vec 2.0 (XLSR) models:
-
-Model | Architecture | Hours | Languages | Datasets | Model
-|---|---|---|---|---|---
-XLSR-53 | Large | 56k | 53 | MLS, CommonVoice, BABEL | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr_53_56k.pt)
-
-The XLSR model uses the following datasets for multilingual pretraining:
-
-* **[MLS: Multilingual LibriSpeech](https://indico2.conference4me.psnc.pl/event/35/contributions/3585/attachments/1060/1101/Wed-2-6-10.pdf)** (8 languages, 50.7k hours): *Dutch, English, French, German, Italian, Polish, Portuguese, Spanish*
-
-* **[CommonVoice](https://commonvoice.mozilla.org/en/languages)** (36 languages, 3.6k hours): *Arabic, Basque, Breton, Chinese (CN), Chinese (HK), Chinese (TW), Chuvash, Dhivehi, Dutch, English, Esperanto, Estonian, French, German, Hakh-Chin, Indonesian, Interlingua, Irish, Italian, Japanese, Kabyle, Kinyarwanda, Kyrgyz, Latvian, Mongolian, Persian, Portuguese, Russian, Sakha, Slovenian, Spanish, Swedish, Tamil, Tatar, Turkish, Welsh* (see also [finetuning splits](https://dl.fbaipublicfiles.com/cpc_audio/common_voices_splits.tar.gz) from [this paper](https://arxiv.org/abs/2002.02848)).
-
-* **[Babel](https://catalog.ldc.upenn.edu/byyear)** (17 languages, 1.7k hours): *Assamese, Bengali, Cantonese, Cebuano, Georgian, Haitian, Kazakh, Kurmanji, Lao, Pashto, Swahili, Tagalog, Tamil, Tok, Turkish, Vietnamese, Zulu*
-
-
-## Training a new model with the CLI tools
-
-Given a directory containing wav files to be used for pretraining (we recommend splitting each file into separate files 10 to 30 seconds in length)
-
-### Prepare training data manifest:
-
-First, install the `soundfile` library:
-```shell script
-pip install soundfile
-```
-
-Next, run:
-
-```shell script
-$ python examples/wav2vec/wav2vec_manifest.py /path/to/waves --dest /manifest/path --ext $ext --valid-percent $valid
-```
-
-$ext should be set to flac, wav, or whatever format your dataset happens to use that soundfile can read.
-
-$valid should be set to some reasonable percentage (like 0.01) of training data to use for validation.
-To use a pre-defined validation set (like dev-other from librispeech), set it to 0 and then overwrite valid.tsv with a
-separately pre-processed manifest file.
-
-### Train a wav2vec 2.0 base model:
-
-This configuration was used for the base model trained on the Librispeech dataset in the wav2vec 2.0 paper
-
-Note that the input is expected to be single channel, sampled at 16 kHz
-
-```shell script
-$ fairseq-hydra-train \
- task.data=/path/to/data \
- --config-dir /path/to/fairseq-py/examples/wav2vec/config/pretraining \
- --config-name wav2vec2_base_librispeech
-```
-
-Note: you can simulate 64 GPUs by using k GPUs and adding command line parameters (before `--config-dir`)
-`distributed_training.distributed_world_size=k` `+optimization.update_freq='[x]'` where x = 64/k
-
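-For example, a sketch of the base-model command with 8 GPUs (update_freq = 64 / 8 = 8):
-
-```shell script
-$ fairseq-hydra-train \
- task.data=/path/to/data \
- distributed_training.distributed_world_size=8 \
- +optimization.update_freq='[8]' \
- --config-dir /path/to/fairseq-py/examples/wav2vec/config/pretraining \
- --config-name wav2vec2_base_librispeech
-```
-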
-### Train a wav2vec 2.0 large model:
-
-This configuration was used for the large model trained on the Libri-light dataset in the wav2vec 2.0 paper
-
-```shell script
-$ fairseq-hydra-train \
- task.data=/path/to/data \
- --config-dir /path/to/fairseq-py/examples/wav2vec/config/pretraining \
- --config-name wav2vec2_large_librivox
-```
-
-Note: you can simulate 128 GPUs by using k GPUs and adding command line parameters (before `--config-dir`)
-`distributed_training.distributed_world_size=k` `+optimization.update_freq='[x]'` where x = 128/k
-
-### Fine-tune a pre-trained model with CTC:
-
-Fine-tuning a model requires parallel audio and label files, as well as a vocabulary file in fairseq format.
-A letter vocabulary can be downloaded [here](https://dl.fbaipublicfiles.com/fairseq/wav2vec/dict.ltr.txt).
-An example [script](libri_labels.py) that generates labels for the Librispeech dataset from the tsv file produced by wav2vec_manifest.py can be used as follows:
-
-```shell script
-split=train
-$ python libri_labels.py /path/to/tsv --output-dir /output/dir --output-name $split
-```
-
-Fine-tuning on 100h of Librispeech with letter targets:
-```shell script
-$ fairseq-hydra-train \
- distributed_training.distributed_port=$PORT \
- task.data=/path/to/data \
- model.w2v_path=/path/to/model.pt \
- --config-dir /path/to/fairseq-py/examples/wav2vec/config/finetuning \
- --config-name base_100h
-```
-
-There are other config files in the config/finetuning directory that can be used to fine-tune on other splits.
-You can specify the right config via the `--config-name` parameter.
-
-Note: you can simulate 24 GPUs by using k GPUs and adding command line parameters (before `--config-dir`)
-`distributed_training.distributed_world_size=k` `+optimization.update_freq='[x]'` where x = 24/k
-
-Decoding with a language model during training requires flashlight [python bindings](https://github.com/facebookresearch/flashlight/tree/master/bindings/python) (previously called [wav2letter](https://github.com/facebookresearch/wav2letter)).
-If you want to use a language model, add `+criterion.wer_args='[/path/to/kenlm, /path/to/lexicon, 2, -1]'` to the command line.
-
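-For example, a sketch of the 100h fine-tuning command above with kenlm decoding enabled during training (paths are placeholders):
-
-```shell script
-$ fairseq-hydra-train \
- distributed_training.distributed_port=$PORT \
- task.data=/path/to/data \
- model.w2v_path=/path/to/model.pt \
- +criterion.wer_args='[/path/to/kenlm, /path/to/lexicon, 2, -1]' \
- --config-dir /path/to/fairseq-py/examples/wav2vec/config/finetuning \
- --config-name base_100h
-```
-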
-### Evaluating a CTC model:
-
-Evaluating a CTC model with a language model requires [flashlight python bindings](https://github.com/facebookresearch/flashlight/tree/master/bindings/python) (previously called [wav2letter](https://github.com/facebookresearch/wav2letter)) to be installed.
-
-The Fairseq transformer language model used in the wav2vec 2.0 paper can be obtained from the [wav2letter model repository](https://github.com/facebookresearch/wav2letter/tree/master/recipes/sota/2019).
-Be sure to upper-case the language model vocab after downloading it.
-
-Letter dictionary for pre-trained models can be found [here](https://dl.fbaipublicfiles.com/fairseq/wav2vec/dict.ltr.txt).
-
-Next, run the evaluation command:
-
-```shell script
-subset=dev_other
-python examples/speech_recognition/infer.py /checkpoint/abaevski/data/speech/libri/10h/wav2vec/raw --task audio_finetuning \
---nbest 1 --path /path/to/model --gen-subset $subset --results-path /path/to/save/results/for/sclite --w2l-decoder kenlm \
---lm-model /path/to/kenlm.bin --lm-weight 2 --word-score -1 --sil-weight 0 --criterion ctc --labels ltr --max-tokens 4000000 \
---post-process letter
-```
-
-To get raw numbers, use --w2l-decoder viterbi and omit the lexicon. To use the transformer language model, use --w2l-decoder fairseqlm.
-
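-For example, a viterbi (no language model) run could look like this sketch, with paths as placeholders:
-
-```shell script
-subset=dev_other
-python examples/speech_recognition/infer.py /path/to/finetuning/manifest --task audio_finetuning \
---nbest 1 --path /path/to/model --gen-subset $subset --results-path /path/to/save/results/for/sclite --w2l-decoder viterbi \
---criterion ctc --labels ltr --max-tokens 4000000 --post-process letter
-```
-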
-## Use wav2vec 2.0 with 🤗Transformers:
-
-Wav2Vec2 has been available in the [🤗Transformers library](https://github.com/huggingface/transformers) since version 4.4.
-
-Pretrained Models can be found on the [hub](https://huggingface.co/models?filter=wav2vec2)
-and documentation can be found [here](https://huggingface.co/transformers/master/model_doc/wav2vec2.html).
-
-Usage example:
-
-```python
-# !pip install transformers
-# !pip install datasets
-import soundfile as sf
-import torch
-from datasets import load_dataset
-from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
-
-# load pretrained model
-processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
-model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
-
-
-librispeech_samples_ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
-
-# load audio
-audio_input, sample_rate = sf.read(librispeech_samples_ds[0]["file"])
-
-# pad input values and return pt tensor
-input_values = processor(audio_input, sampling_rate=sample_rate, return_tensors="pt").input_values
-
-# INFERENCE
-
-# retrieve logits & take argmax
-logits = model(input_values).logits
-predicted_ids = torch.argmax(logits, dim=-1)
-
-# transcribe
-transcription = processor.decode(predicted_ids[0])
-
-# FINE-TUNE
-
-target_transcription = "A MAN SAID TO THE UNIVERSE I EXIST"
-
-# encode labels
-with processor.as_target_processor():
- labels = processor(target_transcription, return_tensors="pt").input_ids
-
-# compute loss by passing labels
-loss = model(input_values, labels=labels).loss
-loss.backward()
-```
-
-# wav2vec
-
-Example to train a wav2vec model as described in [wav2vec: Unsupervised Pre-training for Speech Recognition (Schneider et al., 2019)](https://arxiv.org/abs/1904.05862).
-
-## Pre-trained models
-
-Description | Dataset | Model
----|---|---
-Wav2Vec large | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_large.pt)
-
-#### Example usage:
-```python
-import torch
-import fairseq
-
-cp_path = '/path/to/wav2vec.pt'
-model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp_path])
-model = model[0]
-model.eval()
-
-wav_input_16khz = torch.randn(1,10000)
-z = model.feature_extractor(wav_input_16khz)
-c = model.feature_aggregator(z)
-```
-
-## Training a new model with the CLI tools
-
-Given a directory containing wav files to be used for pretraining (we recommend splitting each file into separate files 10 to 30 seconds in length)
-
-### Prepare training data manifest:
-
-```
-$ python examples/wav2vec/wav2vec_manifest.py /path/to/waves --dest /manifest/path --ext wav
-```
-
-### Train a wav2vec model:
-
-```
-$ python train.py /manifest/path --save-dir /model/path --num-workers 6 --fp16 --max-update 400000 --save-interval 1 --no-epoch-checkpoints \
---arch wav2vec --task audio_pretraining --min-lr 1e-06 --stop-min-lr 1e-09 --optimizer adam --lr 0.005 --lr-scheduler cosine \
---conv-feature-layers [(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1)] \
---conv-aggregator-layers [(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)] \
---skip-connections-agg --residual-scale 0.5 --log-compression --warmup-updates 500 --warmup-init-lr 1e-07 --criterion wav2vec --num-negatives 10 \
---max-sample-size 150000 --max-tokens 1500000 --skip-invalid-size-inputs-valid-test
-```
-
-### Run wav2vec2 pre-training on Google Cloud TPUs:
-
-Wav2Vec2 is now supported on TPUs! Currently this covers pre-training only.
-
-#### Using hydra on a v3-8:
-
-```
-$ OMP_NUM_THREADS=1 fairseq-hydra-train \
- task.data=/manifest/path \
- --config-dir /PATH/TO/FAIRSEQ/examples/wav2vec/config/pretraining \
- --config-name wav2vec2_large_librivox_tpu.yaml
-```
-
-#### Using command line arguments on a v3-8:
-Note: running via command line arguments currently has a [known problem](https://github.com/pytorch/fairseq/issues/3741).
-
-```
-$ OMP_NUM_THREADS=1 python train.py /manifest/path --save-dir /model/path --num-workers 6 --fp16 --max-update 400000 --save-interval 1 --no-epoch-checkpoints \
---arch wav2vec2 --task audio_pretraining --min-lr 1e-06 --stop-min-lr 1e-09 --optimizer adam --lr 0.005 --lr-scheduler cosine \
---conv-feature-layers [(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1)] \
---conv-aggregator-layers [(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)] \
---skip-connections-agg --residual-scale 0.5 --log-compression --warmup-updates 500 --warmup-init-lr 1e-07 --criterion wav2vec --num-negatives 10 \
---max-sample-size 150000 --max-tokens 1500000 --skip-invalid-size-inputs-valid-test \
---tpu --distributed-world-size 8 --num-batch-buckets 3 --enable-padding \
---encoder-layerdrop 0 --mask-channel-prob 0.1
-```
-
-#### Using hydra on a pod slice (v3-N with N > 8):
-
-```
-$ OMP_NUM_THREADS=1 fairseq-hydra-train \
- task.data=/manifest/path \
- --config-dir /PATH/TO/FAIRSEQ/examples/wav2vec/config/pretraining \
- --config-name wav2vec2_large_librivox_tpu-pod.yaml # edit distributed-world-size accordingly
-```
-
-#### Using command line arguments on a pod slice (v3-N with N > 8):
-Note: running via command line arguments currently has a [known problem](https://github.com/pytorch/fairseq/issues/3741).
-
-```
-$ python -m torch_xla.distributed.xla_dist \
- --tpu ${TPUNAME} --conda-env=torch-xla-${TORCH_XLA_VERSION} --env OMP_NUM_THREADS=1 \
- -- \
-python train.py /manifest/path --save-dir /model/path --num-workers 6 --fp16 --max-update 400000 --save-interval 1 --no-epoch-checkpoints \
---arch wav2vec2 --task audio_pretraining --min-lr 1e-06 --stop-min-lr 1e-09 --optimizer adam --lr 0.005 --lr-scheduler cosine \
---conv-feature-layers [(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1)] \
---conv-aggregator-layers [(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)] \
---skip-connections-agg --residual-scale 0.5 --log-compression --warmup-updates 500 --warmup-init-lr 1e-07 --criterion wav2vec --num-negatives 10 \
---max-sample-size 150000 --max-tokens 1500000 --skip-invalid-size-inputs-valid-test \
---tpu --distributed-world-size ${WORLD_SIZE} --num-batch-buckets 3 --enable-padding \
---encoder-layerdrop 0 --mask-channel-prob 0.1
-```
-
-### Extract embeddings from the downstream task data:
-
-```
-$ PYTHONPATH=/path/to/fairseq python examples/wav2vec/wav2vec_featurize.py --input /path/to/task/waves --output /path/to/output \
---model /model/path/checkpoint_best.pt --split train valid test
-```
-
-# vq-wav2vec
-
-Example to train a vq-wav2vec model as described in [vq-wav2vec: Self-Supervised Learning of Discrete Speech Representations (Baevski et al., 2019)](https://arxiv.org/abs/1910.05453).
-
-These models are also used in [Effectiveness of self-supervised pre-training for speech recognition (Baevski et al., 2019)](https://arxiv.org/abs/1911.03912).
-
-## Pre-trained models
-
-Description | Dataset | Model
----|---|---
-vq-wav2vec Gumbel | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/vq-wav2vec.pt)
-vq-wav2vec K-means | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/vq-wav2vec_kmeans.pt)
-Roberta on K-means codes | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/bert_kmeans.tar)
-
-#### Example usage:
-```python
-import torch
-import fairseq
-
-cp = torch.load('/path/to/vq-wav2vec.pt')
-model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp])
-model = model[0]
-model.eval()
-
-wav_input_16khz = torch.randn(1,10000)
-z = model.feature_extractor(wav_input_16khz)
-_, idxs = model.vector_quantizer.forward_idx(z)
-print(idxs.shape) # output: torch.Size([1, 60, 2]), 60 timesteps with 2 indexes corresponding to 2 groups in the model
-```
-
-## Training a new model with the CLI tools
-
-Given a directory containing wav files to be used for pretraining (we recommend splitting each file into separate files 10 to 30 seconds in length)
-
-### Prepare training data manifest:
-
-```
-$ python examples/wav2vec/wav2vec_manifest.py /path/to/waves --dest /manifest/path --ext wav
-```
-
-### Train a gumbel vq-wav2vec model:
-
-```
-$ python train.py /manifest/path --save-dir /model/path --num-workers 6 --fp16 --max-update 400000 \
---save-interval 1 --no-epoch-checkpoints --arch wav2vec --task audio_pretraining --min-lr 1e-06 --stop-min-lr 1e-09 \
---optimizer adam --lr 1e-05 --lr-scheduler cosine \
---conv-feature-layers [(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1), (512, 1, 1)] \
---conv-aggregator-layers [(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)] \
---activation gelu --offset auto --skip-connections-agg --residual-scale 0.5 \
---log-keys ["prob_perplexity","code_perplexity","temp"] --vq-type gumbel --vq-groups 2 --vq-depth 2 \
---combine-groups --vq-vars 320 --vq-temp (2,0.5,0.999995) --prediction-steps 12 --warmup-updates 1000 \
---warmup-init-lr 1e-07 --criterion wav2vec --num-negatives 10 --max-sample-size 150000 \
---max-tokens 300000 --cross-sample-negatives 0 --update-freq 1 --seed 2 --skip-invalid-size-inputs-valid-test
-```
-
-For k-means training, set `--vq-type` to "kmeans" and add the `--loss-weights [1]` argument. Pre-trained models were trained on 16 GPUs.
-
-### Tokenize audio data (e.g. for BERT training):
-
-```
-$ PYTHONPATH=/path/to/fairseq python examples/wav2vec/vq-wav2vec_featurize.py --data-dir /manifest/path --output-dir /path/to/output \
---checkpoint /model/path/checkpoint_best.pt --split train valid test --extension tsv
-```
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/adafactor.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/adafactor.py
deleted file mode 100644
index c969b9fbc0d229a25f2046ec67c53c57a433814b..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/adafactor.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-
-import torch
-import torch.optim
-
-from . import LegacyFairseqOptimizer, register_optimizer
-
-
-@register_optimizer("adafactor")
-class FairseqAdafactor(LegacyFairseqOptimizer):
- def __init__(self, args, params):
- super().__init__(args)
- self._optimizer = Adafactor(params, **self.optimizer_config)
-
- @staticmethod
- def add_args(parser):
- """Add optimizer-specific arguments to the parser."""
- # fmt: off
- parser.add_argument('--adafactor-eps', default='(1e-30, 1e-3)', metavar="E",
- help='epsilons for Adafactor optimizer')
- parser.add_argument('--clip-threshold', type=float, default=1.0, metavar="C",
- help='threshold for clipping update root mean square')
- parser.add_argument('--decay-rate', type=float, default=-0.8, metavar="D",
- help='decay rate of the second moment estimator')
- parser.add_argument('--beta1', type=float, default=None, metavar="B",
- help='beta for first moment estimator. Optional')
- parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
- help='weight decay')
- parser.add_argument('--scale-parameter', action='store_true',
- help='scale learning rate by root mean square of parameter')
- parser.add_argument('--relative-step', action='store_true',
- help='set learning rate to inverse square root of timestep,'
- 'otherwise use external learning rate')
- parser.add_argument('--warmup-init', action='store_true',
- help='use relative step for warm-up learning rate schedule')
- # fmt: on
-
- @property
- def optimizer_config(self):
- """
- Return a kwarg dictionary that will be used to override optimizer
- args stored in checkpoints. This allows us to load a checkpoint and
- resume training using a different set of optimizer args, e.g., with a
- different learning rate.
- Note: convergence issues have been empirically observed with fp16 on;
- this might require a search for an appropriate configuration.
- """
- return {
- "lr": self.args.lr[0],
- "eps": eval(self.args.adafactor_eps),
- "clip_threshold": self.args.clip_threshold,
- "decay_rate": self.args.decay_rate,
- "beta1": self.args.beta1,
- "weight_decay": self.args.weight_decay,
- "scale_parameter": self.args.scale_parameter, # defaults to False
- "relative_step": self.args.relative_step, # defaults to False
- "warmup_init": self.args.warmup_init,
- }
-
-
-class Adafactor(torch.optim.Optimizer):
- """Implements Adafactor algorithm.
-
- This implementation is based on:
- `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`
- (see https://arxiv.org/abs/1804.04235)
-
- Note that this optimizer internally adjusts the learning rate
- depending on the *scale_parameter*, *relative_step* and
- *warmup_init* options. To use a manual (external) learning rate
- schedule you should set `scale_parameter=False` and
- `relative_step=False`.
-
- Args:
- params (iterable): iterable of parameters to optimize or dicts defining
- parameter groups
- lr (float, optional): external learning rate (default: None)
- eps (tuple[float, float]): regularization constants for square gradient
- and parameter scale respectively (default: (1e-30, 1e-3))
- clip_threshold (float): threshold of root mean square of
- final gradient update (default: 1.0)
- decay_rate (float): coefficient used to compute running averages of square
- gradient (default: -0.8)
- beta1 (float): coefficient used for computing running averages of gradient
- (default: None)
- weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
- scale_parameter (bool): if True, learning rate is scaled by root mean square of
- parameter (default: True)
- relative_step (bool): if True, time-dependent learning rate is computed
- instead of external learning rate (default: True)
- warmup_init (bool): time-dependent learning rate computation depends on
- whether warm-up initialization is being used (default: False)
- """
-
- def __init__(
- self,
- params,
- lr=None,
- eps=(1e-30, 1e-3),
- clip_threshold=1.0,
- decay_rate=-0.8,
- beta1=None,
- weight_decay=0.0,
- scale_parameter=True,
- relative_step=True,
- warmup_init=False,
- ):
- if lr is not None and relative_step:
- raise ValueError("Cannot combine manual lr and relative_step options")
- if warmup_init and not relative_step:
- raise ValueError("warmup_init requires relative_step=True")
-
- defaults = dict(
- lr=lr,
- eps=eps,
- clip_threshold=clip_threshold,
- decay_rate=decay_rate,
- beta1=beta1,
- weight_decay=weight_decay,
- scale_parameter=scale_parameter,
- relative_step=relative_step,
- warmup_init=warmup_init,
- )
- super(Adafactor, self).__init__(params, defaults)
-
- @property
- def supports_memory_efficient_fp16(self):
- return True
-
- @property
- def supports_flat_params(self):
- return False
-
- def _get_lr(self, param_group, param_state):
- rel_step_sz = param_group["lr"]
- if param_group["relative_step"]:
- min_step = (
- 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2
- )
- rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"]))
- param_scale = 1.0
- if param_group["scale_parameter"]:
- param_scale = max(param_group["eps"][1], param_state["RMS"])
- return param_scale * rel_step_sz
-
- def _get_options(self, param_group, param_shape):
- factored = len(param_shape) >= 2
- use_first_moment = param_group["beta1"] is not None
- return factored, use_first_moment
-
- def _rms(self, tensor):
- return tensor.norm(2) / (tensor.numel() ** 0.5)
-
- def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):
- r_factor = (
- (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True))
- .rsqrt_()
- .unsqueeze(-1)
- )
- c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
- return torch.mul(r_factor, c_factor)
-
- def step(self, closure=None):
- """Performs a single optimization step.
-
- Args:
- closure (callable, optional): A closure that reevaluates the model
- and returns the loss.
- """
- loss = None
- if closure is not None:
- loss = closure()
-
- for group in self.param_groups:
- for p in group["params"]:
- if p.grad is None:
- continue
- grad = p.grad.data
- if grad.dtype in {torch.float16, torch.bfloat16}:
- grad = grad.float()
- if grad.is_sparse:
- raise RuntimeError("Adafactor does not support sparse gradients.")
-
- state = self.state[p]
- grad_shape = grad.shape
-
- factored, use_first_moment = self._get_options(group, grad_shape)
- # State Initialization
- if len(state) == 0:
- state["step"] = 0
-
- if use_first_moment:
- # Exponential moving average of gradient values
- state["exp_avg"] = torch.zeros_like(grad)
- if factored:
- state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad)
- state["exp_avg_sq_col"] = torch.zeros(
- grad_shape[:-2] + grad_shape[-1:]
- ).to(grad)
- else:
- state["exp_avg_sq"] = torch.zeros_like(grad)
-
- state["RMS"] = 0
- else:
- if use_first_moment:
- state["exp_avg"] = state["exp_avg"].to(grad)
- if factored:
- state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad)
- state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad)
- else:
- state["exp_avg_sq"] = state["exp_avg_sq"].to(grad)
-
- p_data_fp32 = p.data
- if p.data.dtype in {torch.float16, torch.bfloat16}:
- p_data_fp32 = p_data_fp32.float()
-
- state["step"] += 1
- state["RMS"] = self._rms(p_data_fp32)
- group["lr"] = self._get_lr(group, state)
-
- beta2t = 1.0 - math.pow(state["step"], group["decay_rate"])
- update = (grad ** 2) + group["eps"][0]
- if factored:
- exp_avg_sq_row = state["exp_avg_sq_row"]
- exp_avg_sq_col = state["exp_avg_sq_col"]
-
- exp_avg_sq_row.mul_(beta2t).add_(
- update.mean(dim=-1), alpha=1.0 - beta2t
- )
- exp_avg_sq_col.mul_(beta2t).add_(
- update.mean(dim=-2), alpha=1.0 - beta2t
- )
-
- # Approximation of exponential moving average of square of gradient
- update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
- update.mul_(grad)
- else:
- exp_avg_sq = state["exp_avg_sq"]
-
- exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t)
- update = exp_avg_sq.rsqrt().mul_(grad)
-
- update.div_(
- (self._rms(update) / group["clip_threshold"]).clamp_(min=1.0)
- )
- update.mul_(group["lr"])
-
- if use_first_moment:
- exp_avg = state["exp_avg"]
- exp_avg.mul_(group["beta1"]).add_(update, alpha=1 - group["beta1"])
- update = exp_avg
-
- if group["weight_decay"] != 0:
- p_data_fp32.add_(
- p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
- )
-
- p_data_fp32.add_(-update)
-
- if p.data.dtype in {torch.float16, torch.bfloat16}:
- p.data.copy_(p_data_fp32)
-
- return loss
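-
-
-if __name__ == "__main__":
-    # Minimal usage sketch: Adafactor with a manual (external) learning rate,
-    # which requires scale_parameter=False and relative_step=False as noted in
-    # the class docstring. The model and hyperparameters are illustrative only.
-    model = torch.nn.Linear(16, 4)
-    optimizer = Adafactor(
-        model.parameters(),
-        lr=1e-3,                # external learning rate
-        scale_parameter=False,  # do not scale the lr by parameter RMS
-        relative_step=False,    # do not use the time-dependent schedule
-    )
-    loss = model(torch.randn(8, 16)).sum()
-    loss.backward()
-    optimizer.step()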
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_ema.py b/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_ema.py
deleted file mode 100644
index 88ea65a434e49775d40f2b08ce6df0f8d9929c18..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_ema.py
+++ /dev/null
@@ -1,199 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import unittest
-from copy import deepcopy
-from dataclasses import dataclass
-from typing import Optional
-
-import torch
-from fairseq.models.ema import EMA
-
-
-class DummyModule(torch.nn.Module):
- def __init__(self) -> None:
- """LightningModule for testing purposes
-
- Args:
- epoch_min_loss_override (int, optional): Pass in an epoch that will be set to the minimum
- validation loss for testing purposes (zero based). If None this is ignored. Defaults to None.
- """
- super().__init__()
- self.layer = torch.nn.Linear(in_features=32, out_features=2)
- self.another_layer = torch.nn.Linear(in_features=2, out_features=2)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.layer(x)
- return self.another_layer(x)
-
-
-@dataclass
-class EMAConfig(object):
- ema_decay: float = 0.99
- ema_start_update: int = 0
- ema_fp32: bool = False
- ema_seed_model: Optional[str] = None
-
-
-class TestEMAGPU(unittest.TestCase):
- def assertTorchAllClose(self, x, y, atol=1e-8, rtol=1e-5, msg=None):
- diff = x.float() - y.float()
- diff_norm = torch.norm(diff)
- other_norm = torch.norm(y.float())
-
- if msg is None:
- msg = "|input - other| > {} + {} * |other|".format(
- atol, rtol
- )
-
- self.assertLessEqual(
- diff_norm,
- atol + rtol * other_norm,
- msg=msg,
- )
-
- def test_ema(self):
- model = DummyModule()
- optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
- state = deepcopy(model.state_dict())
- config = EMAConfig()
- ema = EMA(model, config)
-
- # set decay
- ema._set_decay(config.ema_decay)
- self.assertEqual(ema.get_decay(), config.ema_decay)
-
- # get model
- self.assertEqual(ema.get_model(), ema.model)
-
- # Since fp32 params is not used, it should be of size 0
- self.assertEqual(len(ema.fp32_params), 0)
-
- # EMA step
- x = torch.randn(32)
- y = model(x)
- loss = y.sum()
- loss.backward()
- optimizer.step()
-
- ema.step(model)
-
- ema_state_dict = ema.get_model().state_dict()
-
- for key, param in model.state_dict().items():
- prev_param = state[key]
- ema_param = ema_state_dict[key]
-
- if "version" in key:
- # Do not decay a model.version pytorch param
- continue
- self.assertTorchAllClose(
- ema_param,
- config.ema_decay * prev_param + (1 - config.ema_decay) * param,
- )
-
- # Since fp32 params is not used, it should be of size 0
- self.assertEqual(len(ema.fp32_params), 0)
-
- # Load EMA into model
- model2 = DummyModule()
- ema.reverse(model2)
-
- for key, param in model2.state_dict().items():
- ema_param = ema_state_dict[key]
- self.assertTrue(
- torch.allclose(ema_param, param)
- )
-
- def test_ema_fp32(self):
- model = DummyModule().half()
- optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
- state = deepcopy(model.state_dict())
- config = EMAConfig(ema_fp32=True)
- ema = EMA(model, config)
-
- x = torch.randn(32)
- y = model(x.half())
- loss = y.sum()
- loss.backward()
- optimizer.step()
-
- ema.step(model)
-
- for key, param in model.state_dict().items():
- prev_param = state[key]
- ema_param = ema.get_model().state_dict()[key]
-
- if "version" in key:
- # Do not decay a model.version pytorch param
- continue
- self.assertIn(key, ema.fp32_params)
-
- # EMA update is done in fp32, and hence the EMA param must be
- # closer to the EMA update done in fp32 than in fp16.
- self.assertLessEqual(
- torch.norm(
- ema_param.float() -
- (config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float()).half().float()
- ),
- torch.norm(
- ema_param.float() -
- (config.ema_decay * prev_param + (1 - config.ema_decay) * param).float()
- ),
- )
- self.assertTorchAllClose(
- ema_param,
- (config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float()).half(),
- )
-
- def test_ema_fp16(self):
- model = DummyModule().half()
- optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
- state = deepcopy(model.state_dict())
- config = EMAConfig(ema_fp32=False)
- ema = EMA(model, config)
-
- # Since fp32 params is not used, it should be of size 0
- self.assertEqual(len(ema.fp32_params), 0)
-
- x = torch.randn(32)
- y = model(x.half())
- loss = y.sum()
- loss.backward()
- optimizer.step()
-
- ema.step(model)
-
- for key, param in model.state_dict().items():
- prev_param = state[key]
- ema_param = ema.get_model().state_dict()[key]
-
- if "version" in key:
- # Do not decay a model.version pytorch param
- continue
-
- # EMA update is done in fp16, and hence the EMA param must be
- # closer to the EMA update done in fp16 than in fp32.
- self.assertLessEqual(
- torch.norm(
- ema_param.float() -
- (config.ema_decay * prev_param + (1 - config.ema_decay) * param).float()
- ),
- torch.norm(
- ema_param.float() -
- (config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float()).half().float()
- ),
- )
- self.assertTorchAllClose(
- ema_param,
- config.ema_decay * prev_param + (1 - config.ema_decay) * param,
- )
-
- # Since fp32 params is not used, it should be of size 0
- self.assertEqual(len(ema.fp32_params), 0)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_lm_context_window.py b/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_lm_context_window.py
deleted file mode 100644
index 7415e86abdf8ddc2d797092bf98f7a1331e038d6..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_lm_context_window.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import unittest
-
-import torch
-from fairseq.data import MonolingualDataset
-from fairseq.tasks.language_modeling import LanguageModelingTask, LanguageModelingConfig
-from tests import utils as test_utils
-
-
-class TestLMContextWindow(unittest.TestCase):
-
- def test_eval_dataloader(self):
- dictionary = test_utils.dummy_dictionary(10)
- assert len(dictionary) == 14 # 4 extra special symbols
- assert dictionary.pad() == 1
-
- dataset = test_utils.TestDataset([
- torch.tensor([4, 5, 6, 7], dtype=torch.long),
- torch.tensor([8, 9, 10, 11], dtype=torch.long),
- torch.tensor([12, 13], dtype=torch.long),
- ])
- dataset = MonolingualDataset(dataset, sizes=[4, 4, 2], src_vocab=dictionary)
-
- config = LanguageModelingConfig(tokens_per_sample=4)
- task = LanguageModelingTask(config, dictionary)
-
- eval_dataloader = task.eval_lm_dataloader(
- dataset=dataset,
- batch_size=1,
- context_window=2,
- )
-
- batch = next(eval_dataloader)
- assert batch["net_input"]["src_tokens"][0].tolist() == [4, 5, 6, 7, 1, 1]
- assert batch["target"][0].tolist() == [4, 5, 6, 7, 1, 1]
-
- batch = next(eval_dataloader)
- assert batch["net_input"]["src_tokens"][0].tolist() == [6, 7, 8, 9, 10, 11]
- assert batch["target"][0].tolist() == [1, 1, 8, 9, 10, 11]
-
- batch = next(eval_dataloader)
- assert batch["net_input"]["src_tokens"][0].tolist() == [10, 11, 12, 13]
- assert batch["target"][0].tolist() == [1, 1, 12, 13]
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/spaces/OptimalScale/Robin-33b/lmflow/models/base_model.py b/spaces/OptimalScale/Robin-33b/lmflow/models/base_model.py
deleted file mode 100644
index 335dbe963e442d735667713c80152a452970c3f6..0000000000000000000000000000000000000000
--- a/spaces/OptimalScale/Robin-33b/lmflow/models/base_model.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-"""Base model class.
-"""
-
-from abc import ABC
-
-
-class BaseModel(ABC):
-
- def __init__(self, *args, **kwargs):
- pass
diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/configs/_base_/schedules/schedule_80k.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/configs/_base_/schedules/schedule_80k.py
deleted file mode 100644
index c190cee6bdc7922b688ea75dc8f152fa15c24617..0000000000000000000000000000000000000000
--- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/configs/_base_/schedules/schedule_80k.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
-optimizer_config = dict()
-# learning policy
-lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
-# runtime settings
-runner = dict(type='IterBasedRunner', max_iters=80000)
-checkpoint_config = dict(by_epoch=False, interval=8000)
-evaluation = dict(interval=8000, metric='mIoU')
diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py
deleted file mode 100644
index 722d5d8d71f75486e2db3008907c4eadfca41d63..0000000000000000000000000000000000000000
--- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch.nn as nn
-
-from .conv_module import ConvModule
-
-
-class DepthwiseSeparableConvModule(nn.Module):
- """Depthwise separable convolution module.
-
- See https://arxiv.org/pdf/1704.04861.pdf for details.
-
- This module can replace a ConvModule with the conv block replaced by two
- conv blocks: a depthwise conv block and a pointwise conv block. The depthwise
- conv block contains depthwise-conv/norm/activation layers. The pointwise
- conv block contains pointwise-conv/norm/activation layers. It should be
- noted that there will be a norm/activation layer in the depthwise conv block
- if `norm_cfg` and `act_cfg` are specified.
-
- Args:
- in_channels (int): Number of channels in the input feature map.
- Same as that in ``nn._ConvNd``.
- out_channels (int): Number of channels produced by the convolution.
- Same as that in ``nn._ConvNd``.
- kernel_size (int | tuple[int]): Size of the convolving kernel.
- Same as that in ``nn._ConvNd``.
- stride (int | tuple[int]): Stride of the convolution.
- Same as that in ``nn._ConvNd``. Default: 1.
- padding (int | tuple[int]): Zero-padding added to both sides of
- the input. Same as that in ``nn._ConvNd``. Default: 0.
- dilation (int | tuple[int]): Spacing between kernel elements.
- Same as that in ``nn._ConvNd``. Default: 1.
- norm_cfg (dict): Default norm config for both depthwise ConvModule and
- pointwise ConvModule. Default: None.
- act_cfg (dict): Default activation config for both depthwise ConvModule
- and pointwise ConvModule. Default: dict(type='ReLU').
- dw_norm_cfg (dict): Norm config of depthwise ConvModule. If it is
- 'default', it will be the same as `norm_cfg`. Default: 'default'.
- dw_act_cfg (dict): Activation config of depthwise ConvModule. If it is
- 'default', it will be the same as `act_cfg`. Default: 'default'.
- pw_norm_cfg (dict): Norm config of pointwise ConvModule. If it is
- 'default', it will be the same as `norm_cfg`. Default: 'default'.
- pw_act_cfg (dict): Activation config of pointwise ConvModule. If it is
- 'default', it will be the same as `act_cfg`. Default: 'default'.
- kwargs (optional): Other shared arguments for depthwise and pointwise
- ConvModule. See ConvModule for ref.
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- kernel_size,
- stride=1,
- padding=0,
- dilation=1,
- norm_cfg=None,
- act_cfg=dict(type='ReLU'),
- dw_norm_cfg='default',
- dw_act_cfg='default',
- pw_norm_cfg='default',
- pw_act_cfg='default',
- **kwargs):
- super(DepthwiseSeparableConvModule, self).__init__()
- assert 'groups' not in kwargs, 'groups should not be specified'
-
- # if norm/activation config of depthwise/pointwise ConvModule is not
- # specified, use default config.
- dw_norm_cfg = dw_norm_cfg if dw_norm_cfg != 'default' else norm_cfg
- dw_act_cfg = dw_act_cfg if dw_act_cfg != 'default' else act_cfg
- pw_norm_cfg = pw_norm_cfg if pw_norm_cfg != 'default' else norm_cfg
- pw_act_cfg = pw_act_cfg if pw_act_cfg != 'default' else act_cfg
-
- # depthwise convolution
- self.depthwise_conv = ConvModule(
- in_channels,
- in_channels,
- kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- groups=in_channels,
- norm_cfg=dw_norm_cfg,
- act_cfg=dw_act_cfg,
- **kwargs)
-
- self.pointwise_conv = ConvModule(
- in_channels,
- out_channels,
- 1,
- norm_cfg=pw_norm_cfg,
- act_cfg=pw_act_cfg,
- **kwargs)
-
- def forward(self, x):
- x = self.depthwise_conv(x)
- x = self.pointwise_conv(x)
- return x
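-
-
-if __name__ == "__main__":
-    # Minimal usage sketch: a 3x3 depthwise-separable block with batch norm and
-    # ReLU applied to both the depthwise and the pointwise convolutions via the
-    # shared norm_cfg/act_cfg. Shapes and configs are illustrative only.
-    import torch
-
-    m = DepthwiseSeparableConvModule(
-        in_channels=32,
-        out_channels=64,
-        kernel_size=3,
-        padding=1,
-        norm_cfg=dict(type='BN'),
-        act_cfg=dict(type='ReLU'),
-    )
-    out = m(torch.randn(2, 32, 56, 56))  # -> (2, 64, 56, 56)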
diff --git a/spaces/PascalNotin/Tranception_design/README.md b/spaces/PascalNotin/Tranception_design/README.md
deleted file mode 100644
index a7ad609a7e934c727acc2de77feaac0fcd6970af..0000000000000000000000000000000000000000
--- a/spaces/PascalNotin/Tranception_design/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Tranception Design
-emoji: 🧬
-colorFrom: blue
-colorTo: gray
-sdk: gradio
-sdk_version: 3.1.7
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py
deleted file mode 100644
index 722d5d8d71f75486e2db3008907c4eadfca41d63..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch.nn as nn
-
-from .conv_module import ConvModule
-
-
-class DepthwiseSeparableConvModule(nn.Module):
- """Depthwise separable convolution module.
-
- See https://arxiv.org/pdf/1704.04861.pdf for details.
-
- This module can replace a ConvModule with the conv block replaced by two
- conv blocks: a depthwise conv block and a pointwise conv block. The depthwise
- conv block contains depthwise-conv/norm/activation layers. The pointwise
- conv block contains pointwise-conv/norm/activation layers. It should be
- noted that there will be a norm/activation layer in the depthwise conv block
- if `norm_cfg` and `act_cfg` are specified.
-
- Args:
- in_channels (int): Number of channels in the input feature map.
- Same as that in ``nn._ConvNd``.
- out_channels (int): Number of channels produced by the convolution.
- Same as that in ``nn._ConvNd``.
- kernel_size (int | tuple[int]): Size of the convolving kernel.
- Same as that in ``nn._ConvNd``.
- stride (int | tuple[int]): Stride of the convolution.
- Same as that in ``nn._ConvNd``. Default: 1.
- padding (int | tuple[int]): Zero-padding added to both sides of
- the input. Same as that in ``nn._ConvNd``. Default: 0.
- dilation (int | tuple[int]): Spacing between kernel elements.
- Same as that in ``nn._ConvNd``. Default: 1.
- norm_cfg (dict): Default norm config for both depthwise ConvModule and
- pointwise ConvModule. Default: None.
- act_cfg (dict): Default activation config for both depthwise ConvModule
- and pointwise ConvModule. Default: dict(type='ReLU').
- dw_norm_cfg (dict): Norm config of depthwise ConvModule. If it is
- 'default', it will be the same as `norm_cfg`. Default: 'default'.
- dw_act_cfg (dict): Activation config of depthwise ConvModule. If it is
- 'default', it will be the same as `act_cfg`. Default: 'default'.
- pw_norm_cfg (dict): Norm config of pointwise ConvModule. If it is
- 'default', it will be the same as `norm_cfg`. Default: 'default'.
- pw_act_cfg (dict): Activation config of pointwise ConvModule. If it is
- 'default', it will be the same as `act_cfg`. Default: 'default'.
- kwargs (optional): Other shared arguments for depthwise and pointwise
- ConvModule. See ConvModule for ref.
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- kernel_size,
- stride=1,
- padding=0,
- dilation=1,
- norm_cfg=None,
- act_cfg=dict(type='ReLU'),
- dw_norm_cfg='default',
- dw_act_cfg='default',
- pw_norm_cfg='default',
- pw_act_cfg='default',
- **kwargs):
- super(DepthwiseSeparableConvModule, self).__init__()
- assert 'groups' not in kwargs, 'groups should not be specified'
-
- # if norm/activation config of depthwise/pointwise ConvModule is not
- # specified, use default config.
- dw_norm_cfg = dw_norm_cfg if dw_norm_cfg != 'default' else norm_cfg
- dw_act_cfg = dw_act_cfg if dw_act_cfg != 'default' else act_cfg
- pw_norm_cfg = pw_norm_cfg if pw_norm_cfg != 'default' else norm_cfg
- pw_act_cfg = pw_act_cfg if pw_act_cfg != 'default' else act_cfg
-
- # depthwise convolution
- self.depthwise_conv = ConvModule(
- in_channels,
- in_channels,
- kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- groups=in_channels,
- norm_cfg=dw_norm_cfg,
- act_cfg=dw_act_cfg,
- **kwargs)
-
- self.pointwise_conv = ConvModule(
- in_channels,
- out_channels,
- 1,
- norm_cfg=pw_norm_cfg,
- act_cfg=pw_act_cfg,
- **kwargs)
-
- def forward(self, x):
- x = self.depthwise_conv(x)
- x = self.pointwise_conv(x)
- return x
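For reference, a hedged usage sketch of the depthwise separable module deleted above; it assumes an mmcv installation that exposes `DepthwiseSeparableConvModule` with the signature shown in this diff:

```python
# Minimal usage sketch; assumes mmcv is installed and provides
# DepthwiseSeparableConvModule as in the deleted file above.
import torch
from mmcv.cnn import DepthwiseSeparableConvModule

# 3x3 depthwise conv over 32 channels, then a 1x1 pointwise conv to 64 channels,
# with BatchNorm and ReLU applied in both blocks.
conv = DepthwiseSeparableConvModule(
    in_channels=32,
    out_channels=64,
    kernel_size=3,
    padding=1,
    norm_cfg=dict(type='BN'),
    act_cfg=dict(type='ReLU'))

x = torch.randn(2, 32, 56, 56)
out = conv(x)
print(out.shape)  # torch.Size([2, 64, 56, 56])
```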
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/cc_attention.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/cc_attention.py
deleted file mode 100644
index 9207aa95e6730bd9b3362dee612059a5f0ce1c5e..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/cc_attention.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from annotator.uniformer.mmcv.cnn import PLUGIN_LAYERS, Scale
-
-
-def NEG_INF_DIAG(n, device):
- """Returns a diagonal matrix of size [n, n].
-
- All diagonal entries are "-inf". This avoids counting the overlapping
- element in the Criss-Cross attention twice.
- """
- return torch.diag(torch.tensor(float('-inf')).to(device).repeat(n), 0)
-
-
-@PLUGIN_LAYERS.register_module()
-class CrissCrossAttention(nn.Module):
- """Criss-Cross Attention Module.
-
- .. note::
- Before v1.3.13, we use a CUDA op. Since v1.3.13, we switch
- to a pure PyTorch and equivalent implementation. For more
- details, please refer to https://github.com/open-mmlab/mmcv/pull/1201.
-
- Speed comparison for one forward pass
-
- - Input size: [2,512,97,97]
- - Device: 1 NVIDIA GeForce RTX 2080 Ti
-
- +-----------------------+---------------+------------+---------------+
- | |PyTorch version|CUDA version|Relative speed |
- +=======================+===============+============+===============+
- |with torch.no_grad() |0.00554402 s |0.0299619 s |5.4x |
- +-----------------------+---------------+------------+---------------+
- |no with torch.no_grad()|0.00562803 s |0.0301349 s |5.4x |
- +-----------------------+---------------+------------+---------------+
-
- Args:
- in_channels (int): Channels of the input feature map.
- """
-
- def __init__(self, in_channels):
- super().__init__()
- self.query_conv = nn.Conv2d(in_channels, in_channels // 8, 1)
- self.key_conv = nn.Conv2d(in_channels, in_channels // 8, 1)
- self.value_conv = nn.Conv2d(in_channels, in_channels, 1)
- self.gamma = Scale(0.)
- self.in_channels = in_channels
-
- def forward(self, x):
- """forward function of Criss-Cross Attention.
-
- Args:
- x (Tensor): Input feature. \
- shape (batch_size, in_channels, height, width)
- Returns:
- Tensor: Output of the layer, with shape of \
- (batch_size, in_channels, height, width)
- """
- B, C, H, W = x.size()
- query = self.query_conv(x)
- key = self.key_conv(x)
- value = self.value_conv(x)
- energy_H = torch.einsum('bchw,bciw->bwhi', query, key) + NEG_INF_DIAG(
- H, query.device)
- energy_H = energy_H.transpose(1, 2)
- energy_W = torch.einsum('bchw,bchj->bhwj', query, key)
- attn = F.softmax(
- torch.cat([energy_H, energy_W], dim=-1), dim=-1) # [B,H,W,(H+W)]
- out = torch.einsum('bciw,bhwi->bchw', value, attn[..., :H])
- out += torch.einsum('bchj,bhwj->bchw', value, attn[..., H:])
-
- out = self.gamma(out) + x
- out = out.contiguous()
-
- return out
-
- def __repr__(self):
- s = self.__class__.__name__
- s += f'(in_channels={self.in_channels})'
- return s
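A hedged usage sketch of the criss-cross attention module deleted above; it assumes an mmcv build (or the vendored `annotator.uniformer.mmcv` package) that exposes `CrissCrossAttention`:

```python
# Hedged usage sketch; the import path is an assumption (a standard mmcv
# install also exposes this module under mmcv.ops).
import torch
from mmcv.ops import CrissCrossAttention

cca = CrissCrossAttention(in_channels=64)
x = torch.randn(2, 64, 32, 32)   # [batch, channels, height, width]
out = cca(x)                     # attention gathered along each row and column
print(out.shape)                 # torch.Size([2, 64, 32, 32]), same shape as the input
```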
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/runner/hooks/ema.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/runner/hooks/ema.py
deleted file mode 100644
index 15c7e68088f019802a59e7ae41cc1fe0c7f28f96..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/runner/hooks/ema.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from ...parallel import is_module_wrapper
-from ..hooks.hook import HOOKS, Hook
-
-
-@HOOKS.register_module()
-class EMAHook(Hook):
- r"""Exponential Moving Average Hook.
-
- Applies an exponential moving average to all parameters of the model during
- training. Every parameter keeps an EMA backup, which is updated by the
- formula below. EMAHook takes priority over EvalHook and CheckpointSaverHook.
-
- .. math::
-
- X_{\text{ema},t+1} = (1 - \text{momentum}) \times
- X_{\text{ema},t} + \text{momentum} \times X_t
-
- Args:
- momentum (float): The momentum used for updating ema parameter.
- Defaults to 0.0002.
- interval (int): Update ema parameter every interval iteration.
- Defaults to 1.
- warm_up (int): During first warm_up steps, we may use smaller momentum
- to update ema parameters more slowly. Defaults to 100.
- resume_from (str): The checkpoint path. Defaults to None.
- """
-
- def __init__(self,
- momentum=0.0002,
- interval=1,
- warm_up=100,
- resume_from=None):
- assert isinstance(interval, int) and interval > 0
- self.warm_up = warm_up
- self.interval = interval
- assert momentum > 0 and momentum < 1
- self.momentum = momentum**interval
- self.checkpoint = resume_from
-
- def before_run(self, runner):
- """To resume model with it's ema parameters more friendly.
-
- Register ema parameter as ``named_buffer`` to model
- """
- model = runner.model
- if is_module_wrapper(model):
- model = model.module
- self.param_ema_buffer = {}
- self.model_parameters = dict(model.named_parameters(recurse=True))
- for name, value in self.model_parameters.items():
- # "." is not allowed in module's buffer name
- buffer_name = f"ema_{name.replace('.', '_')}"
- self.param_ema_buffer[name] = buffer_name
- model.register_buffer(buffer_name, value.data.clone())
- self.model_buffers = dict(model.named_buffers(recurse=True))
- if self.checkpoint is not None:
- runner.resume(self.checkpoint)
-
- def after_train_iter(self, runner):
- """Update ema parameter every self.interval iterations."""
- curr_step = runner.iter
- # Warm up the momentum to account for instability at the beginning of training
- momentum = min(self.momentum,
- (1 + curr_step) / (self.warm_up + curr_step))
- if curr_step % self.interval != 0:
- return
- for name, parameter in self.model_parameters.items():
- buffer_name = self.param_ema_buffer[name]
- buffer_parameter = self.model_buffers[buffer_name]
- buffer_parameter.mul_(1 - momentum).add_(momentum, parameter.data)
-
- def after_train_epoch(self, runner):
- """We load parameter values from ema backup to model before the
- EvalHook."""
- self._swap_ema_parameters()
-
- def before_train_epoch(self, runner):
- """We recover model's parameter from ema backup after last epoch's
- EvalHook."""
- self._swap_ema_parameters()
-
- def _swap_ema_parameters(self):
- """Swap the parameter of model with parameter in ema_buffer."""
- for name, value in self.model_parameters.items():
- temp = value.data.clone()
- ema_buffer = self.model_buffers[self.param_ema_buffer[name]]
- value.data.copy_(ema_buffer.data)
- ema_buffer.data.copy_(temp)
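The EMA update rule from the docstring above can be checked in isolation; the sketch below is standalone, not tied to any runner, and uses illustrative values only:

```python
# Standalone sketch of the EMA update rule used by the hook above.
import torch

momentum = 0.0002
param = torch.tensor([1.0, 2.0, 3.0])   # current model parameter X_t
ema = torch.tensor([0.5, 0.5, 0.5])     # EMA buffer X_ema,t

# X_ema,t+1 = (1 - momentum) * X_ema,t + momentum * X_t
ema.mul_(1 - momentum).add_(param, alpha=momentum)
print(ema)  # each entry moves a tiny step (0.02%) toward the current parameter
```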
diff --git a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/utils.py b/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/utils.py
deleted file mode 100644
index 2b3c3d53cd2b6c72b481b59834cf809d3735b394..0000000000000000000000000000000000000000
--- a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/utils.py
+++ /dev/null
@@ -1,169 +0,0 @@
-import collections
-import os
-import tarfile
-import urllib
-import zipfile
-from pathlib import Path
-
-import numpy as np
-import torch
-from taming.data.helper_types import Annotation
-from torch._six import string_classes
-from torch.utils.data._utils.collate import np_str_obj_array_pattern, default_collate_err_msg_format
-from tqdm import tqdm
-
-
-def unpack(path):
- if path.endswith("tar.gz"):
- with tarfile.open(path, "r:gz") as tar:
- tar.extractall(path=os.path.split(path)[0])
- elif path.endswith("tar"):
- with tarfile.open(path, "r:") as tar:
- tar.extractall(path=os.path.split(path)[0])
- elif path.endswith("zip"):
- with zipfile.ZipFile(path, "r") as f:
- f.extractall(path=os.path.split(path)[0])
- else:
- raise NotImplementedError(
- "Unknown file extension: {}".format(os.path.splitext(path)[1])
- )
-
-
-def reporthook(bar):
- """tqdm progress bar for downloads."""
-
- def hook(b=1, bsize=1, tsize=None):
- if tsize is not None:
- bar.total = tsize
- bar.update(b * bsize - bar.n)
-
- return hook
-
-
-def get_root(name):
- base = "data/"
- root = os.path.join(base, name)
- os.makedirs(root, exist_ok=True)
- return root
-
-
-def is_prepared(root):
- return Path(root).joinpath(".ready").exists()
-
-
-def mark_prepared(root):
- Path(root).joinpath(".ready").touch()
-
-
-def prompt_download(file_, source, target_dir, content_dir=None):
- targetpath = os.path.join(target_dir, file_)
- while not os.path.exists(targetpath):
- if content_dir is not None and os.path.exists(
- os.path.join(target_dir, content_dir)
- ):
- break
- print(
- "Please download '{}' from '{}' to '{}'.".format(file_, source, targetpath)
- )
- if content_dir is not None:
- print(
- "Or place its content into '{}'.".format(
- os.path.join(target_dir, content_dir)
- )
- )
- input("Press Enter when done...")
- return targetpath
-
-
-def download_url(file_, url, target_dir):
- targetpath = os.path.join(target_dir, file_)
- os.makedirs(target_dir, exist_ok=True)
- with tqdm(
- unit="B", unit_scale=True, unit_divisor=1024, miniters=1, desc=file_
- ) as bar:
- urllib.request.urlretrieve(url, targetpath, reporthook=reporthook(bar))
- return targetpath
-
-
-def download_urls(urls, target_dir):
- paths = dict()
- for fname, url in urls.items():
- outpath = download_url(fname, url, target_dir)
- paths[fname] = outpath
- return paths
-
-
-def quadratic_crop(x, bbox, alpha=1.0):
- """bbox is xmin, ymin, xmax, ymax"""
- im_h, im_w = x.shape[:2]
- bbox = np.array(bbox, dtype=np.float32)
- bbox = np.clip(bbox, 0, max(im_h, im_w))
- center = 0.5 * (bbox[0] + bbox[2]), 0.5 * (bbox[1] + bbox[3])
- w = bbox[2] - bbox[0]
- h = bbox[3] - bbox[1]
- l = int(alpha * max(w, h))
- l = max(l, 2)
-
- required_padding = -1 * min(
- center[0] - l, center[1] - l, im_w - (center[0] + l), im_h - (center[1] + l)
- )
- required_padding = int(np.ceil(required_padding))
- if required_padding > 0:
- padding = [
- [required_padding, required_padding],
- [required_padding, required_padding],
- ]
- padding += [[0, 0]] * (len(x.shape) - 2)
- x = np.pad(x, padding, "reflect")
- center = center[0] + required_padding, center[1] + required_padding
- xmin = int(center[0] - l / 2)
- ymin = int(center[1] - l / 2)
- return np.array(x[ymin : ymin + l, xmin : xmin + l, ...])
-
-
-def custom_collate(batch):
- r"""source: pytorch 1.9.0, only one modification to original code """
-
- elem = batch[0]
- elem_type = type(elem)
- if isinstance(elem, torch.Tensor):
- out = None
- if torch.utils.data.get_worker_info() is not None:
- # If we're in a background process, concatenate directly into a
- # shared memory tensor to avoid an extra copy
- numel = sum([x.numel() for x in batch])
- storage = elem.storage()._new_shared(numel)
- out = elem.new(storage)
- return torch.stack(batch, 0, out=out)
- elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
- and elem_type.__name__ != 'string_':
- if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':
- # array of string classes and object
- if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
- raise TypeError(default_collate_err_msg_format.format(elem.dtype))
-
- return custom_collate([torch.as_tensor(b) for b in batch])
- elif elem.shape == (): # scalars
- return torch.as_tensor(batch)
- elif isinstance(elem, float):
- return torch.tensor(batch, dtype=torch.float64)
- elif isinstance(elem, int):
- return torch.tensor(batch)
- elif isinstance(elem, string_classes):
- return batch
- elif isinstance(elem, collections.abc.Mapping):
- return {key: custom_collate([d[key] for d in batch]) for key in elem}
- elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
- return elem_type(*(custom_collate(samples) for samples in zip(*batch)))
- if isinstance(elem, collections.abc.Sequence) and isinstance(elem[0], Annotation): # added
- return batch # added
- elif isinstance(elem, collections.abc.Sequence):
- # check to make sure that the elements in batch have consistent size
- it = iter(batch)
- elem_size = len(next(it))
- if not all(len(elem) == elem_size for elem in it):
- raise RuntimeError('each element in list of batch should be of equal size')
- transposed = zip(*batch)
- return [custom_collate(samples) for samples in transposed]
-
- raise TypeError(default_collate_err_msg_format.format(elem_type))
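A hedged sketch of how the download/prepare helpers deleted above are typically chained; the URL and file name are placeholders, and the import assumes a torch version that still provides `torch._six` (which this module requires at import time):

```python
# Hedged sketch of the prepare-once pattern implemented by the helpers above.
# The URL and archive name are placeholders, not real endpoints.
import os
from taming.data.utils import (get_root, is_prepared, mark_prepared,
                               download_url, unpack)

root = get_root("my_dataset")                          # creates data/my_dataset
if not is_prepared(root):
    archive = download_url("images.tar.gz",
                           "https://example.com/images.tar.gz",  # placeholder URL
                           root)
    unpack(archive)                                    # extracts next to the archive
    mark_prepared(root)                                # touches data/my_dataset/.ready
print(os.listdir(root))
```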
diff --git a/spaces/QuoQA-NLP/QuoQaGo/README.md b/spaces/QuoQA-NLP/QuoQaGo/README.md
deleted file mode 100644
index ad53c59a871c2044b7b4685e2dc4d84f8a9fca90..0000000000000000000000000000000000000000
--- a/spaces/QuoQA-NLP/QuoQaGo/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: QuoQaGo
-emoji: 🐻
-colorFrom: blue
-colorTo: green
-sdk: streamlit
-sdk_version: 1.10.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Rakot2223/faster-whisper-webui/src/config.py b/spaces/Rakot2223/faster-whisper-webui/src/config.py
deleted file mode 100644
index c03abf92654e4ae47ebb044a58d7d2a27c4872c0..0000000000000000000000000000000000000000
--- a/spaces/Rakot2223/faster-whisper-webui/src/config.py
+++ /dev/null
@@ -1,137 +0,0 @@
-from enum import Enum
-import urllib
-
-import os
-from typing import List
-from urllib.parse import urlparse
-import json5
-import torch
-
-from tqdm import tqdm
-
-class ModelConfig:
- def __init__(self, name: str, url: str, path: str = None, type: str = "whisper"):
- """
- Initialize a model configuration.
-
- name: Name of the model
- url: URL to download the model from
- path: Path to the model file. If not set, the model will be downloaded from the URL.
- type: Type of model. Can be whisper or huggingface.
- """
- self.name = name
- self.url = url
- self.path = path
- self.type = type
-
-class VadInitialPromptMode(Enum):
- PREPEND_ALL_SEGMENTS = 1
- PREPREND_FIRST_SEGMENT = 2
-
- @staticmethod
- def from_string(s: str):
- normalized = s.lower() if s is not None else None
-
- if normalized == "prepend_all_segments":
- return VadInitialPromptMode.PREPEND_ALL_SEGMENTS
- elif normalized == "prepend_first_segment":
- return VadInitialPromptMode.PREPREND_FIRST_SEGMENT
- else:
- raise ValueError(f"Invalid value for VadInitialPromptMode: {s}")
-
-class ApplicationConfig:
- def __init__(self, models: List[ModelConfig] = [], input_audio_max_duration: int = 600,
- share: bool = False, server_name: str = None, server_port: int = 7860,
- queue_concurrency_count: int = 1, delete_uploaded_files: bool = True,
- whisper_implementation: str = "whisper",
- default_model_name: str = "medium", default_vad: str = "silero-vad",
- vad_parallel_devices: str = "", vad_cpu_cores: int = 1, vad_process_timeout: int = 1800,
- auto_parallel: bool = False, output_dir: str = None,
- model_dir: str = None, device: str = None,
- verbose: bool = True, task: str = "transcribe", language: str = None,
- vad_initial_prompt_mode: str = "prepend_first_segment",
- vad_merge_window: float = 5, vad_max_merge_size: float = 30,
- vad_padding: float = 1, vad_prompt_window: float = 3,
- temperature: float = 0, best_of: int = 5, beam_size: int = 5,
- patience: float = None, length_penalty: float = None,
- suppress_tokens: str = "-1", initial_prompt: str = None,
- condition_on_previous_text: bool = True, fp16: bool = True,
- compute_type: str = "float16",
- temperature_increment_on_fallback: float = 0.2, compression_ratio_threshold: float = 2.4,
- logprob_threshold: float = -1.0, no_speech_threshold: float = 0.6):
-
- self.models = models
-
- # WebUI settings
- self.input_audio_max_duration = input_audio_max_duration
- self.share = share
- self.server_name = server_name
- self.server_port = server_port
- self.queue_concurrency_count = queue_concurrency_count
- self.delete_uploaded_files = delete_uploaded_files
-
- self.whisper_implementation = whisper_implementation
- self.default_model_name = default_model_name
- self.default_vad = default_vad
- self.vad_parallel_devices = vad_parallel_devices
- self.vad_cpu_cores = vad_cpu_cores
- self.vad_process_timeout = vad_process_timeout
- self.auto_parallel = auto_parallel
- self.output_dir = output_dir
-
- self.model_dir = model_dir
- self.device = device
- self.verbose = verbose
- self.task = task
- self.language = language
- self.vad_initial_prompt_mode = vad_initial_prompt_mode
- self.vad_merge_window = vad_merge_window
- self.vad_max_merge_size = vad_max_merge_size
- self.vad_padding = vad_padding
- self.vad_prompt_window = vad_prompt_window
- self.temperature = temperature
- self.best_of = best_of
- self.beam_size = beam_size
- self.patience = patience
- self.length_penalty = length_penalty
- self.suppress_tokens = suppress_tokens
- self.initial_prompt = initial_prompt
- self.condition_on_previous_text = condition_on_previous_text
- self.fp16 = fp16
- self.compute_type = compute_type
- self.temperature_increment_on_fallback = temperature_increment_on_fallback
- self.compression_ratio_threshold = compression_ratio_threshold
- self.logprob_threshold = logprob_threshold
- self.no_speech_threshold = no_speech_threshold
-
- def get_model_names(self):
- return [ x.name for x in self.models ]
-
- def update(self, **new_values):
- result = ApplicationConfig(**self.__dict__)
-
- for key, value in new_values.items():
- setattr(result, key, value)
- return result
-
- @staticmethod
- def create_default(**kwargs):
- app_config = ApplicationConfig.parse_file(os.environ.get("WHISPER_WEBUI_CONFIG", "config.json5"))
-
- # Update with kwargs
- if len(kwargs) > 0:
- app_config = app_config.update(**kwargs)
- return app_config
-
- @staticmethod
- def parse_file(config_path: str):
- import json5
-
- with open(config_path, "r") as f:
- # Load using json5
- data = json5.load(f)
- data_models = data.pop("models", [])
-
- models = [ ModelConfig(**x) for x in data_models ]
-
- return ApplicationConfig(models, **data)
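A hedged sketch of how the configuration class above is consumed; it assumes the `src.config` import path used by this Space and an existing `config.json5` whose keys mirror the constructor arguments (with a `models` list of `ModelConfig` entries):

```python
# Hedged usage sketch; assumes config.json5 exists next to the app.
import os
from src.config import ApplicationConfig

# Either parse an explicit file...
config = ApplicationConfig.parse_file(
    os.environ.get("WHISPER_WEBUI_CONFIG", "config.json5"))

# ...or start from the defaults and override selected fields.
config = ApplicationConfig.create_default(default_model_name="small",
                                           vad_max_merge_size=60)

print(config.get_model_names())   # names of the ModelConfig entries from config.json5
print(config.default_vad)         # "silero-vad" unless overridden
```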
diff --git a/spaces/RamAnanth1/whisper_to_emotion/README.md b/spaces/RamAnanth1/whisper_to_emotion/README.md
deleted file mode 100644
index dcbdbfb581486abc309836b28aa5f9eae4cced0a..0000000000000000000000000000000000000000
--- a/spaces/RamAnanth1/whisper_to_emotion/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Whisper To Emotion
-emoji: 🏃
-colorFrom: blue
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.15.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Redgon/bingo/src/components/turn-counter.tsx b/spaces/Redgon/bingo/src/components/turn-counter.tsx
deleted file mode 100644
index 08a9e488f044802a8600f4d195b106567c35aab4..0000000000000000000000000000000000000000
--- a/spaces/Redgon/bingo/src/components/turn-counter.tsx
+++ /dev/null
@@ -1,23 +0,0 @@
-import React from 'react'
-import { Throttling } from '@/lib/bots/bing/types'
-
-export interface TurnCounterProps {
- throttling?: Throttling
-}
-
-export function TurnCounter({ throttling }: TurnCounterProps) {
- if (!throttling) {
- return null
- }
-
- return (
-
-
- {throttling.numUserMessagesInConversation}
- 共
- {throttling.maxNumUserMessagesInConversation}
-
-
-
- )
-}
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/models/decode_heads/fcn_head.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/models/decode_heads/fcn_head.py
deleted file mode 100644
index edb32c283fa4baada6b4a0bf3f7540c3580c3468..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/models/decode_heads/fcn_head.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import torch
-import torch.nn as nn
-from annotator.uniformer.mmcv.cnn import ConvModule
-
-from ..builder import HEADS
-from .decode_head import BaseDecodeHead
-
-
-@HEADS.register_module()
-class FCNHead(BaseDecodeHead):
- """Fully Convolution Networks for Semantic Segmentation.
-
- This head is the implementation of `FCNNet <https://arxiv.org/abs/1411.4038>`_.
-
- Args:
- num_convs (int): Number of convs in the head. Default: 2.
- kernel_size (int): The kernel size for convs in the head. Default: 3.
- concat_input (bool): Whether concat the input and output of convs
- before classification layer.
- dilation (int): The dilation rate for convs in the head. Default: 1.
- """
-
- def __init__(self,
- num_convs=2,
- kernel_size=3,
- concat_input=True,
- dilation=1,
- **kwargs):
- assert num_convs >= 0 and dilation > 0 and isinstance(dilation, int)
- self.num_convs = num_convs
- self.concat_input = concat_input
- self.kernel_size = kernel_size
- super(FCNHead, self).__init__(**kwargs)
- if num_convs == 0:
- assert self.in_channels == self.channels
-
- conv_padding = (kernel_size // 2) * dilation
- convs = []
- convs.append(
- ConvModule(
- self.in_channels,
- self.channels,
- kernel_size=kernel_size,
- padding=conv_padding,
- dilation=dilation,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg))
- for i in range(num_convs - 1):
- convs.append(
- ConvModule(
- self.channels,
- self.channels,
- kernel_size=kernel_size,
- padding=conv_padding,
- dilation=dilation,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg))
- if num_convs == 0:
- self.convs = nn.Identity()
- else:
- self.convs = nn.Sequential(*convs)
- if self.concat_input:
- self.conv_cat = ConvModule(
- self.in_channels + self.channels,
- self.channels,
- kernel_size=kernel_size,
- padding=kernel_size // 2,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
-
- def forward(self, inputs):
- """Forward function."""
- x = self._transform_inputs(inputs)
- output = self.convs(x)
- if self.concat_input:
- output = self.conv_cat(torch.cat([x, output], dim=1))
- output = self.cls_seg(output)
- return output
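A hedged instantiation sketch for the FCN head above; it assumes an importable mmseg package (or the vendored copy) and uses the `BaseDecodeHead` arguments it inherits (`in_channels`, `channels`, `num_classes`):

```python
# Hedged sketch; exact constructor arguments of BaseDecodeHead can vary
# between mmseg versions, so treat this as illustrative only.
import torch
from mmseg.models.decode_heads import FCNHead

head = FCNHead(num_convs=2, kernel_size=3, concat_input=True,
               in_channels=64, channels=32, num_classes=19)

feat = torch.randn(2, 64, 32, 32)   # a single-scale feature map
logits = head([feat])               # the default input_transform picks in_index=-1
print(logits.shape)                 # torch.Size([2, 19, 32, 32])
```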
diff --git a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/outpainting_example2.py b/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/outpainting_example2.py
deleted file mode 100644
index febe614ee3ba2cf19b14733ac844e19ac8bc6c64..0000000000000000000000000000000000000000
--- a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/outpainting_example2.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# %%
-# an example script of how to do outpainting with diffusers img2img pipeline
-# should be compatible with any stable diffusion model
-# (only tested with runwayml/stable-diffusion-v1-5)
-
-from typing import Callable, List, Optional, Union
-from PIL import Image
-import PIL
-import numpy as np
-import torch
-
-from diffusers import StableDiffusionImg2ImgPipeline
-from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
-from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import preprocess
-
-pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
- "runwayml/stable-diffusion-v1-5",
- revision="fp16",
- torch_dtype=torch.float16,
-)
-
-pipe.set_use_memory_efficient_attention_xformers(True)
-pipe.to("cuda")
-# %%
-# load the image, extract the mask
-rgba = Image.open('primed_image_with_alpha_channel.png')
-mask_full = np.array(rgba)[:, :, 3] == 0
-rgb = rgba.convert('RGB')
-# %%
-
-# resize/convert the mask to the right size
-# for 512x512, the mask should be 1x4x64x64
-hw = np.array(mask_full.shape)
-h, w = (hw - hw % 32) // 8
-mask_image = Image.fromarray(mask_full).resize((w, h), Image.NEAREST)
-mask = (np.array(mask_image) == 0)[None, None]
-mask = np.concatenate([mask]*4, axis=1)
-mask = torch.from_numpy(mask).to('cuda')
-mask.shape
-
-# %%
-
-
-@torch.no_grad()
-def outpaint(
- self: StableDiffusionImg2ImgPipeline,
- prompt: Union[str, List[str]] = None,
- image: Union[torch.FloatTensor, PIL.Image.Image] = None,
- strength: float = 0.8,
- num_inference_steps: Optional[int] = 50,
- guidance_scale: Optional[float] = 7.5,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: Optional[int] = 1,
- eta: Optional[float] = 0.0,
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
- prompt_embeds: Optional[torch.FloatTensor] = None,
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
- callback_steps: Optional[int] = 1,
- **kwargs,
-):
- r"""
- copy of the original img2img pipeline's __call__()
- https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py
-
- Changes relative to the original pipeline are marked with comments.
- """
- # message = "Please use `image` instead of `init_image`."
- # init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
- # image = init_image or image
-
- # 1. Check inputs. Raise error if not correct
- self.check_inputs(prompt, strength, callback_steps,
- negative_prompt, prompt_embeds, negative_prompt_embeds)
-
- # 2. Define call parameters
- if prompt is not None and isinstance(prompt, str):
- batch_size = 1
- elif prompt is not None and isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- batch_size = prompt_embeds.shape[0]
- device = self._execution_device
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- do_classifier_free_guidance = guidance_scale > 1.0
-
- # 3. Encode input prompt
- prompt_embeds = self._encode_prompt(
- prompt,
- device,
- num_images_per_prompt,
- do_classifier_free_guidance,
- negative_prompt,
- prompt_embeds=prompt_embeds,
- negative_prompt_embeds=negative_prompt_embeds,
- )
-
- # 4. Preprocess image
- image = preprocess(image)
-
- # 5. set timesteps
- self.scheduler.set_timesteps(num_inference_steps, device=device)
- timesteps, num_inference_steps = self.get_timesteps(
- num_inference_steps, strength, device)
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
-
- # 6. Prepare latent variables
- latents = self.prepare_latents(
- image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator
- )
-
- #
- # store the encoded version of the original image to overwrite
- # what the UNET generates "underneath" our image on each step
- encoded_original = (self.vae.config.scaling_factor *
- self.vae.encode(
- image.to(latents.device, latents.dtype)
- ).latent_dist.mean)
- #
-
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
- # 8. Denoising loop
- num_warmup_steps = len(timesteps) - \
- num_inference_steps * self.scheduler.order
- with self.progress_bar(total=num_inference_steps) as progress_bar:
- for i, t in enumerate(timesteps):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat(
- [latents] * 2) if do_classifier_free_guidance else latents
- latent_model_input = self.scheduler.scale_model_input(
- latent_model_input, t)
-
- # predict the noise residual
- noise_pred = self.unet(latent_model_input, t,
- encoder_hidden_states=prompt_embeds).sample
-
- # perform guidance
- if do_classifier_free_guidance:
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred = noise_pred_uncond + guidance_scale * \
- (noise_pred_text - noise_pred_uncond)
-
- # compute the previous noisy sample x_t -> x_t-1
- latents = self.scheduler.step(
- noise_pred, t, latents, **extra_step_kwargs).prev_sample
-
- # paste unmasked regions from the original image
- noise = torch.randn(
- encoded_original.shape, generator=generator, device=device)
- noised_encoded_original = self.scheduler.add_noise(
- encoded_original, noise, t).to(noise_pred.device, noise_pred.dtype)
- latents[mask] = noised_encoded_original[mask]
- #
-
- # call the callback, if provided
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
- progress_bar.update()
- if callback is not None and i % callback_steps == 0:
- callback(i, t, latents)
-
- # 9. Post-processing
- image = self.decode_latents(latents)
-
- # 10. Run safety checker
- image, has_nsfw_concept = self.run_safety_checker(
- image, device, prompt_embeds.dtype)
-
- # 11. Convert to PIL
- if output_type == "pil":
- image = self.numpy_to_pil(image)
-
- if not return_dict:
- return (image, has_nsfw_concept)
-
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
-
-
-# %%
-image = outpaint(
- pipe,
- image=rgb,
- prompt="forest in the style of Tim Hildebrandt",
- strength=0.5,
- num_inference_steps=50,
- guidance_scale=7.5,
-).images[0]
-image
-
-# %%
-# the vae does lossy encoding, we could get better quality if we pasted the original image into our result.
-# this may yield visible edges
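Following up on that closing comment, a hedged sketch of the compositing step it suggests, reusing the `image`, `rgb`, and `rgba` variables from the script above (the resize is an assumption, in case the pipeline output size differs from the primed image):

```python
# Hedged sketch: paste the original (non-transparent) pixels over the outpainted
# result, so only the formerly transparent region comes from the VAE decode.
import numpy as np
from PIL import Image

result = np.array(image.resize(rgba.size))   # outpainted result at the original size
original = np.array(rgb)                     # original RGB content
alpha = np.array(rgba)[:, :, 3] > 0          # True where original pixels exist

composited = result.copy()
composited[alpha] = original[alpha]          # keep original pixels; seams may be visible
Image.fromarray(composited)
```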
diff --git a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/torch_utils/ops/conv2d_resample.py b/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/torch_utils/ops/conv2d_resample.py
deleted file mode 100644
index cd4750744c83354bab78704d4ef51ad1070fcc4a..0000000000000000000000000000000000000000
--- a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/torch_utils/ops/conv2d_resample.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""2D convolution with optional up/downsampling."""
-
-import torch
-
-from .. import misc
-from . import conv2d_gradfix
-from . import upfirdn2d
-from .upfirdn2d import _parse_padding
-from .upfirdn2d import _get_filter_size
-
-#----------------------------------------------------------------------------
-
-def _get_weight_shape(w):
- with misc.suppress_tracer_warnings(): # this value will be treated as a constant
- shape = [int(sz) for sz in w.shape]
- misc.assert_shape(w, shape)
- return shape
-
-#----------------------------------------------------------------------------
-
-def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True):
- """Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations.
- """
- out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
-
- # Flip weight if requested.
- if not flip_weight: # conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False).
- w = w.flip([2, 3])
-
- # Workaround performance pitfall in cuDNN 8.0.5, triggered when using
- # 1x1 kernel + memory_format=channels_last + less than 64 channels.
- if kw == 1 and kh == 1 and stride == 1 and padding in [0, [0, 0], (0, 0)] and not transpose:
- if x.stride()[1] == 1 and min(out_channels, in_channels_per_group) < 64:
- if out_channels <= 4 and groups == 1:
- in_shape = x.shape
- x = w.squeeze(3).squeeze(2) @ x.reshape([in_shape[0], in_channels_per_group, -1])
- x = x.reshape([in_shape[0], out_channels, in_shape[2], in_shape[3]])
- else:
- x = x.to(memory_format=torch.contiguous_format)
- w = w.to(memory_format=torch.contiguous_format)
- x = conv2d_gradfix.conv2d(x, w, groups=groups)
- return x.to(memory_format=torch.channels_last)
-
- # Otherwise => execute using conv2d_gradfix.
- op = conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d
- return op(x, w, stride=stride, padding=padding, groups=groups)
-
-#----------------------------------------------------------------------------
-
-@misc.profiled_function
-def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):
- r"""2D convolution with optional up/downsampling.
-
- Padding is performed only once at the beginning, not between the operations.
-
- Args:
- x: Input tensor of shape
- `[batch_size, in_channels, in_height, in_width]`.
- w: Weight tensor of shape
- `[out_channels, in_channels//groups, kernel_height, kernel_width]`.
- f: Low-pass filter for up/downsampling. Must be prepared beforehand by
- calling upfirdn2d.setup_filter(). None = identity (default).
- up: Integer upsampling factor (default: 1).
- down: Integer downsampling factor (default: 1).
- padding: Padding with respect to the upsampled image. Can be a single number
- or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
- (default: 0).
- groups: Split input channels into N groups (default: 1).
- flip_weight: False = convolution, True = correlation (default: True).
- flip_filter: False = convolution, True = correlation (default: False).
-
- Returns:
- Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
- """
- # Validate arguments.
- assert isinstance(x, torch.Tensor) and (x.ndim == 4)
- assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)
- assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32)
- assert isinstance(up, int) and (up >= 1)
- assert isinstance(down, int) and (down >= 1)
- assert isinstance(groups, int) and (groups >= 1)
- out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
- fw, fh = _get_filter_size(f)
- px0, px1, py0, py1 = _parse_padding(padding)
-
- # Adjust padding to account for up/downsampling.
- if up > 1:
- px0 += (fw + up - 1) // 2
- px1 += (fw - up) // 2
- py0 += (fh + up - 1) // 2
- py1 += (fh - up) // 2
- if down > 1:
- px0 += (fw - down + 1) // 2
- px1 += (fw - down) // 2
- py0 += (fh - down + 1) // 2
- py1 += (fh - down) // 2
-
- # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
- if kw == 1 and kh == 1 and (down > 1 and up == 1):
- x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
- x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
- return x
-
- # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
- if kw == 1 and kh == 1 and (up > 1 and down == 1):
- x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
- x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
- return x
-
- # Fast path: downsampling only => use strided convolution.
- if down > 1 and up == 1:
- x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
- x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
- return x
-
- # Fast path: upsampling with optional downsampling => use transpose strided convolution.
- if up > 1:
- if groups == 1:
- w = w.transpose(0, 1)
- else:
- w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)
- w = w.transpose(1, 2)
- w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw)
- px0 -= kw - 1
- px1 -= kw - up
- py0 -= kh - 1
- py1 -= kh - up
- pxt = max(min(-px0, -px1), 0)
- pyt = max(min(-py0, -py1), 0)
- x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
- x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)
- if down > 1:
- x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
- return x
-
- # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.
- if up == 1 and down == 1:
- if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:
- return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)
-
- # Fallback: Generic reference implementation.
- x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
- x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
- if down > 1:
- x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
- return x
-
-#----------------------------------------------------------------------------
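A hedged usage sketch for `conv2d_resample` above; it assumes the repo's `torch_utils` package is importable and relies on the CPU reference fallbacks in `upfirdn2d` and `conv2d_gradfix`:

```python
# Hedged sketch; the torch_utils import path matches the vendored package in
# this Space, and the expected output shape is stated under these assumptions.
import torch
from torch_utils.ops import upfirdn2d
from torch_utils.ops.conv2d_resample import conv2d_resample

x = torch.randn(1, 64, 32, 32)            # [batch, in_channels, H, W]
w = torch.randn(128, 64, 3, 3)            # [out_channels, in_channels // groups, kh, kw]
f = upfirdn2d.setup_filter([1, 3, 3, 1])  # low-pass filter prepared beforehand

y = conv2d_resample(x=x, w=w, f=f, up=2, padding=1)
print(y.shape)                            # expected torch.Size([1, 128, 64, 64])
```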
diff --git a/spaces/Ryandhikaw/rvc-hololive/infer_pack/attentions.py b/spaces/Ryandhikaw/rvc-hololive/infer_pack/attentions.py
deleted file mode 100644
index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000
--- a/spaces/Ryandhikaw/rvc-hololive/infer_pack/attentions.py
+++ /dev/null
@@ -1,417 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from infer_pack import commons
-from infer_pack import modules
-from infer_pack.modules import LayerNorm
-
-
-class Encoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- window_size=10,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- window_size=window_size,
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- proximal_bias=False,
- proximal_init=True,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- proximal_bias=proximal_bias,
- proximal_init=proximal_init,
- )
- )
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(
- MultiHeadAttention(
- hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- causal=True,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
- device=x.device, dtype=x.dtype
- )
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(
- self,
- channels,
- out_channels,
- n_heads,
- p_dropout=0.0,
- window_size=None,
- heads_share=True,
- block_length=None,
- proximal_bias=False,
- proximal_init=False,
- ):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
- self.emb_rel_v = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert (
- t_s == t_t
- ), "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(
- query / math.sqrt(self.k_channels), key_relative_embeddings
- )
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(
- device=scores.device, dtype=scores.dtype
- )
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert (
- t_s == t_t
- ), "Local attention is only available for self-attention."
- block_mask = (
- torch.ones_like(scores)
- .triu(-self.block_length)
- .tril(self.block_length)
- )
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(
- self.emb_rel_v, t_s
- )
- output = output + self._matmul_with_relative_values(
- relative_weights, value_relative_embeddings
- )
- output = (
- output.transpose(2, 3).contiguous().view(b, d, t_t)
- ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
- )
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[
- :, slice_start_position:slice_end_position
- ]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
-
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(
- x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
- )
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
- :, :, :length, length - 1 :
- ]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
- # pad along the column dimension
- x = F.pad(
- x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
- )
- x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- filter_channels,
- kernel_size,
- p_dropout=0.0,
- activation=None,
- causal=False,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
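A hedged usage sketch for the `Encoder` above; it assumes the vendored `infer_pack` package (which provides the `commons` and `modules` helpers it imports) is on the path:

```python
# Hedged sketch; hyperparameter values are illustrative only.
import torch
from infer_pack.attentions import Encoder

enc = Encoder(hidden_channels=192, filter_channels=768,
              n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.1)

x = torch.randn(1, 192, 100)     # [batch, hidden_channels, time]
x_mask = torch.ones(1, 1, 100)   # 1 where frames are valid
out = enc(x, x_mask)
print(out.shape)                 # torch.Size([1, 192, 100])
```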
diff --git a/spaces/Samlund56/blip-image-captioning-large/app.py b/spaces/Samlund56/blip-image-captioning-large/app.py
deleted file mode 100644
index 9f8f3159b0cdc57d67c50e65a3fb858e4a7cac5f..0000000000000000000000000000000000000000
--- a/spaces/Samlund56/blip-image-captioning-large/app.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import gradio as gr
-import torch
-from transformers import BlipForConditionalGeneration, BlipProcessor
-
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
-model_image_captioning = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large").to(device)
-
-def inference(raw_image, question, decoding_strategy):
- inputs = processor(images=raw_image, text=question, return_tensors="pt")
-
- if decoding_strategy == "Beam search":
- inputs["max_length"] = 20
- inputs["num_beams"] = 5
- elif decoding_strategy == "Nucleus sampling":
- inputs["max_length"] = 20
- inputs["num_beams"] = 1
- inputs["do_sample"] = True
- inputs["top_k"] = 50
- inputs["top_p"] = 0.95
- elif decoding_strategy == "Contrastive search":
- inputs["penalty_alpha"] = 0.6
- inputs["top_k"] = 4
- inputs["max_length"] = 512
-
- out = model_image_captioning.generate(**inputs)
- return processor.batch_decode(out, skip_special_tokens=True)[0]
-
-inputs = [
- gr.inputs.Image(type='pil', label="Image"),
- gr.inputs.Textbox(lines=2, label="Context (optional)", placeholder="Enter context text"),
- gr.inputs.Radio(
- choices=["Beam search", "Nucleus sampling", "Contrastive search"],
- type="value",
- default="Contrastive search",
- label="Caption Decoding Strategy"
- )
-]
-
-outputs = gr.outputs.Textbox(label="Caption")
-
-title = "BLIP Image Captioning"
-description = "Generate captions for images using BLIP model."
-
-gr.Interface(
- inference,
- inputs,
- outputs,
- title=title,
- description=description,
-).launch(enable_queue=True)
\ No newline at end of file
diff --git a/spaces/ServerX/PorcoDiaz/tools/infer/train-index.py b/spaces/ServerX/PorcoDiaz/tools/infer/train-index.py
deleted file mode 100644
index 44b447ef32148c181eb4bcd9013a22a82371b82c..0000000000000000000000000000000000000000
--- a/spaces/ServerX/PorcoDiaz/tools/infer/train-index.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Format: cid is used directly as the index's built-in position; aid does not fit, so it is looked up through a dict (there are only about 50k entries anyway).
-"""
-import os
-import logging
-
-logger = logging.getLogger(__name__)
-
-import faiss
-import numpy as np
-
-# ########### If starting from raw features, they must be saved first
-inp_root = r"E:\codes\py39\dataset\mi\2-co256"
-npys = []
-for name in sorted(list(os.listdir(inp_root))):
- phone = np.load("%s/%s" % (inp_root, name))
- npys.append(phone)
-big_npy = np.concatenate(npys, 0)
-logger.debug(big_npy.shape) # (6196072, 192)#fp32#4.43G
-np.save("infer/big_src_feature_mi.npy", big_npy)
-
-##################train+add
-# big_npy=np.load("/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/inference_f0/big_src_feature_mi.npy")
-logger.debug(big_npy.shape)
-index = faiss.index_factory(256, "IVF512,Flat") # mi
-logger.info("Training...")
-index_ivf = faiss.extract_index_ivf(index) #
-index_ivf.nprobe = 9
-index.train(big_npy)
-faiss.write_index(index, "infer/trained_IVF512_Flat_mi_baseline_src_feat.index")
-logger.info("Adding...")
-index.add(big_npy)
-faiss.write_index(index, "infer/added_IVF512_Flat_mi_baseline_src_feat.index")
-"""
-Sizes (all FP32)
-big_src_feature 2.95G
- (3098036, 256)
-big_emb 4.43G
- (6196072, 192)
-big_emb is twice the size because the features are repeated and then pitch is appended
-
-"""
diff --git a/spaces/Shawn37/UTR_LM/esm/._multihead_attention.py b/spaces/Shawn37/UTR_LM/esm/._multihead_attention.py
deleted file mode 100644
index c9959ba1b4b55ed6fdaca9e450120196a9d3a4dd..0000000000000000000000000000000000000000
Binary files a/spaces/Shawn37/UTR_LM/esm/._multihead_attention.py and /dev/null differ
diff --git a/spaces/SkyYeXianer/vits-uma-genshin-honkai/text/__init__.py b/spaces/SkyYeXianer/vits-uma-genshin-honkai/text/__init__.py
deleted file mode 100644
index 663c4b6416affb53c9dc56dddbc8b2b65d4bf518..0000000000000000000000000000000000000000
--- a/spaces/SkyYeXianer/vits-uma-genshin-honkai/text/__init__.py
+++ /dev/null
@@ -1,57 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-from text import cleaners
-from text.symbols import symbols
-
-
-# Mappings from symbol to numeric ID and vice versa:
-_symbol_to_id = {s: i for i, s in enumerate(symbols)}
-_id_to_symbol = {i: s for i, s in enumerate(symbols)}
-
-
-def text_to_sequence(text, symbols, cleaner_names):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
- text: string to convert to a sequence
- cleaner_names: names of the cleaner functions to run the text through
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
- sequence = []
-
- clean_text = _clean_text(text, cleaner_names)
- for symbol in clean_text:
- if symbol not in _symbol_to_id.keys():
- continue
- symbol_id = _symbol_to_id[symbol]
- sequence += [symbol_id]
- return sequence, clean_text
-
-
-def cleaned_text_to_sequence(cleaned_text):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
- text: string to convert to a sequence
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()]
- return sequence
-
-
-def sequence_to_text(sequence):
- '''Converts a sequence of IDs back to a string'''
- result = ''
- for symbol_id in sequence:
- s = _id_to_symbol[symbol_id]
- result += s
- return result
-
-
-def _clean_text(text, cleaner_names):
- for name in cleaner_names:
- cleaner = getattr(cleaners, name)
- if not cleaner:
- raise Exception('Unknown cleaner: %s' % name)
- text = cleaner(text)
- return text
diff --git a/spaces/Sonnt/Fracture_Webapp/backup/081222/Antuns/prediction_LGBM.py b/spaces/Sonnt/Fracture_Webapp/backup/081222/Antuns/prediction_LGBM.py
deleted file mode 100644
index 0eb544e4ef011fdb89015370efaaccc5dbd8a450..0000000000000000000000000000000000000000
--- a/spaces/Sonnt/Fracture_Webapp/backup/081222/Antuns/prediction_LGBM.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import pandas as pd
-
-def Prediction_LGBM(trained_models:list=None, data:pd.DataFrame=None, feature_names:list=None):
- """
- mode: "predict", "predict_proba"
- """
- data_copy = data.copy()
- # if mode == "predict":
- # for i, model in enumerate(trained_models):
- # y_preds = model.predict(data_copy[feature_names])
- # data_copy[f"model_{i}"] = y_preds
- #
- # else:
- for i, model in enumerate(trained_models):
- y_preds = model.predict(data_copy[feature_names])
- data_copy[f"model_{i}"] = y_preds
-
- return data_copy
-
-if __name__ == '__main__':
- Prediction_LGBM()
\ No newline at end of file
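
A hedged usage sketch of the function above, assuming it is importable from this module; the constant-output models stand in for trained LightGBM boosters and exist only for illustration.

```python
import pandas as pd

class ConstantModel:
    """Hypothetical stand-in for a trained LightGBM model."""
    def __init__(self, value):
        self.value = value
    def predict(self, X):
        return [self.value] * len(X)

df = pd.DataFrame({"GR": [80.0, 95.0], "RHOB": [2.45, 2.60]})
out = Prediction_LGBM(
    trained_models=[ConstantModel(0.1), ConstantModel(0.9)],
    data=df,
    feature_names=["GR", "RHOB"],
)
print(out[["model_0", "model_1"]])  # one prediction column per model
```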
diff --git a/spaces/SpacesExamples/test-docker-go/Dockerfile b/spaces/SpacesExamples/test-docker-go/Dockerfile
deleted file mode 100644
index 15eda1fdbfffd2e5788c8545a5b655edc4443307..0000000000000000000000000000000000000000
--- a/spaces/SpacesExamples/test-docker-go/Dockerfile
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM golang:1.18 as builder
-
-WORKDIR /workdir
-
-COPY main.go .
-
-RUN go build -o main main.go
-
-FROM golang:1.18
-WORKDIR /workdir
-
-COPY --from=builder /workdir/main /main
-CMD /main
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/colorable.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/colorable.py
deleted file mode 100644
index 1e3caef62b4a8efa897724591dfd36334ccbdd47..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/colorable.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#*****************************************************************************
-# Copyright (C) 2016 The IPython Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#*****************************************************************************
-
-"""
-Color managing related utilities
-"""
-
-import pygments
-
-from traitlets.config import Configurable
-from traitlets import Unicode
-
-
-available_themes = lambda : [s for s in pygments.styles.get_all_styles()]+['NoColor','LightBG','Linux', 'Neutral']
-
-class Colorable(Configurable):
- """
- A subclass of Configurable for all the classes that have a `default_style`
- """
- default_style=Unicode('LightBG').tag(config=True)
-
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/altair/expr/consts.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/altair/expr/consts.py
deleted file mode 100644
index 974fb06a3c756a7e27106f4d1bb9c17b78a094fd..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/altair/expr/consts.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from typing import Dict
-
-from .core import ConstExpression
-
-
-CONST_LISTING = {
- "NaN": "not a number (same as JavaScript literal NaN)",
- "LN10": "the natural log of 10 (alias to Math.LN10)",
- "E": "the transcendental number e (alias to Math.E)",
- "LOG10E": "the base 10 logarithm e (alias to Math.LOG10E)",
- "LOG2E": "the base 2 logarithm of e (alias to Math.LOG2E)",
- "SQRT1_2": "the square root of 0.5 (alias to Math.SQRT1_2)",
- "LN2": "the natural log of 2 (alias to Math.LN2)",
- "SQRT2": "the square root of 2 (alias to Math.SQRT1_2)",
- "PI": "the transcendental number pi (alias to Math.PI)",
-}
-
-NAME_MAP: Dict[str, str] = {}
-
-
-def _populate_namespace():
- globals_ = globals()
- for name, doc in CONST_LISTING.items():
- py_name = NAME_MAP.get(name, name)
- globals_[py_name] = ConstExpression(name, doc)
- yield py_name
-
-
-__all__ = list(_populate_namespace())
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/backoff/_decorator.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/backoff/_decorator.py
deleted file mode 100644
index 92dee1bb76178d0beaa2ae841d5d0325e3ac27d3..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/backoff/_decorator.py
+++ /dev/null
@@ -1,222 +0,0 @@
-# coding:utf-8
-import asyncio
-import logging
-import operator
-from typing import Any, Callable, Iterable, Optional, Type, Union
-
-from backoff._common import (
- _prepare_logger,
- _config_handlers,
- _log_backoff,
- _log_giveup
-)
-from backoff._jitter import full_jitter
-from backoff import _async, _sync
-from backoff._typing import (
- _CallableT,
- _Handler,
- _Jitterer,
- _MaybeCallable,
- _MaybeLogger,
- _MaybeSequence,
- _Predicate,
- _WaitGenerator,
-)
-
-
-def on_predicate(wait_gen: _WaitGenerator,
- predicate: _Predicate[Any] = operator.not_,
- *,
- max_tries: Optional[_MaybeCallable[int]] = None,
- max_time: Optional[_MaybeCallable[float]] = None,
- jitter: Union[_Jitterer, None] = full_jitter,
- on_success: Union[_Handler, Iterable[_Handler], None] = None,
- on_backoff: Union[_Handler, Iterable[_Handler], None] = None,
- on_giveup: Union[_Handler, Iterable[_Handler], None] = None,
- logger: _MaybeLogger = 'backoff',
- backoff_log_level: int = logging.INFO,
- giveup_log_level: int = logging.ERROR,
- **wait_gen_kwargs: Any) -> Callable[[_CallableT], _CallableT]:
- """Returns decorator for backoff and retry triggered by predicate.
-
- Args:
- wait_gen: A generator yielding successive wait times in
- seconds.
- predicate: A function which when called on the return value of
- the target function will trigger backoff when considered
- truthy. If not specified, the default behavior is to
- back off on falsey return values.
- max_tries: The maximum number of attempts to make before giving
- up. In the case of failure, the result of the last attempt
- will be returned. The default value of None means there
- is no limit to the number of tries. If a callable is passed,
- it will be evaluated at runtime and its return value used.
- max_time: The maximum total amount of time to try for before
- giving up. If this time expires, the result of the last
- attempt will be returned. If a callable is passed, it will
- be evaluated at runtime and its return value used.
- jitter: A function of the value yielded by wait_gen returning
- the actual time to wait. This distributes wait times
- stochastically in order to avoid timing collisions across
- concurrent clients. Wait times are jittered by default
- using the full_jitter function. Jittering may be disabled
- altogether by passing jitter=None.
- on_success: Callable (or iterable of callables) with a unary
- signature to be called in the event of success. The
- parameter is a dict containing details about the invocation.
- on_backoff: Callable (or iterable of callables) with a unary
- signature to be called in the event of a backoff. The
- parameter is a dict containing details about the invocation.
- on_giveup: Callable (or iterable of callables) with a unary
- signature to be called in the event that max_tries
- is exceeded. The parameter is a dict containing details
- about the invocation.
- logger: Name of logger or Logger object to log to. Defaults to
- 'backoff'.
- backoff_log_level: log level for the backoff event. Defaults to "INFO"
- giveup_log_level: log level for the give up event. Defaults to "ERROR"
- **wait_gen_kwargs: Any additional keyword args specified will be
- passed to wait_gen when it is initialized. Any callable
- args will first be evaluated and their return values passed.
- This is useful for runtime configuration.
- """
- def decorate(target):
- nonlocal logger, on_success, on_backoff, on_giveup
-
- logger = _prepare_logger(logger)
- on_success = _config_handlers(on_success)
- on_backoff = _config_handlers(
- on_backoff,
- default_handler=_log_backoff,
- logger=logger,
- log_level=backoff_log_level
- )
- on_giveup = _config_handlers(
- on_giveup,
- default_handler=_log_giveup,
- logger=logger,
- log_level=giveup_log_level
- )
-
- if asyncio.iscoroutinefunction(target):
- retry = _async.retry_predicate
- else:
- retry = _sync.retry_predicate
-
- return retry(
- target,
- wait_gen,
- predicate,
- max_tries=max_tries,
- max_time=max_time,
- jitter=jitter,
- on_success=on_success,
- on_backoff=on_backoff,
- on_giveup=on_giveup,
- wait_gen_kwargs=wait_gen_kwargs
- )
-
- # Return a function which decorates a target with a retry loop.
- return decorate
-
-
-def on_exception(wait_gen: _WaitGenerator,
- exception: _MaybeSequence[Type[Exception]],
- *,
- max_tries: Optional[_MaybeCallable[int]] = None,
- max_time: Optional[_MaybeCallable[float]] = None,
- jitter: Union[_Jitterer, None] = full_jitter,
- giveup: _Predicate[Exception] = lambda e: False,
- on_success: Union[_Handler, Iterable[_Handler], None] = None,
- on_backoff: Union[_Handler, Iterable[_Handler], None] = None,
- on_giveup: Union[_Handler, Iterable[_Handler], None] = None,
- raise_on_giveup: bool = True,
- logger: _MaybeLogger = 'backoff',
- backoff_log_level: int = logging.INFO,
- giveup_log_level: int = logging.ERROR,
- **wait_gen_kwargs: Any) -> Callable[[_CallableT], _CallableT]:
- """Returns decorator for backoff and retry triggered by exception.
-
- Args:
- wait_gen: A generator yielding successive wait times in
- seconds.
- exception: An exception type (or tuple of types) which triggers
- backoff.
- max_tries: The maximum number of attempts to make before giving
- up. Once exhausted, the exception will be allowed to escape.
- The default value of None means there is no limit to the
- number of tries. If a callable is passed, it will be
- evaluated at runtime and its return value used.
- max_time: The maximum total amount of time to try for before
- giving up. Once expired, the exception will be allowed to
- escape. If a callable is passed, it will be
- evaluated at runtime and its return value used.
- jitter: A function of the value yielded by wait_gen returning
- the actual time to wait. This distributes wait times
- stochastically in order to avoid timing collisions across
- concurrent clients. Wait times are jittered by default
- using the full_jitter function. Jittering may be disabled
- altogether by passing jitter=None.
- giveup: Function accepting an exception instance and
- returning whether or not to give up. Optional. The default
- is to always continue.
- on_success: Callable (or iterable of callables) with a unary
- signature to be called in the event of success. The
- parameter is a dict containing details about the invocation.
- on_backoff: Callable (or iterable of callables) with a unary
- signature to be called in the event of a backoff. The
- parameter is a dict containing details about the invocation.
- on_giveup: Callable (or iterable of callables) with a unary
- signature to be called in the event that max_tries
- is exceeded. The parameter is a dict containing details
- about the invocation.
- raise_on_giveup: Boolean indicating whether the registered exceptions
- should be raised on giveup. Defaults to `True`
- logger: Name or Logger object to log to. Defaults to 'backoff'.
- backoff_log_level: log level for the backoff event. Defaults to "INFO"
- giveup_log_level: log level for the give up event. Defaults to "ERROR"
- **wait_gen_kwargs: Any additional keyword args specified will be
- passed to wait_gen when it is initialized. Any callable
- args will first be evaluated and their return values passed.
- This is useful for runtime configuration.
- """
- def decorate(target):
- nonlocal logger, on_success, on_backoff, on_giveup
-
- logger = _prepare_logger(logger)
- on_success = _config_handlers(on_success)
- on_backoff = _config_handlers(
- on_backoff,
- default_handler=_log_backoff,
- logger=logger,
- log_level=backoff_log_level,
- )
- on_giveup = _config_handlers(
- on_giveup,
- default_handler=_log_giveup,
- logger=logger,
- log_level=giveup_log_level,
- )
-
- if asyncio.iscoroutinefunction(target):
- retry = _async.retry_exception
- else:
- retry = _sync.retry_exception
-
- return retry(
- target,
- wait_gen,
- exception,
- max_tries=max_tries,
- max_time=max_time,
- jitter=jitter,
- giveup=giveup,
- on_success=on_success,
- on_backoff=on_backoff,
- on_giveup=on_giveup,
- raise_on_giveup=raise_on_giveup,
- wait_gen_kwargs=wait_gen_kwargs
- )
-
- # Return a function which decorates a target with a retry loop.
- return decorate
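
For context, a short usage sketch of the two decorators defined above, written against the public backoff API; the URL and the `job` object are made up.

```python
import backoff
import requests

@backoff.on_exception(backoff.expo,
                      requests.exceptions.RequestException,
                      max_tries=5)
def fetch(url):
    # Retries with exponential backoff (plus full jitter by default) on any
    # requests exception, up to five attempts.
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()
    return resp

@backoff.on_predicate(backoff.fibo, lambda result: result is None, max_time=30)
def poll(job):
    # Backs off and retries while the job has not produced a result yet.
    return job.get("result")
```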
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/contourpy/util/mpl_util.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/contourpy/util/mpl_util.py
deleted file mode 100644
index 0c970886faeac57427db27ca4510934de223ac8c..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/contourpy/util/mpl_util.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING, cast
-
-import matplotlib.path as mpath
-import numpy as np
-
-from contourpy import FillType, LineType
-
-if TYPE_CHECKING:
- from contourpy._contourpy import (
- CodeArray, FillReturn, LineReturn, LineReturn_Separate, OffsetArray,
- )
-
-
-def filled_to_mpl_paths(filled: FillReturn, fill_type: FillType) -> list[mpath.Path]:
- if fill_type in (FillType.OuterCode, FillType.ChunkCombinedCode):
- paths = [mpath.Path(points, codes) for points, codes in zip(*filled) if points is not None]
- elif fill_type in (FillType.OuterOffset, FillType.ChunkCombinedOffset):
- paths = [mpath.Path(points, offsets_to_mpl_codes(offsets))
- for points, offsets in zip(*filled) if points is not None]
- elif fill_type == FillType.ChunkCombinedCodeOffset:
- paths = []
- for points, codes, outer_offsets in zip(*filled):
- if points is None:
- continue
- points = np.split(points, outer_offsets[1:-1])
- codes = np.split(codes, outer_offsets[1:-1])
- paths += [mpath.Path(p, c) for p, c in zip(points, codes)]
- elif fill_type == FillType.ChunkCombinedOffsetOffset:
- paths = []
- for points, offsets, outer_offsets in zip(*filled):
- if points is None:
- continue
- for i in range(len(outer_offsets)-1):
- offs = offsets[outer_offsets[i]:outer_offsets[i+1]+1]
- pts = points[offs[0]:offs[-1]]
- paths += [mpath.Path(pts, offsets_to_mpl_codes(offs - offs[0]))]
- else:
- raise RuntimeError(f"Conversion of FillType {fill_type} to MPL Paths is not implemented")
- return paths
-
-
-def lines_to_mpl_paths(lines: LineReturn, line_type: LineType) -> list[mpath.Path]:
- if line_type == LineType.Separate:
- if TYPE_CHECKING:
- lines = cast(LineReturn_Separate, lines)
- paths = []
- for line in lines:
- # Drawing as Paths so that they can be closed correctly.
- closed = line[0, 0] == line[-1, 0] and line[0, 1] == line[-1, 1]
- paths.append(mpath.Path(line, closed=closed))
- elif line_type in (LineType.SeparateCode, LineType.ChunkCombinedCode):
- paths = [mpath.Path(points, codes) for points, codes in zip(*lines) if points is not None]
- elif line_type == LineType.ChunkCombinedOffset:
- paths = []
- for points, offsets in zip(*lines):
- if points is None:
- continue
- for i in range(len(offsets)-1):
- line = points[offsets[i]:offsets[i+1]]
- closed = line[0, 0] == line[-1, 0] and line[0, 1] == line[-1, 1]
- paths.append(mpath.Path(line, closed=closed))
- else:
- raise RuntimeError(f"Conversion of LineType {line_type} to MPL Paths is not implemented")
- return paths
-
-
-def mpl_codes_to_offsets(codes: CodeArray) -> OffsetArray:
- offsets = np.nonzero(codes == 1)[0].astype(np.uint32)
- offsets = np.append(offsets, len(codes))
- return offsets
-
-
-def offsets_to_mpl_codes(offsets: OffsetArray) -> CodeArray:
- codes = np.full(offsets[-1]-offsets[0], 2, dtype=np.uint8) # LINETO = 2
- codes[offsets[:-1]] = 1 # MOVETO = 1
- codes[offsets[1:]-1] = 79 # CLOSEPOLY 79
- return codes
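
A quick round-trip check of the two helpers above; the import path is assumed to match the upstream contourpy.util.mpl_util module that this file vendors.

```python
import numpy as np
from contourpy.util.mpl_util import mpl_codes_to_offsets, offsets_to_mpl_codes

# One closed polygon: MOVETO=1, three LINETOs, CLOSEPOLY=79.
codes = np.array([1, 2, 2, 2, 79], dtype=np.uint8)
offsets = mpl_codes_to_offsets(codes)   # -> [0, 5]
assert np.array_equal(offsets_to_mpl_codes(offsets), codes)
```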
diff --git a/spaces/Superlang/ImageProcessor/annotator/base_annotator.py b/spaces/Superlang/ImageProcessor/annotator/base_annotator.py
deleted file mode 100644
index 27d007e9ee5fe42f9f14f7ee2debf61b89090250..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/base_annotator.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import os
-
-# # default cache
-default_home = os.path.join(os.path.expanduser("~"), ".cache")
-model_cache_home = os.path.expanduser(
- os.getenv(
- "HF_HOME",
- os.path.join(os.getenv("XDG_CACHE_HOME", default_home), "model"),
- )
-)
-
-
-class BaseProcessor:
- def __init__(self, **kwargs):
- self.device = kwargs.get("device") if kwargs.get("device") is not None else "cpu"
- self.models_path = kwargs.get("models_path") if kwargs.get("models_path") is not None else model_cache_home
-
- def unload_model(self):
- pass
diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/modeling/backbone/build.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/modeling/backbone/build.py
deleted file mode 100644
index 63a4aaced2c2869294d2b16f4b95cdfdd01259b7..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/modeling/backbone/build.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-from annotator.oneformer.detectron2.layers import ShapeSpec
-from annotator.oneformer.detectron2.utils.registry import Registry
-
-from .backbone import Backbone
-
-BACKBONE_REGISTRY = Registry("BACKBONE")
-BACKBONE_REGISTRY.__doc__ = """
-Registry for backbones, which extract feature maps from images
-
-The registered object must be a callable that accepts two arguments:
-
-1. A :class:`detectron2.config.CfgNode`
-2. A :class:`detectron2.layers.ShapeSpec`, which contains the input shape specification.
-
-Registered object must return instance of :class:`Backbone`.
-"""
-
-
-def build_backbone(cfg, input_shape=None):
- """
- Build a backbone from `cfg.MODEL.BACKBONE.NAME`.
-
- Returns:
- an instance of :class:`Backbone`
- """
- if input_shape is None:
- input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))
-
- backbone_name = cfg.MODEL.BACKBONE.NAME
- backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape)
- assert isinstance(backbone, Backbone)
- return backbone
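
A hedged sketch of how a backbone plugs into the registry above; `ToyBackbone` and its single-conv layout are invented for illustration, and the import path assumes this vendored package re-exports `Backbone` and `BACKBONE_REGISTRY` the way upstream detectron2 does.

```python
import torch.nn as nn
from annotator.oneformer.detectron2.layers import ShapeSpec
from annotator.oneformer.detectron2.modeling.backbone import Backbone, BACKBONE_REGISTRY

@BACKBONE_REGISTRY.register()
class ToyBackbone(Backbone):
    def __init__(self, cfg, input_shape: ShapeSpec):
        super().__init__()
        self.conv = nn.Conv2d(input_shape.channels, 64, kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        return {"toy": self.conv(x)}

    def output_shape(self):
        return {"toy": ShapeSpec(channels=64, stride=2)}

# With cfg.MODEL.BACKBONE.NAME = "ToyBackbone", build_backbone(cfg) returns an instance.
```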
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/cnn/utils/sync_bn.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/cnn/utils/sync_bn.py
deleted file mode 100644
index f78f39181d75bb85c53e8c7c8eaf45690e9f0bee..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/cnn/utils/sync_bn.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import torch
-
-import annotator.uniformer.mmcv as mmcv
-
-
-class _BatchNormXd(torch.nn.modules.batchnorm._BatchNorm):
- """A general BatchNorm layer without input dimension check.
-
- Reproduced from @kapily's work:
- (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)
- The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc
- is `_check_input_dim` that is designed for tensor sanity checks.
- The check has been bypassed in this class for the convenience of converting
- SyncBatchNorm.
- """
-
- def _check_input_dim(self, input):
- return
-
-
-def revert_sync_batchnorm(module):
- """Helper function to convert all `SyncBatchNorm` (SyncBN) and
- `mmcv.ops.sync_bn.SyncBatchNorm`(MMSyncBN) layers in the model to
- `BatchNormXd` layers.
-
- Adapted from @kapily's work:
- (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)
-
- Args:
- module (nn.Module): The module containing `SyncBatchNorm` layers.
-
- Returns:
- module_output: The converted module with `BatchNormXd` layers.
- """
- module_output = module
- module_checklist = [torch.nn.modules.batchnorm.SyncBatchNorm]
- if hasattr(mmcv, 'ops'):
- module_checklist.append(mmcv.ops.SyncBatchNorm)
- if isinstance(module, tuple(module_checklist)):
- module_output = _BatchNormXd(module.num_features, module.eps,
- module.momentum, module.affine,
- module.track_running_stats)
- if module.affine:
- # no_grad() may not be needed here but
- # just to be consistent with `convert_sync_batchnorm()`
- with torch.no_grad():
- module_output.weight = module.weight
- module_output.bias = module.bias
- module_output.running_mean = module.running_mean
- module_output.running_var = module.running_var
- module_output.num_batches_tracked = module.num_batches_tracked
- module_output.training = module.training
- # qconfig exists in quantized models
- if hasattr(module, 'qconfig'):
- module_output.qconfig = module.qconfig
- for name, child in module.named_children():
- module_output.add_module(name, revert_sync_batchnorm(child))
- del module
- return module_output
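
A minimal usage sketch of the converter above on a toy model; the import path is an assumption based on this file's location (in upstream mmcv the same helper lives under mmcv.cnn).

```python
import torch.nn as nn
from annotator.uniformer.mmcv.cnn.utils.sync_bn import revert_sync_batchnorm

model = nn.Sequential(
    nn.Conv2d(3, 8, kernel_size=3, padding=1),
    nn.SyncBatchNorm(8),   # would normally require a distributed process group
    nn.ReLU(),
)
model = revert_sync_batchnorm(model)
print(type(model[1]).__name__)  # _BatchNormXd: usable on a single CPU/GPU
```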
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/metadata/importlib/_envs.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/metadata/importlib/_envs.py
deleted file mode 100644
index 3850ddaf412022ac00ffa515518962a8a4c4de5e..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/metadata/importlib/_envs.py
+++ /dev/null
@@ -1,188 +0,0 @@
-import functools
-import importlib.metadata
-import logging
-import os
-import pathlib
-import sys
-import zipfile
-import zipimport
-from typing import Iterator, List, Optional, Sequence, Set, Tuple
-
-from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
-
-from pip._internal.metadata.base import BaseDistribution, BaseEnvironment
-from pip._internal.models.wheel import Wheel
-from pip._internal.utils.deprecation import deprecated
-from pip._internal.utils.filetypes import WHEEL_EXTENSION
-
-from ._compat import BadMetadata, BasePath, get_dist_name, get_info_location
-from ._dists import Distribution
-
-logger = logging.getLogger(__name__)
-
-
-def _looks_like_wheel(location: str) -> bool:
- if not location.endswith(WHEEL_EXTENSION):
- return False
- if not os.path.isfile(location):
- return False
- if not Wheel.wheel_file_re.match(os.path.basename(location)):
- return False
- return zipfile.is_zipfile(location)
-
-
-class _DistributionFinder:
- """Finder to locate distributions.
-
- The main purpose of this class is to memoize found distributions' names, so
- only one distribution is returned for each package name. A lot of pip code
- assumes this (because it is setuptools's behavior), and not doing the same
- can potentially cause a distribution in a lower precedence path to override a
- higher precedence one if the caller is not careful.
-
- Eventually we probably want to make it possible to see lower precedence
- installations as well. It's a useful feature, after all.
- """
-
- FoundResult = Tuple[importlib.metadata.Distribution, Optional[BasePath]]
-
- def __init__(self) -> None:
- self._found_names: Set[NormalizedName] = set()
-
- def _find_impl(self, location: str) -> Iterator[FoundResult]:
- """Find distributions in a location."""
- # Skip looking inside a wheel. Since a package inside a wheel is not
- # always valid (due to .data directories etc.), its .dist-info entry
- # should not be considered an installed distribution.
- if _looks_like_wheel(location):
- return
- # To know exactly where we find a distribution, we have to feed in the
- # paths one by one, instead of dumping the list to importlib.metadata.
- for dist in importlib.metadata.distributions(path=[location]):
- info_location = get_info_location(dist)
- try:
- raw_name = get_dist_name(dist)
- except BadMetadata as e:
- logger.warning("Skipping %s due to %s", info_location, e.reason)
- continue
- normalized_name = canonicalize_name(raw_name)
- if normalized_name in self._found_names:
- continue
- self._found_names.add(normalized_name)
- yield dist, info_location
-
- def find(self, location: str) -> Iterator[BaseDistribution]:
- """Find distributions in a location.
-
- The path can be either a directory, or a ZIP archive.
- """
- for dist, info_location in self._find_impl(location):
- if info_location is None:
- installed_location: Optional[BasePath] = None
- else:
- installed_location = info_location.parent
- yield Distribution(dist, info_location, installed_location)
-
- def find_linked(self, location: str) -> Iterator[BaseDistribution]:
- """Read location in egg-link files and return distributions in there.
-
- The path should be a directory; otherwise this returns nothing. This
- follows how setuptools does this for compatibility. The first non-empty
- line in the egg-link is read as a path (resolved against the egg-link's
- containing directory if relative). Distributions found at that linked
- location are returned.
- """
- path = pathlib.Path(location)
- if not path.is_dir():
- return
- for child in path.iterdir():
- if child.suffix != ".egg-link":
- continue
- with child.open() as f:
- lines = (line.strip() for line in f)
- target_rel = next((line for line in lines if line), "")
- if not target_rel:
- continue
- target_location = str(path.joinpath(target_rel))
- for dist, info_location in self._find_impl(target_location):
- yield Distribution(dist, info_location, path)
-
- def _find_eggs_in_dir(self, location: str) -> Iterator[BaseDistribution]:
- from pip._vendor.pkg_resources import find_distributions
-
- from pip._internal.metadata import pkg_resources as legacy
-
- with os.scandir(location) as it:
- for entry in it:
- if not entry.name.endswith(".egg"):
- continue
- for dist in find_distributions(entry.path):
- yield legacy.Distribution(dist)
-
- def _find_eggs_in_zip(self, location: str) -> Iterator[BaseDistribution]:
- from pip._vendor.pkg_resources import find_eggs_in_zip
-
- from pip._internal.metadata import pkg_resources as legacy
-
- try:
- importer = zipimport.zipimporter(location)
- except zipimport.ZipImportError:
- return
- for dist in find_eggs_in_zip(importer, location):
- yield legacy.Distribution(dist)
-
- def find_eggs(self, location: str) -> Iterator[BaseDistribution]:
- """Find eggs in a location.
-
- This actually uses the old *pkg_resources* backend. We likely want to
- deprecate this so we can eventually remove the *pkg_resources*
- dependency entirely. Before that, this should first emit a deprecation
- warning for some versions when using the fallback since importing
- *pkg_resources* is slow for those who don't need it.
- """
- if os.path.isdir(location):
- yield from self._find_eggs_in_dir(location)
- if zipfile.is_zipfile(location):
- yield from self._find_eggs_in_zip(location)
-
-
-@functools.lru_cache(maxsize=None) # Warn a distribution exactly once.
-def _emit_egg_deprecation(location: Optional[str]) -> None:
- deprecated(
- reason=f"Loading egg at {location} is deprecated.",
- replacement="to use pip for package installation.",
- gone_in="23.3",
- )
-
-
-class Environment(BaseEnvironment):
- def __init__(self, paths: Sequence[str]) -> None:
- self._paths = paths
-
- @classmethod
- def default(cls) -> BaseEnvironment:
- return cls(sys.path)
-
- @classmethod
- def from_paths(cls, paths: Optional[List[str]]) -> BaseEnvironment:
- if paths is None:
- return cls(sys.path)
- return cls(paths)
-
- def _iter_distributions(self) -> Iterator[BaseDistribution]:
- finder = _DistributionFinder()
- for location in self._paths:
- yield from finder.find(location)
- for dist in finder.find_eggs(location):
- _emit_egg_deprecation(dist.location)
- yield dist
- # This must go last because that's how pkg_resources tie-breaks.
- yield from finder.find_linked(location)
-
- def get_distribution(self, name: str) -> Optional[BaseDistribution]:
- matches = (
- distribution
- for distribution in self.iter_all_distributions()
- if distribution.canonical_name == canonicalize_name(name)
- )
- return next(matches, None)
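
A self-contained sketch of the "first found wins" rule that `_DistributionFinder` enforces, written against the standard library plus the standalone `packaging` package rather than pip's internals.

```python
import sys
import importlib.metadata
from packaging.utils import canonicalize_name

seen = set()
for location in sys.path:
    # Feed paths one at a time so we know exactly where each distribution was found.
    for dist in importlib.metadata.distributions(path=[location]):
        name = canonicalize_name(dist.metadata["Name"] or "")
        if name in seen:
            continue  # a lower-precedence copy of an already-seen package
        seen.add(name)

print(f"{len(seen)} distinct distributions visible on sys.path")
```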
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/formatters/rtf.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/formatters/rtf.py
deleted file mode 100644
index 125189c6fa57d61bea0012080eba12f90faf740c..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/formatters/rtf.py
+++ /dev/null
@@ -1,146 +0,0 @@
-"""
- pygments.formatters.rtf
- ~~~~~~~~~~~~~~~~~~~~~~~
-
- A formatter that generates RTF files.
-
- :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from pip._vendor.pygments.formatter import Formatter
-from pip._vendor.pygments.util import get_int_opt, surrogatepair
-
-
-__all__ = ['RtfFormatter']
-
-
-class RtfFormatter(Formatter):
- """
- Format tokens as RTF markup. This formatter automatically outputs full RTF
- documents with color information and other useful stuff. Perfect for Copy and
- Paste into Microsoft(R) Word(R) documents.
-
- Please note that ``encoding`` and ``outencoding`` options are ignored.
- The RTF format is ASCII natively, but handles unicode characters correctly
- thanks to escape sequences.
-
- .. versionadded:: 0.6
-
- Additional options accepted:
-
- `style`
- The style to use, can be a string or a Style subclass (default:
- ``'default'``).
-
- `fontface`
- The used font family, for example ``Bitstream Vera Sans``. Defaults to
- some generic font which is supposed to have fixed width.
-
- `fontsize`
- Size of the font used. Size is specified in half points. The
- default is 24 half-points, giving a size 12 font.
-
- .. versionadded:: 2.0
- """
- name = 'RTF'
- aliases = ['rtf']
- filenames = ['*.rtf']
-
- def __init__(self, **options):
- r"""
- Additional options accepted:
-
- ``fontface``
- Name of the font used. Could for example be ``'Courier New'``
- to further specify the default which is ``'\fmodern'``. The RTF
- specification claims that ``\fmodern`` are "Fixed-pitch serif
- and sans serif fonts". Hope every RTF implementation thinks
- the same about modern...
-
- """
- Formatter.__init__(self, **options)
- self.fontface = options.get('fontface') or ''
- self.fontsize = get_int_opt(options, 'fontsize', 0)
-
- def _escape(self, text):
- return text.replace('\\', '\\\\') \
- .replace('{', '\\{') \
- .replace('}', '\\}')
-
- def _escape_text(self, text):
- # empty strings, should give a small performance improvement
- if not text:
- return ''
-
- # escape text
- text = self._escape(text)
-
- buf = []
- for c in text:
- cn = ord(c)
- if cn < (2**7):
- # ASCII character
- buf.append(str(c))
- elif (2**7) <= cn < (2**16):
- # single unicode escape sequence
- buf.append('{\\u%d}' % cn)
- elif (2**16) <= cn:
- # RTF limits unicode to 16 bits.
- # Force surrogate pairs
- buf.append('{\\u%d}{\\u%d}' % surrogatepair(cn))
-
- return ''.join(buf).replace('\n', '\\par\n')
-
- def format_unencoded(self, tokensource, outfile):
- # rtf 1.8 header
- outfile.write('{\\rtf1\\ansi\\uc0\\deff0'
- '{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}'
- '{\\colortbl;' % (self.fontface and
- ' ' + self._escape(self.fontface) or
- ''))
-
- # convert colors and save them in a mapping to access them later.
- color_mapping = {}
- offset = 1
- for _, style in self.style:
- for color in style['color'], style['bgcolor'], style['border']:
- if color and color not in color_mapping:
- color_mapping[color] = offset
- outfile.write('\\red%d\\green%d\\blue%d;' % (
- int(color[0:2], 16),
- int(color[2:4], 16),
- int(color[4:6], 16)
- ))
- offset += 1
- outfile.write('}\\f0 ')
- if self.fontsize:
- outfile.write('\\fs%d' % self.fontsize)
-
- # highlight stream
- for ttype, value in tokensource:
- while not self.style.styles_token(ttype) and ttype.parent:
- ttype = ttype.parent
- style = self.style.style_for_token(ttype)
- buf = []
- if style['bgcolor']:
- buf.append('\\cb%d' % color_mapping[style['bgcolor']])
- if style['color']:
- buf.append('\\cf%d' % color_mapping[style['color']])
- if style['bold']:
- buf.append('\\b')
- if style['italic']:
- buf.append('\\i')
- if style['underline']:
- buf.append('\\ul')
- if style['border']:
- buf.append('\\chbrdr\\chcfpat%d' %
- color_mapping[style['border']])
- start = ''.join(buf)
- if start:
- outfile.write('{%s ' % start)
- outfile.write(self._escape_text(value))
- if start:
- outfile.write('}')
-
- outfile.write('}')
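
For reference, the formatter above is normally driven through the public Pygments API; a small hedged example follows (the output file name is arbitrary).

```python
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import RtfFormatter

code = 'print("hello rtf")\n'
# fontsize is given in half-points, so 20 means a 10pt font.
rtf = highlight(code, PythonLexer(), RtfFormatter(fontface="Courier New", fontsize=20))
with open("snippet.rtf", "w") as f:
    f.write(rtf)
```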
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/util/wait.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/util/wait.py
deleted file mode 100644
index 21b4590b3dc9b58902b0d47164b9023e54a85ef8..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/util/wait.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import errno
-import select
-import sys
-from functools import partial
-
-try:
- from time import monotonic
-except ImportError:
- from time import time as monotonic
-
-__all__ = ["NoWayToWaitForSocketError", "wait_for_read", "wait_for_write"]
-
-
-class NoWayToWaitForSocketError(Exception):
- pass
-
-
-# How should we wait on sockets?
-#
-# There are two types of APIs you can use for waiting on sockets: the fancy
-# modern stateful APIs like epoll/kqueue, and the older stateless APIs like
-# select/poll. The stateful APIs are more efficient when you have a lots of
-# sockets to keep track of, because you can set them up once and then use them
-# lots of times. But we only ever want to wait on a single socket at a time
-# and don't want to keep track of state, so the stateless APIs are actually
-# more efficient. So we want to use select() or poll().
-#
-# Now, how do we choose between select() and poll()? On traditional Unixes,
-# select() has a strange calling convention that makes it slow, or fail
-# altogether, for high-numbered file descriptors. The point of poll() is to fix
-# that, so on Unixes, we prefer poll().
-#
-# On Windows, there is no poll() (or at least Python doesn't provide a wrapper
-# for it), but that's OK, because on Windows, select() doesn't have this
-# strange calling convention; plain select() works fine.
-#
-# So: on Windows we use select(), and everywhere else we use poll(). We also
-# fall back to select() in case poll() is somehow broken or missing.
-
-if sys.version_info >= (3, 5):
- # Modern Python, that retries syscalls by default
- def _retry_on_intr(fn, timeout):
- return fn(timeout)
-
-else:
- # Old and broken Pythons.
- def _retry_on_intr(fn, timeout):
- if timeout is None:
- deadline = float("inf")
- else:
- deadline = monotonic() + timeout
-
- while True:
- try:
- return fn(timeout)
- # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7
- except (OSError, select.error) as e:
- # 'e.args[0]' incantation works for both OSError and select.error
- if e.args[0] != errno.EINTR:
- raise
- else:
- timeout = deadline - monotonic()
- if timeout < 0:
- timeout = 0
- if timeout == float("inf"):
- timeout = None
- continue
-
-
-def select_wait_for_socket(sock, read=False, write=False, timeout=None):
- if not read and not write:
- raise RuntimeError("must specify at least one of read=True, write=True")
- rcheck = []
- wcheck = []
- if read:
- rcheck.append(sock)
- if write:
- wcheck.append(sock)
- # When doing a non-blocking connect, most systems signal success by
- # marking the socket writable. Windows, though, signals success by marking
- # it as "exceptional". We paper over the difference by checking the write
- # sockets for both conditions. (The stdlib selectors module does the same
- # thing.)
- fn = partial(select.select, rcheck, wcheck, wcheck)
- rready, wready, xready = _retry_on_intr(fn, timeout)
- return bool(rready or wready or xready)
-
-
-def poll_wait_for_socket(sock, read=False, write=False, timeout=None):
- if not read and not write:
- raise RuntimeError("must specify at least one of read=True, write=True")
- mask = 0
- if read:
- mask |= select.POLLIN
- if write:
- mask |= select.POLLOUT
- poll_obj = select.poll()
- poll_obj.register(sock, mask)
-
- # For some reason, poll() takes timeout in milliseconds
- def do_poll(t):
- if t is not None:
- t *= 1000
- return poll_obj.poll(t)
-
- return bool(_retry_on_intr(do_poll, timeout))
-
-
-def null_wait_for_socket(*args, **kwargs):
- raise NoWayToWaitForSocketError("no select-equivalent available")
-
-
-def _have_working_poll():
- # Apparently some systems have a select.poll that fails as soon as you try
- # to use it, either due to strange configuration or broken monkeypatching
- # from libraries like eventlet/greenlet.
- try:
- poll_obj = select.poll()
- _retry_on_intr(poll_obj.poll, 0)
- except (AttributeError, OSError):
- return False
- else:
- return True
-
-
-def wait_for_socket(*args, **kwargs):
- # We delay choosing which implementation to use until the first time we're
- # called. We could do it at import time, but then we might make the wrong
- # decision if someone goes wild with monkeypatching select.poll after
- # we're imported.
- global wait_for_socket
- if _have_working_poll():
- wait_for_socket = poll_wait_for_socket
- elif hasattr(select, "select"):
- wait_for_socket = select_wait_for_socket
- else: # Platform-specific: Appengine.
- wait_for_socket = null_wait_for_socket
- return wait_for_socket(*args, **kwargs)
-
-
-def wait_for_read(sock, timeout=None):
- """Waits for reading to be available on a given socket.
- Returns True if the socket is readable, or False if the timeout expired.
- """
- return wait_for_socket(sock, read=True, timeout=timeout)
-
-
-def wait_for_write(sock, timeout=None):
- """Waits for writing to be available on a given socket.
- Returns True if the socket is writable, or False if the timeout expired.
- """
- return wait_for_socket(sock, write=True, timeout=timeout)
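
A small self-contained check of the helpers above using a connected socket pair; the import path is assumed to match the upstream urllib3.util.wait module, and socketpair needs Python 3.5+ on Windows.

```python
import socket
from urllib3.util.wait import wait_for_read, wait_for_write

a, b = socket.socketpair()
print(wait_for_read(a, timeout=0.1))    # False: nothing to read yet
b.sendall(b"ping")
print(wait_for_read(a, timeout=0.1))    # True: data is waiting
print(wait_for_write(a, timeout=0.1))   # True: the send buffer has room
a.close()
b.close()
```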
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/extern/__init__.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/extern/__init__.py
deleted file mode 100644
index 948bcc6094d13684a39abd17b9519b2b78ca2813..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/extern/__init__.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import importlib.util
-import sys
-
-
-class VendorImporter:
- """
- A PEP 302 meta path importer for finding optionally-vendored
- or otherwise naturally-installed packages from root_name.
- """
-
- def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
- self.root_name = root_name
- self.vendored_names = set(vendored_names)
- self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
-
- @property
- def search_path(self):
- """
- Search first the vendor package then as a natural package.
- """
- yield self.vendor_pkg + '.'
- yield ''
-
- def _module_matches_namespace(self, fullname):
- """Figure out if the target module is vendored."""
- root, base, target = fullname.partition(self.root_name + '.')
- return not root and any(map(target.startswith, self.vendored_names))
-
- def load_module(self, fullname):
- """
- Iterate over the search path to locate and load fullname.
- """
- root, base, target = fullname.partition(self.root_name + '.')
- for prefix in self.search_path:
- try:
- extant = prefix + target
- __import__(extant)
- mod = sys.modules[extant]
- sys.modules[fullname] = mod
- return mod
- except ImportError:
- pass
- else:
- raise ImportError(
- "The '{target}' package is required; "
- "normally this is bundled with this package so if you get "
- "this warning, consult the packager of your "
- "distribution.".format(**locals())
- )
-
- def create_module(self, spec):
- return self.load_module(spec.name)
-
- def exec_module(self, module):
- pass
-
- def find_spec(self, fullname, path=None, target=None):
- """Return a module spec for vendored names."""
- return (
- importlib.util.spec_from_loader(fullname, self)
- if self._module_matches_namespace(fullname)
- else None
- )
-
- def install(self):
- """
- Install this importer into sys.meta_path if not already present.
- """
- if self not in sys.meta_path:
- sys.meta_path.append(self)
-
-
-names = (
- 'packaging',
- 'platformdirs',
- 'jaraco',
- 'importlib_resources',
- 'more_itertools',
-)
-VendorImporter(__name__, names).install()
diff --git a/spaces/Terminus0501/vits-uma-genshin-honkai/text/symbols.py b/spaces/Terminus0501/vits-uma-genshin-honkai/text/symbols.py
deleted file mode 100644
index edfbd24247be8c757275ce80b9ec27a0ffa808f3..0000000000000000000000000000000000000000
--- a/spaces/Terminus0501/vits-uma-genshin-honkai/text/symbols.py
+++ /dev/null
@@ -1,39 +0,0 @@
-'''
-Defines the set of symbols used in text input to the model.
-'''
-
-'''# japanese_cleaners
-_pad = '_'
-_punctuation = ',.!?-'
-_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ '
-'''
-
-'''# japanese_cleaners2
-_pad = '_'
-_punctuation = ',.!?-~…'
-_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ '
-'''
-
-'''# korean_cleaners
-_pad = '_'
-_punctuation = ',.!?…~'
-_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ '
-'''
-
-'''# chinese_cleaners
-_pad = '_'
-_punctuation = ',。!?—…'
-_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ '
-'''
-
-# zh_ja_mixture_cleaners
-_pad = '_'
-_punctuation = ',.!?-~…'
-_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ '
-
-
-# Export all symbols:
-symbols = [_pad] + list(_punctuation) + list(_letters)
-
-# Special symbol ids
-SPACE_ID = symbols.index(" ")
\ No newline at end of file
diff --git a/spaces/Toraong/color_textual_inversion/textual_inversion.py b/spaces/Toraong/color_textual_inversion/textual_inversion.py
deleted file mode 100644
index 61416fafccf593fc74968aea8e1334ae7f19a247..0000000000000000000000000000000000000000
--- a/spaces/Toraong/color_textual_inversion/textual_inversion.py
+++ /dev/null
@@ -1,769 +0,0 @@
-import argparse
-import itertools
-import math
-import os
-import random
-from pathlib import Path
-from typing import Optional
-
-import numpy as np
-import PIL
-import torch
-import torch.nn.functional as F
-import torch.utils.checkpoint
-from accelerate import Accelerator
-from accelerate.logging import get_logger
-from accelerate.utils import set_seed
-from diffusers import (
- AutoencoderKL,
- DDPMScheduler,
- PNDMScheduler,
- StableDiffusionPipeline,
- UNet2DConditionModel,
-)
-from diffusers.optimization import get_scheduler
-from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
-
-# from diffusers.utils import check_min_version
-from huggingface_hub import HfFolder, Repository, whoami
-
-# TODO: remove and import from diffusers.utils when the new version of diffusers is released
-from packaging import version
-from PIL import Image
-from torch.utils.data import Dataset
-from torchvision import transforms
-from tqdm.auto import tqdm
-from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
-
-if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
- PIL_INTERPOLATION = {
- "linear": PIL.Image.Resampling.BILINEAR,
- "bilinear": PIL.Image.Resampling.BILINEAR,
- "bicubic": PIL.Image.Resampling.BICUBIC,
- "lanczos": PIL.Image.Resampling.LANCZOS,
- "nearest": PIL.Image.Resampling.NEAREST,
- }
-else:
- PIL_INTERPOLATION = {
- "linear": PIL.Image.LINEAR,
- "bilinear": PIL.Image.BILINEAR,
- "bicubic": PIL.Image.BICUBIC,
- "lanczos": PIL.Image.LANCZOS,
- "nearest": PIL.Image.NEAREST,
- }
-# ------------------------------------------------------------------------------
-
-
-# Will error if the minimal version of diffusers is not installed. Remove at your own risk.
-# check_min_version("0.10.0.dev0")
-
-
-logger = get_logger(__name__)
-
-
-def save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path):
- logger.info("Saving embeddings")
- learned_embeds = (
- accelerator.unwrap_model(text_encoder)
- .get_input_embeddings()
- .weight[placeholder_token_id]
- )
- learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()}
- torch.save(learned_embeds_dict, save_path)
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
- parser.add_argument(
- "--save_steps",
- type=int,
- default=500,
- help="Save learned_embeds.bin every X updates steps.",
- )
- parser.add_argument(
- "--only_save_embeds",
- action="store_true",
- default=False,
- help="Save only the embeddings for the new concept.",
- )
- parser.add_argument(
- "--pretrained_model_name_or_path",
- type=str,
- default=None,
- required=True,
- help="Path to pretrained model or model identifier from huggingface.co/models.",
- )
- parser.add_argument(
- "--revision",
- type=str,
- default=None,
- required=False,
- help="Revision of pretrained model identifier from huggingface.co/models.",
- )
- parser.add_argument(
- "--tokenizer_name",
- type=str,
- default=None,
- help="Pretrained tokenizer name or path if not the same as model_name",
- )
- parser.add_argument(
- "--train_data_dir",
- type=str,
- default=None,
- required=True,
- help="A folder containing the training data.",
- )
- parser.add_argument(
- "--placeholder_token",
- type=str,
- default=None,
- required=True,
- help="A token to use as a placeholder for the concept.",
- )
- parser.add_argument(
- "--initializer_token",
- type=str,
- default=None,
- required=True,
- help="A token to use as initializer word.",
- )
- parser.add_argument(
- "--learnable_property",
- type=str,
- default="object",
- help="Choose between 'object' and 'style'",
- )
- parser.add_argument(
- "--repeats",
- type=int,
- default=100,
- help="How many times to repeat the training data.",
- )
- parser.add_argument(
- "--output_dir",
- type=str,
- default="text-inversion-model",
- help="The output directory where the model predictions and checkpoints will be written.",
- )
- parser.add_argument(
- "--seed", type=int, default=None, help="A seed for reproducible training."
- )
- parser.add_argument(
- "--resolution",
- type=int,
- default=512,
- help=(
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
- " resolution"
- ),
- )
- parser.add_argument(
- "--center_crop",
- action="store_true",
- help="Whether to center crop images before resizing to resolution",
- )
- parser.add_argument(
- "--train_batch_size",
- type=int,
- default=16,
- help="Batch size (per device) for the training dataloader.",
- )
- parser.add_argument("--num_train_epochs", type=int, default=100)
- parser.add_argument(
- "--max_train_steps",
- type=int,
- default=5000,
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
- )
- parser.add_argument(
- "--gradient_accumulation_steps",
- type=int,
- default=1,
- help="Number of updates steps to accumulate before performing a backward/update pass.",
- )
- parser.add_argument(
- "--learning_rate",
- type=float,
- default=1e-4,
- help="Initial learning rate (after the potential warmup period) to use.",
- )
- parser.add_argument(
- "--scale_lr",
- action="store_true",
- default=True,
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
- )
- parser.add_argument(
- "--lr_scheduler",
- type=str,
- default="constant",
- help=(
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
- ' "constant", "constant_with_warmup"]'
- ),
- )
- parser.add_argument(
- "--lr_warmup_steps",
- type=int,
- default=500,
- help="Number of steps for the warmup in the lr scheduler.",
- )
- parser.add_argument(
- "--adam_beta1",
- type=float,
- default=0.9,
- help="The beta1 parameter for the Adam optimizer.",
- )
- parser.add_argument(
- "--adam_beta2",
- type=float,
- default=0.999,
- help="The beta2 parameter for the Adam optimizer.",
- )
- parser.add_argument(
- "--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use."
- )
- parser.add_argument(
- "--adam_epsilon",
- type=float,
- default=1e-08,
- help="Epsilon value for the Adam optimizer",
- )
- parser.add_argument(
- "--push_to_hub",
- action="store_true",
- help="Whether or not to push the model to the Hub.",
- )
- parser.add_argument(
- "--hub_token",
- type=str,
- default=None,
- help="The token to use to push to the Model Hub.",
- )
- parser.add_argument(
- "--hub_model_id",
- type=str,
- default=None,
- help="The name of the repository to keep in sync with the local `output_dir`.",
- )
- parser.add_argument(
- "--logging_dir",
- type=str,
- default="logs",
- help=(
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
- ),
- )
- parser.add_argument(
- "--mixed_precision",
- type=str,
- default="no",
- choices=["no", "fp16", "bf16"],
- help=(
- "Whether to use mixed precision. Choose"
- "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
- "and an Nvidia Ampere GPU."
- ),
- )
- parser.add_argument(
- "--local_rank",
- type=int,
- default=-1,
- help="For distributed training: local_rank",
- )
-
- args = parser.parse_args()
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
- if env_local_rank != -1 and env_local_rank != args.local_rank:
- args.local_rank = env_local_rank
-
- if args.train_data_dir is None:
- raise ValueError("You must specify a train data directory.")
-
- return args
-
-
-imagenet_templates_small = [
- "a photo of a {}",
- "a rendering of a {}",
- "a cropped photo of the {}",
- "the photo of a {}",
- "a photo of a clean {}",
- "a photo of a dirty {}",
- "a dark photo of the {}",
- "a photo of my {}",
- "a photo of the cool {}",
- "a close-up photo of a {}",
- "a bright photo of the {}",
- "a cropped photo of a {}",
- "a photo of the {}",
- "a good photo of the {}",
- "a photo of one {}",
- "a close-up photo of the {}",
- "a rendition of the {}",
- "a photo of the clean {}",
- "a rendition of a {}",
- "a photo of a nice {}",
- "a good photo of a {}",
- "a photo of the nice {}",
- "a photo of the small {}",
- "a photo of the weird {}",
- "a photo of the large {}",
- "a photo of a cool {}",
- "a photo of a small {}",
-]
-
-imagenet_style_templates_small = [
- "a painting of {}, art by *",
- "a rendering of {}, art by *",
- "a cropped painting of {}, art by *",
- "the painting of {}, art by *",
- "a clean painting of {}, art by *",
- "a dirty painting of {}, art by *",
- "a dark painting of {}, art by *",
- "a picture of {}, art by *",
- "a cool painting of {}, art by *",
- "a close-up painting of {}, art by *",
- "a bright painting of {}, art by *",
- "a cropped painting of {}, art by *",
- "a good painting of {}, art by *",
- "a close-up painting of {}, art by *",
- "a rendition of {}, art by *",
- "a nice painting of {}, art by *",
- "a small painting of {}, art by *",
- "a weird painting of {}, art by *",
- "a large painting of {}, art by *",
-]
-
-
-class TextualInversionDataset(Dataset):
- def __init__(
- self,
- data_root,
- tokenizer,
- learnable_property="object", # [object, style]
- size=512,
- repeats=100,
- interpolation="bicubic",
- flip_p=0.5,
- set="train",
- placeholder_token="*",
- center_crop=False,
- ):
- self.data_root = data_root
- self.tokenizer = tokenizer
- self.learnable_property = learnable_property
- self.size = size
- self.placeholder_token = placeholder_token
- self.center_crop = center_crop
- self.flip_p = flip_p
-
- self.image_paths = [
- os.path.join(self.data_root, file_path)
- for file_path in os.listdir(self.data_root)
- ]
-
- self.num_images = len(self.image_paths)
- self._length = self.num_images
-
- if set == "train":
- self._length = self.num_images * repeats
-
- self.interpolation = {
- "linear": PIL_INTERPOLATION["linear"],
- "bilinear": PIL_INTERPOLATION["bilinear"],
- "bicubic": PIL_INTERPOLATION["bicubic"],
- "lanczos": PIL_INTERPOLATION["lanczos"],
- }[interpolation]
-
- self.templates = (
- imagenet_style_templates_small
- if learnable_property == "style"
- else imagenet_templates_small
- )
- self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
-
- def __len__(self):
- return self._length
-
- def __getitem__(self, i):
- example = {}
- image = Image.open(self.image_paths[i % self.num_images])
-
- if image.mode != "RGB":
- image = image.convert("RGB")
-
- placeholder_string = self.placeholder_token
- text = random.choice(self.templates).format(placeholder_string)
-
- example["input_ids"] = self.tokenizer(
- text,
- padding="max_length",
- truncation=True,
- max_length=self.tokenizer.model_max_length,
- return_tensors="pt",
- ).input_ids[0]
-
- # default to score-sde preprocessing
- img = np.array(image).astype(np.uint8)
-
- if self.center_crop:
- crop = min(img.shape[0], img.shape[1])
- h, w, = (
- img.shape[0],
- img.shape[1],
- )
- img = img[
- (h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2
- ]
-
- image = Image.fromarray(img)
- image = image.resize((self.size, self.size), resample=self.interpolation)
-
- image = self.flip_transform(image)
- image = np.array(image).astype(np.uint8)
- image = (image / 127.5 - 1.0).astype(np.float32)
-
- example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
- return example
-
-
-def get_full_repo_name(
- model_id: str, organization: Optional[str] = None, token: Optional[str] = None
-):
- if token is None:
- token = HfFolder.get_token()
- if organization is None:
- username = whoami(token)["name"]
- return f"{username}/{model_id}"
- else:
- return f"{organization}/{model_id}"
-
-
-def freeze_params(params):
- for param in params:
- param.requires_grad = False
-
-
-def main():
- args = parse_args()
- # logging_dir = os.path.join(args.output_dir, args.logging_dir)
-
- accelerator = Accelerator(
- gradient_accumulation_steps=args.gradient_accumulation_steps,
- mixed_precision=args.mixed_precision,
- )
-
- # If passed along, set the training seed now.
- if args.seed is not None:
- set_seed(args.seed)
-
- # Handle the repository creation
- if accelerator.is_main_process:
- if args.push_to_hub:
- if args.hub_model_id is None:
- repo_name = get_full_repo_name(
- Path(args.output_dir).name, token=args.hub_token
- )
- else:
- repo_name = args.hub_model_id
- repo = Repository(args.output_dir, clone_from=repo_name)
-
- with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
- if "step_*" not in gitignore:
- gitignore.write("step_*\n")
- if "epoch_*" not in gitignore:
- gitignore.write("epoch_*\n")
- elif args.output_dir is not None:
- os.makedirs(args.output_dir, exist_ok=True)
-
- # Load the tokenizer and add the placeholder token as an additional special token
- if args.tokenizer_name:
- tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
- elif args.pretrained_model_name_or_path:
- tokenizer = CLIPTokenizer.from_pretrained(
- args.pretrained_model_name_or_path, subfolder="tokenizer"
- )
-
- # Add the placeholder token in tokenizer
- num_added_tokens = tokenizer.add_tokens(args.placeholder_token)
- if num_added_tokens == 0:
- raise ValueError(
- f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
- " `placeholder_token` that is not already in the tokenizer."
- )
-
- # Convert the initializer_token, placeholder_token to ids
- token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
- # Check if initializer_token is a single token or a sequence of tokens
- if len(token_ids) > 1:
- raise ValueError("The initializer token must be a single token.")
-
- initializer_token_id = token_ids[0]
- placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token)
-
- # Load models and create wrapper for stable diffusion
- text_encoder = CLIPTextModel.from_pretrained(
- args.pretrained_model_name_or_path,
- subfolder="text_encoder",
- revision=args.revision,
- )
- vae = AutoencoderKL.from_pretrained(
- args.pretrained_model_name_or_path,
- subfolder="vae",
- revision=args.revision,
- )
- unet = UNet2DConditionModel.from_pretrained(
- args.pretrained_model_name_or_path,
- subfolder="unet",
- revision=args.revision,
- )
-
- # Resize the token embeddings as we are adding new special tokens to the tokenizer
- text_encoder.resize_token_embeddings(len(tokenizer))
-
- # Initialise the newly added placeholder token with the embeddings of the initializer token
- token_embeds = text_encoder.get_input_embeddings().weight.data
- token_embeds[placeholder_token_id] = token_embeds[initializer_token_id]
-
- # Freeze vae and unet
- freeze_params(vae.parameters())
- freeze_params(unet.parameters())
- # Freeze all parameters except for the token embeddings in text encoder
- params_to_freeze = itertools.chain(
- text_encoder.text_model.encoder.parameters(),
- text_encoder.text_model.final_layer_norm.parameters(),
- text_encoder.text_model.embeddings.position_embedding.parameters(),
- )
- freeze_params(params_to_freeze)
-
- if args.scale_lr:
- args.learning_rate = (
- args.learning_rate
- * args.gradient_accumulation_steps
- * args.train_batch_size
- * accelerator.num_processes
- )
-
- # Initialize the optimizer
- optimizer = torch.optim.AdamW(
- text_encoder.get_input_embeddings().parameters(), # only optimize the embeddings
- lr=args.learning_rate,
- betas=(args.adam_beta1, args.adam_beta2),
- weight_decay=args.adam_weight_decay,
- eps=args.adam_epsilon,
- )
-
- noise_scheduler = DDPMScheduler.from_pretrained(
- args.pretrained_model_name_or_path, subfolder="scheduler"
- )
-
- train_dataset = TextualInversionDataset(
- data_root=args.train_data_dir,
- tokenizer=tokenizer,
- size=args.resolution,
- placeholder_token=args.placeholder_token,
- repeats=args.repeats,
- learnable_property=args.learnable_property,
- center_crop=args.center_crop,
- set="train",
- )
- train_dataloader = torch.utils.data.DataLoader(
- train_dataset, batch_size=args.train_batch_size, shuffle=True
- )
-
- # Scheduler and math around the number of training steps.
- overrode_max_train_steps = False
- num_update_steps_per_epoch = math.ceil(
- len(train_dataloader) / args.gradient_accumulation_steps
- )
- if args.max_train_steps is None:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- overrode_max_train_steps = True
-
- lr_scheduler = get_scheduler(
- args.lr_scheduler,
- optimizer=optimizer,
- num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
- num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
- )
-
- text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
- text_encoder, optimizer, train_dataloader, lr_scheduler
- )
-
- # Move vae and unet to device
- vae.to(accelerator.device)
- unet.to(accelerator.device)
-
-    # Keep vae and unet in eval mode as we don't train these
- vae.eval()
- unet.eval()
-
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
- num_update_steps_per_epoch = math.ceil(
- len(train_dataloader) / args.gradient_accumulation_steps
- )
- if overrode_max_train_steps:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- # Afterwards we recalculate our number of training epochs
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
- # We need to initialize the trackers we use, and also store our configuration.
-    # The trackers initialize automatically on the main process.
- if accelerator.is_main_process:
- accelerator.init_trackers("textual_inversion", config=vars(args))
-
- # Train!
- total_batch_size = (
- args.train_batch_size
- * accelerator.num_processes
- * args.gradient_accumulation_steps
- )
-
- logger.info("***** Running training *****")
- logger.info(f" Num examples = {len(train_dataset)}")
- logger.info(f" Num Epochs = {args.num_train_epochs}")
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
- logger.info(
- f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"
- )
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
- logger.info(f" Total optimization steps = {args.max_train_steps}")
- # Only show the progress bar once on each machine.
- progress_bar = tqdm(
- range(args.max_train_steps), disable=not accelerator.is_local_main_process
- )
- progress_bar.set_description("Steps")
- global_step = 0
-
- for epoch in range(args.num_train_epochs):
- text_encoder.train()
- for step, batch in enumerate(train_dataloader):
- with accelerator.accumulate(text_encoder):
- # Convert images to latent space
- latents = (
- vae.encode(batch["pixel_values"]).latent_dist.sample().detach()
- )
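-                # 0.18215 is the latent scaling factor of the Stable Diffusion v1 VAE.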
- latents = latents * 0.18215
-
- # Sample noise that we'll add to the latents
- noise = torch.randn(latents.shape).to(latents.device)
- bsz = latents.shape[0]
- # Sample a random timestep for each image
- timesteps = torch.randint(
- 0,
- noise_scheduler.config.num_train_timesteps,
- (bsz,),
- device=latents.device,
- ).long()
-
- # Add noise to the latents according to the noise magnitude at each timestep
- # (this is the forward diffusion process)
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
-
- # Get the text embedding for conditioning
- encoder_hidden_states = text_encoder(batch["input_ids"])[0]
-
- # Predict the noise residual
- model_pred = unet(
- noisy_latents, timesteps, encoder_hidden_states
- ).sample
-
- # Get the target for loss depending on the prediction type
- if noise_scheduler.config.prediction_type == "epsilon":
- target = noise
- elif noise_scheduler.config.prediction_type == "v_prediction":
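-                    # Velocity target for v-prediction: v = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * latents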
- target = noise_scheduler.get_velocity(latents, noise, timesteps)
- else:
- raise ValueError(
- f"Unknown prediction type {noise_scheduler.config.prediction_type}"
- )
-
- loss = (
- F.mse_loss(model_pred, target, reduction="none")
- .mean([1, 2, 3])
- .mean()
- )
- accelerator.backward(loss)
-
- # Zero out the gradients for all token embeddings except the newly added
- # embeddings for the concept, as we only want to optimize the concept embeddings
- if accelerator.num_processes > 1:
- grads = text_encoder.module.get_input_embeddings().weight.grad
- else:
- grads = text_encoder.get_input_embeddings().weight.grad
- # Get the index for tokens that we want to zero the grads for
- index_grads_to_zero = (
- torch.arange(len(tokenizer)) != placeholder_token_id
- )
- grads.data[index_grads_to_zero, :] = grads.data[
- index_grads_to_zero, :
- ].fill_(0)
-
- optimizer.step()
- lr_scheduler.step()
- optimizer.zero_grad()
-
- # Checks if the accelerator has performed an optimization step behind the scenes
- if accelerator.sync_gradients:
- progress_bar.update(1)
- global_step += 1
- if global_step % args.save_steps == 0:
- save_path = os.path.join(
- args.output_dir, f"learned_embeds-steps-{global_step}.bin"
- )
- save_progress(
- text_encoder, placeholder_token_id, accelerator, args, save_path
- )
-
- logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
- progress_bar.set_postfix(**logs)
- accelerator.log(logs, step=global_step)
-
- if global_step >= args.max_train_steps:
- break
-
- accelerator.wait_for_everyone()
-
-    # Create the pipeline using the trained modules and save it.
- if accelerator.is_main_process:
- if args.push_to_hub and args.only_save_embeds:
- logger.warn(
- "Enabling full model saving because --push_to_hub=True was specified."
- )
- save_full_model = True
- else:
- save_full_model = not args.only_save_embeds
- if save_full_model:
- pipeline = StableDiffusionPipeline(
- text_encoder=accelerator.unwrap_model(text_encoder),
- vae=vae,
- unet=unet,
- tokenizer=tokenizer,
- scheduler=PNDMScheduler.from_pretrained(
- args.pretrained_model_name_or_path, subfolder="scheduler"
- ),
- safety_checker=StableDiffusionSafetyChecker.from_pretrained(
- "CompVis/stable-diffusion-safety-checker"
- ),
- feature_extractor=CLIPFeatureExtractor.from_pretrained(
- "openai/clip-vit-base-patch32"
- ),
- )
- pipeline.save_pretrained(args.output_dir)
- # Save the newly trained embeddings
- save_path = os.path.join(args.output_dir, "learned_embeds.bin")
- save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path)
-
- if args.push_to_hub:
- repo.push_to_hub(
- commit_message="End of training", blocking=False, auto_lfs_prune=True
- )
-
- accelerator.end_training()
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/Wayben/ChatGPT/Dockerfile b/spaces/Wayben/ChatGPT/Dockerfile
deleted file mode 100644
index 8cbd335b09b1d1975bfd83a053b5fcaf398147ea..0000000000000000000000000000000000000000
--- a/spaces/Wayben/ChatGPT/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-FROM python:3.9 as builder
-RUN apt-get update && apt-get install -y build-essential
-COPY requirements.txt .
-RUN pip install --user -r requirements.txt
-
-FROM python:3.9
-LABEL maintainer="iskoldt"
-COPY --from=builder /root/.local /root/.local
-ENV PATH=/root/.local/bin:$PATH
-COPY . /app
-WORKDIR /app
-ENV my_api_key empty
-ENV dockerrun yes
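-# Shell-form CMD so that the redirection and pipe to tee are interpreted by /bin/sh.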
-CMD python3 -u ChuanhuChatbot.py 2>&1 | tee /var/log/application.log
diff --git a/spaces/WiNE-iNEFF/MinecraftSkin-Diffusion/README.md b/spaces/WiNE-iNEFF/MinecraftSkin-Diffusion/README.md
deleted file mode 100644
index acb375952a5a1c31a34c1ca395f49d4d8b4c4d4e..0000000000000000000000000000000000000000
--- a/spaces/WiNE-iNEFF/MinecraftSkin-Diffusion/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Minecraft Skin Diffusion
-emoji: 🦀
-colorFrom: yellow
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Wrathless/Dkrotzer-MusicalMagic/audiocraft/quantization/base.py b/spaces/Wrathless/Dkrotzer-MusicalMagic/audiocraft/quantization/base.py
deleted file mode 100644
index 1b16c130d266fbd021d3fc29bb9f98c33dd3c588..0000000000000000000000000000000000000000
--- a/spaces/Wrathless/Dkrotzer-MusicalMagic/audiocraft/quantization/base.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-Base class for all quantizers.
-"""
-
-from dataclasses import dataclass, field
-import typing as tp
-
-import torch
-from torch import nn
-
-
-@dataclass
-class QuantizedResult:
- x: torch.Tensor
- codes: torch.Tensor
- bandwidth: torch.Tensor # bandwidth in kb/s used, per batch item.
- penalty: tp.Optional[torch.Tensor] = None
- metrics: dict = field(default_factory=dict)
-
-
-class BaseQuantizer(nn.Module):
- """Base class for quantizers.
- """
-
- def forward(self, x: torch.Tensor, frame_rate: int) -> QuantizedResult:
- """
-        Given an input tensor x, return a QuantizedResult holding the quantized (or approximately
-        quantized) representation, the quantized codes, the bandwidth used, an optional penalty
-        term for the loss, and a dict of metrics for logging.
-        The frame rate must be passed so that the bandwidth can be computed properly.
- """
- raise NotImplementedError()
-
- def encode(self, x: torch.Tensor) -> torch.Tensor:
- """Encode a given input tensor with the specified sample rate at the given bandwidth.
- """
- raise NotImplementedError()
-
- def decode(self, codes: torch.Tensor) -> torch.Tensor:
- """Decode the given codes to the quantized representation.
- """
- raise NotImplementedError()
-
- @property
- def total_codebooks(self):
- """Total number of codebooks.
- """
- raise NotImplementedError()
-
- @property
- def num_codebooks(self):
- """Number of active codebooks.
- """
- raise NotImplementedError()
-
- def set_num_codebooks(self, n: int):
- """Set the number of active codebooks.
- """
- raise NotImplementedError()
-
-
-class DummyQuantizer(BaseQuantizer):
- """Fake quantizer that actually does not perform any quantization.
- """
- def __init__(self):
- super().__init__()
-
- def forward(self, x: torch.Tensor, frame_rate: int):
- q = x.unsqueeze(1)
- return QuantizedResult(x, q, torch.tensor(q.numel() * 32 * frame_rate / 1000 / len(x)).to(x))
-
- def encode(self, x: torch.Tensor) -> torch.Tensor:
- """Encode a given input tensor with the specified sample rate at the given bandwidth.
- In the case of the DummyQuantizer, the codes are actually identical
- to the input and resulting quantized representation as no quantization is done.
- """
- return x.unsqueeze(1)
-
- def decode(self, codes: torch.Tensor) -> torch.Tensor:
- """Decode the given codes to the quantized representation.
- In the case of the DummyQuantizer, the codes are actually identical
- to the input and resulting quantized representation as no quantization is done.
- """
- return codes.squeeze(1)
-
- @property
- def total_codebooks(self):
- """Total number of codebooks.
- """
- return 1
-
- @property
- def num_codebooks(self):
-        """Number of active codebooks.
- """
- return self.total_codebooks
-
- def set_num_codebooks(self, n: int):
- """Set the number of active codebooks.
- """
- raise AttributeError("Cannot override the number of codebooks for the dummy quantizer")
diff --git a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/tabular/data.py b/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/tabular/data.py
deleted file mode 100644
index c763bdc7784e04b7c321ea78189bbc35d1fb4175..0000000000000000000000000000000000000000
--- a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/tabular/data.py
+++ /dev/null
@@ -1,178 +0,0 @@
-"Data loading pipeline for structured data support. Loads from pandas DataFrame"
-from ..torch_core import *
-from .transform import *
-from ..basic_data import *
-from ..data_block import *
-from ..basic_train import *
-from .models import *
-from pandas.api.types import is_numeric_dtype, is_categorical_dtype
-
-__all__ = ['TabularDataBunch', 'TabularLine', 'TabularList', 'TabularProcessor', 'tabular_learner']
-
-OptTabTfms = Optional[Collection[TabularProc]]
-
-#def emb_sz_rule(n_cat:int)->int: return min(50, (n_cat//2)+1)
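-# Rule of thumb: embedding size ~ 1.6 * n_cat**0.56, capped at 600 (e.g. n_cat=100 -> size 21).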
-def emb_sz_rule(n_cat:int)->int: return min(600, round(1.6 * n_cat**0.56))
-
-def def_emb_sz(classes, n, sz_dict=None):
- "Pick an embedding size for `n` depending on `classes` if not given in `sz_dict`."
- sz_dict = ifnone(sz_dict, {})
- n_cat = len(classes[n])
- sz = sz_dict.get(n, int(emb_sz_rule(n_cat))) # rule of thumb
- return n_cat,sz
-
-class TabularLine(ItemBase):
- "Basic item for tabular data."
- def __init__(self, cats, conts, classes, names):
- self.cats,self.conts,self.classes,self.names = cats,conts,classes,names
- self.data = [tensor(cats), tensor(conts)]
-
- def __str__(self):
- res = ''
- for c, n in zip(self.cats, self.names[:len(self.cats)]):
- res += f"{n} {(self.classes[n][c])}; "
- for c,n in zip(self.conts, self.names[len(self.cats):]):
- res += f'{n} {c:.4f}; '
- return res
-
-class TabularProcessor(PreProcessor):
- "Regroup the `procs` in one `PreProcessor`."
- def __init__(self, ds:ItemBase=None, procs=None):
- procs = ifnone(procs, ds.procs if ds is not None else None)
- self.procs = listify(procs)
-
- def process_one(self, item):
- df = pd.DataFrame([item,item])
- for proc in self.procs: proc(df, test=True)
- if len(self.cat_names) != 0:
- codes = np.stack([c.cat.codes.values for n,c in df[self.cat_names].items()], 1).astype(np.int64) + 1
- else: codes = [[]]
- if len(self.cont_names) != 0:
- conts = np.stack([c.astype('float32').values for n,c in df[self.cont_names].items()], 1)
- else: conts = [[]]
- classes = None
- col_names = list(df[self.cat_names].columns.values) + list(df[self.cont_names].columns.values)
- return TabularLine(codes[0], conts[0], classes, col_names)
-
- def process(self, ds):
- if ds.inner_df is None:
- ds.classes,ds.cat_names,ds.cont_names = self.classes,self.cat_names,self.cont_names
- ds.col_names = self.cat_names + self.cont_names
- ds.preprocessed = True
- return
- for i,proc in enumerate(self.procs):
- if isinstance(proc, TabularProc): proc(ds.inner_df, test=True)
- else:
-                # cat and cont names may have been changed by a transform (like FillMissing)
- proc = proc(ds.cat_names, ds.cont_names)
- proc(ds.inner_df)
- ds.cat_names,ds.cont_names = proc.cat_names,proc.cont_names
- self.procs[i] = proc
- self.cat_names,self.cont_names = ds.cat_names,ds.cont_names
- if len(ds.cat_names) != 0:
- ds.codes = np.stack([c.cat.codes.values for n,c in ds.inner_df[ds.cat_names].items()], 1).astype(np.int64) + 1
- self.classes = ds.classes = OrderedDict({n:np.concatenate([['#na#'],c.cat.categories.values])
- for n,c in ds.inner_df[ds.cat_names].items()})
- cat_cols = list(ds.inner_df[ds.cat_names].columns.values)
- else: ds.codes,ds.classes,self.classes,cat_cols = None,None,None,[]
- if len(ds.cont_names) != 0:
- ds.conts = np.stack([c.astype('float32').values for n,c in ds.inner_df[ds.cont_names].items()], 1)
- cont_cols = list(ds.inner_df[ds.cont_names].columns.values)
- else: ds.conts,cont_cols = None,[]
- ds.col_names = cat_cols + cont_cols
- ds.preprocessed = True
-
-class TabularDataBunch(DataBunch):
- "Create a `DataBunch` suitable for tabular data."
- @classmethod
- def from_df(cls, path, df:DataFrame, dep_var:str, valid_idx:Collection[int], procs:OptTabTfms=None,
- cat_names:OptStrList=None, cont_names:OptStrList=None, classes:Collection=None,
- test_df=None, bs:int=64, val_bs:int=None, num_workers:int=defaults.cpus, dl_tfms:Optional[Collection[Callable]]=None,
- device:torch.device=None, collate_fn:Callable=data_collate, no_check:bool=False)->DataBunch:
- "Create a `DataBunch` from `df` and `valid_idx` with `dep_var`. `kwargs` are passed to `DataBunch.create`."
- cat_names = ifnone(cat_names, []).copy()
- cont_names = ifnone(cont_names, list(set(df)-set(cat_names)-{dep_var}))
- procs = listify(procs)
- src = (TabularList.from_df(df, path=path, cat_names=cat_names, cont_names=cont_names, procs=procs)
- .split_by_idx(valid_idx))
- src = src.label_from_df(cols=dep_var) if classes is None else src.label_from_df(cols=dep_var, classes=classes)
- if test_df is not None: src.add_test(TabularList.from_df(test_df, cat_names=cat_names, cont_names=cont_names,
- processor = src.train.x.processor))
- return src.databunch(path=path, bs=bs, val_bs=val_bs, num_workers=num_workers, device=device,
- collate_fn=collate_fn, no_check=no_check)
-
-class TabularList(ItemList):
- "Basic `ItemList` for tabular data."
- _item_cls=TabularLine
- _processor=TabularProcessor
- _bunch=TabularDataBunch
- def __init__(self, items:Iterator, cat_names:OptStrList=None, cont_names:OptStrList=None,
- procs=None, **kwargs)->'TabularList':
- super().__init__(range_of(items), **kwargs)
- #dataframe is in inner_df, items is just a range of index
- if cat_names is None: cat_names = []
- if cont_names is None: cont_names = []
- self.cat_names,self.cont_names,self.procs = cat_names,cont_names,procs
- self.copy_new += ['cat_names', 'cont_names', 'procs']
- self.preprocessed = False
-
- @classmethod
- def from_df(cls, df:DataFrame, cat_names:OptStrList=None, cont_names:OptStrList=None, procs=None, **kwargs)->'ItemList':
-        "Create a `TabularList` from `df`, with `cat_names`, `cont_names` and `procs`."
- return cls(items=range(len(df)), cat_names=cat_names, cont_names=cont_names, procs=procs, inner_df=df.copy(), **kwargs)
-
- def get(self, o):
- if not self.preprocessed: return self.inner_df.iloc[o] if hasattr(self, 'inner_df') else self.items[o]
- codes = [] if self.codes is None else self.codes[o]
- conts = [] if self.conts is None else self.conts[o]
- return self._item_cls(codes, conts, self.classes, self.col_names)
-
- def get_emb_szs(self, sz_dict=None):
-        "Return the default embedding sizes suitable for this data, or take the ones given in `sz_dict`."
- return [def_emb_sz(self.classes, n, sz_dict) for n in self.cat_names]
-
- def reconstruct(self, t:Tensor):
- return self._item_cls(t[0], t[1], self.classes, self.col_names)
-
- def show_xys(self, xs, ys)->None:
- "Show the `xs` (inputs) and `ys` (targets)."
- from IPython.display import display, HTML
- items,names = [], xs[0].names + ['target']
- for i, (x,y) in enumerate(zip(xs,ys)):
- res = []
- cats = x.cats if len(x.cats.size()) > 0 else []
- conts = x.conts if len(x.conts.size()) > 0 else []
- for c, n in zip(cats, x.names[:len(cats)]):
- res.append(x.classes[n][c])
- res += [f'{c:.4f}' for c in conts] + [y]
- items.append(res)
- items = np.array(items)
- df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names)
- with pd.option_context('display.max_colwidth', -1):
- display(HTML(df.to_html(index=False)))
-
- def show_xyzs(self, xs, ys, zs):
- "Show `xs` (inputs), `ys` (targets) and `zs` (predictions)."
- from IPython.display import display, HTML
- items,names = [], xs[0].names + ['target', 'prediction']
- for i, (x,y,z) in enumerate(zip(xs,ys,zs)):
- res = []
- cats = x.cats if len(x.cats.size()) > 0 else []
- conts = x.conts if len(x.conts.size()) > 0 else []
- for c, n in zip(cats, x.names[:len(cats)]):
- res.append(str(x.classes[n][c]))
- res += [f'{c:.4f}' for c in conts] + [y, z]
- items.append(res)
- items = np.array(items)
- df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names)
- with pd.option_context('display.max_colwidth', -1):
- display(HTML(df.to_html(index=False)))
-
-def tabular_learner(data:DataBunch, layers:Collection[int], emb_szs:Dict[str,int]=None, metrics=None,
- ps:Collection[float]=None, emb_drop:float=0., y_range:OptRange=None, use_bn:bool=True, **learn_kwargs):
- "Get a `Learner` using `data`, with `metrics`, including a `TabularModel` created using the remaining params."
- emb_szs = data.get_emb_szs(ifnone(emb_szs, {}))
- model = TabularModel(emb_szs, len(data.cont_names), out_sz=data.c, layers=layers, ps=ps, emb_drop=emb_drop,
- y_range=y_range, use_bn=use_bn)
- return Learner(data, model, metrics=metrics, **learn_kwargs)
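-
-# Illustrative usage (hypothetical data and metric, not part of the original module):
-#   learn = tabular_learner(data, layers=[200, 100], metrics=accuracy)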
-
diff --git a/spaces/Xenova/ai-code-playground/assets/worker-5130d00f.js b/spaces/Xenova/ai-code-playground/assets/worker-5130d00f.js
deleted file mode 100644
index cf08c32b21ad186f9ae3580250b2a7f5e16a456b..0000000000000000000000000000000000000000
--- a/spaces/Xenova/ai-code-playground/assets/worker-5130d00f.js
+++ /dev/null
@@ -1,1790 +0,0 @@
-var fn=Object.defineProperty;var gn=(nt,_,n)=>_ in nt?fn(nt,_,{enumerable:!0,configurable:!0,writable:!0,value:n}):nt[_]=n;var Ie=(nt,_,n)=>(gn(nt,typeof _!="symbol"?_+"":_,n),n);(function(){var nt;"use strict";function _mergeNamespaces(_,n){return n.forEach(function(a){a&&typeof a!="string"&&!Array.isArray(a)&&Object.keys(a).forEach(function(u){if(u!=="default"&&!(u in _)){var l=Object.getOwnPropertyDescriptor(a,u);Object.defineProperty(_,u,l.get?l:{enumerable:!0,get:function(){return a[u]}})}})}),Object.freeze(_)}function dispatchCallback(_,n){_!==null&&_(n)}function reverseDictionary(_){return Object.fromEntries(Object.entries(_).map(([n,a])=>[a,n]))}function escapeRegExp(_){return _.replace(/[.*+?^${}()|[\]\\]/g,"\\$&")}const Callable=class{constructor(){let _=function(...n){return _._call(...n)};return Object.setPrototypeOf(_,new.target.prototype)}_call(..._){throw Error("Must implement _call method in subclass")}};function isString(_){return typeof _=="string"||_ instanceof String}function isTypedArray(_){var n,a,u;return((u=(a=(n=_==null?void 0:_.prototype)==null?void 0:n.__proto__)==null?void 0:a.constructor)==null?void 0:u.name)==="TypedArray"}function isIntegralNumber(_){return Number.isInteger(_)||typeof _=="bigint"}function exists(_){return _!=null}function calculateDimensions(_){const n=[];let a=_;for(;Array.isArray(a);)n.push(a.length),a=a[0];return n}function pop(_,n,a=void 0){const u=_[n];if(u!==void 0)return delete _[n],u;if(a===void 0)throw Error(`Key ${n} does not exist in object.`);return a}function mergeArrays(..._){return Array.prototype.concat.apply([],_)}var fs={},ONNX_NODE=Object.freeze({__proto__:null,default:fs});function getDefaultExportFromCjs(_){return _&&_.__esModule&&Object.prototype.hasOwnProperty.call(_,"default")?_.default:_}function getAugmentedNamespace(_){if(_.__esModule)return _;var n=_.default;if(typeof n=="function"){var a=function u(){return this instanceof u?Reflect.construct(n,arguments,this.constructor):n.apply(this,arguments)};a.prototype=n.prototype}else a={};return Object.defineProperty(a,"__esModule",{value:!0}),Object.keys(_).forEach(function(u){var l=Object.getOwnPropertyDescriptor(_,u);Object.defineProperty(a,u,l.get?l:{enumerable:!0,get:function(){return _[u]}})}),a}var ortWeb_min$1={exports:{}};const backends={},backendsSortedByPriority=[],registerBackend=(_,n,a)=>{if(n&&typeof n.init=="function"&&typeof n.createSessionHandler=="function"){const u=backends[_];if(u===void 0)backends[_]={backend:n,priority:a};else{if(u.priority>a)return;if(u.priority===a&&u.backend!==n)throw new Error(`cannot register backend "${_}" using priority ${a}`)}if(a>=0){const l=backendsSortedByPriority.indexOf(_);l!==-1&&backendsSortedByPriority.splice(l,1);for(let p=0;p{const n=_.length===0?backendsSortedByPriority:_,a=[];for(const u of n){const l=backends[u];if(l){if(l.initialized)return l.backend;if(l.aborted)continue;const p=!!l.initPromise;try{return p||(l.initPromise=l.backend.init()),await l.initPromise,l.initialized=!0,l.backend}catch(s){p||a.push({name:u,err:s}),l.aborted=!0}finally{delete l.initPromise}}}throw new Error(`no available backend found. 
ERR: ${a.map(u=>`[${u.name}] ${u.err}`).join(", ")}`)};class EnvImpl{constructor(){this.wasm={},this.webgl={},this.logLevelInternal="warning"}set logLevel(n){if(n!==void 0){if(typeof n!="string"||["verbose","info","warning","error","fatal"].indexOf(n)===-1)throw new Error(`Unsupported logging level: ${n}`);this.logLevelInternal=n}}get logLevel(){return this.logLevelInternal}}const env$1=new EnvImpl,isBigInt64ArrayAvailable=typeof BigInt64Array<"u"&&typeof BigInt64Array.from=="function",isBigUint64ArrayAvailable=typeof BigUint64Array<"u"&&typeof BigUint64Array.from=="function",NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP=new Map([["float32",Float32Array],["uint8",Uint8Array],["int8",Int8Array],["uint16",Uint16Array],["int16",Int16Array],["int32",Int32Array],["bool",Uint8Array],["float64",Float64Array],["uint32",Uint32Array]]),NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP=new Map([[Float32Array,"float32"],[Uint8Array,"uint8"],[Int8Array,"int8"],[Uint16Array,"uint16"],[Int16Array,"int16"],[Int32Array,"int32"],[Float64Array,"float64"],[Uint32Array,"uint32"]]);isBigInt64ArrayAvailable&&(NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.set("int64",BigInt64Array),NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP.set(BigInt64Array,"int64")),isBigUint64ArrayAvailable&&(NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.set("uint64",BigUint64Array),NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP.set(BigUint64Array,"uint64"));const calculateSize=_=>{let n=1;for(let a=0;a<_.length;a++){const u=_[a];if(typeof u!="number"||!Number.isSafeInteger(u))throw new TypeError(`dims[${a}] must be an integer, got: ${u}`);if(u<0)throw new RangeError(`dims[${a}] must be a non-negative integer, got: ${u}`);n*=u}return n};let Tensor$2=class ut{constructor(n,a,u){let l,p,s;if(typeof n=="string")if(l=n,s=u,n==="string"){if(!Array.isArray(a))throw new TypeError("A string tensor's data must be a string array.");p=a}else{const f=NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.get(n);if(f===void 0)throw new TypeError(`Unsupported tensor type: ${n}.`);if(Array.isArray(a))p=f.from(a);else if(a instanceof f)p=a;else throw new TypeError(`A ${l} tensor's data must be type of ${f}`)}else if(s=a,Array.isArray(n)){if(n.length===0)throw new TypeError("Tensor type cannot be inferred from an empty array.");const f=typeof n[0];if(f==="string")l="string",p=n;else if(f==="boolean")l="bool",p=Uint8Array.from(n);else throw new TypeError(`Invalid element type of data array: ${f}.`)}else{const f=NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP.get(n.constructor);if(f===void 0)throw new TypeError(`Unsupported type for tensor data: ${n.constructor}.`);l=f,p=n}if(s===void 0)s=[p.length];else if(!Array.isArray(s))throw new TypeError("A tensor's dims must be a number array");const h=calculateSize(s);if(h!==p.length)throw new Error(`Tensor's size(${h}) does not match data length(${p.length}).`);this.dims=s,this.type=l,this.data=p,this.size=h}static bufferToTensor(n,a){if(n===void 0)throw new Error("Image buffer must be defined");if(a.height===void 0||a.width===void 0)throw new Error("Image height and width must be defined");const{height:u,width:l}=a,p=a.norm;let s,h;p===void 0||p.mean===void 0?s=255:s=p.mean,p===void 0||p.bias===void 0?h=0:h=p.bias;const f=a.bitmapFormat!==void 0?a.bitmapFormat:"RGBA",c=a.tensorFormat!==void 0&&a.tensorFormat!==void 0?a.tensorFormat:"RGB",o=u*l,t=c==="RGBA"?new Float32Array(o*4):new Float32Array(o*3);let e=4,r=0,i=1,d=2,g=3,m=0,b=o,y=o*2,w=-1;f==="RGB"&&(e=3,r=0,i=1,d=2,g=-1),c==="RGBA"?w=o*3:c==="RBG"?(m=0,y=o,b=o*2):c==="BGR"&&(y=0,b=o,m=o*2);for(let S=0;S{const 
t=document.createElement("canvas"),e=t.getContext("2d");if(!n||!e)return o();const r=new Image;r.crossOrigin="Anonymous",r.src=n,r.onload=()=>{t.width=r.width,t.height=r.height,e.drawImage(r,0,0,t.width,t.height);const i=e.getImageData(0,0,t.width,t.height);if(a!==void 0){if(a.height!==void 0&&a.height!==t.height)throw new Error("Image input config height doesn't match ImageBitmap height");if(f.height=t.height,a.width!==void 0&&a.width!==t.width)throw new Error("Image input config width doesn't match ImageBitmap width");f.width=t.width}else f.height=t.height,f.width=t.width;c(ut.bufferToTensor(i.data,f))}});throw new Error("Input data provided is not supported - aborted tensor creation")}if(h!==void 0)return ut.bufferToTensor(h,f);throw new Error("Input data provided is not supported - aborted tensor creation")}toImageData(n){var a,u;const l=document.createElement("canvas").getContext("2d");let p;if(l!=null){const s=this.dims[3],h=this.dims[2],f=this.dims[1],c=n!==void 0&&n.format!==void 0?n.format:"RGB",o=n!==void 0&&((a=n.norm)===null||a===void 0?void 0:a.mean)!==void 0?n.norm.mean:255,t=n!==void 0&&((u=n.norm)===null||u===void 0?void 0:u.bias)!==void 0?n.norm.bias:0,e=h*s;if(n!==void 0){if(n.height!==void 0&&n.height!==h)throw new Error("Image output config height doesn't match tensor height");if(n.width!==void 0&&n.width!==s)throw new Error("Image output config width doesn't match tensor width");if(n.format!==void 0&&f===4&&n.format!=="RGBA"||f===3&&n.format!=="RGB"&&n.format!=="BGR")throw new Error("Tensor format doesn't match input tensor dims")}const r=4;let i=0,d=1,g=2,m=3,b=0,y=e,w=e*2,v=-1;c==="RGBA"?(b=0,y=e,w=e*2,v=e*3):c==="RGB"?(b=0,y=e,w=e*2):c==="RBG"&&(b=0,w=e,y=e*2),p=l.createImageData(s,h);for(let S=0;S"u")throw new Error(`input '${c}' is missing in 'feeds'.`);if(s)for(const c of this.outputNames)l[c]=null;const h=await this.handler.run(n,l,p),f={};for(const c in h)Object.hasOwnProperty.call(h,c)&&(f[c]=new Tensor$1(h[c].type,h[c].data,h[c].dims));return f}static async create(n,a,u,l){let p,s={};if(typeof n=="string"){if(p=n,typeof a=="object"&&a!==null)s=a;else if(typeof a<"u")throw new TypeError("'options' must be an object.")}else if(n instanceof Uint8Array){if(p=n,typeof a=="object"&&a!==null)s=a;else if(typeof a<"u")throw new TypeError("'options' must be an object.")}else if(n instanceof ArrayBuffer||typeof SharedArrayBuffer<"u"&&n instanceof SharedArrayBuffer){const t=n;let e=0,r=n.byteLength;if(typeof a=="object"&&a!==null)s=a;else if(typeof a=="number"){if(e=a,!Number.isSafeInteger(e))throw new RangeError("'byteOffset' must be an integer.");if(e<0||e>=t.byteLength)throw new RangeError(`'byteOffset' is out of range [0, ${t.byteLength}).`);if(r=n.byteLength-e,typeof u=="number"){if(r=u,!Number.isSafeInteger(r))throw new RangeError("'byteLength' must be an integer.");if(r<=0||e+r>t.byteLength)throw new RangeError(`'byteLength' is out of range (0, ${t.byteLength-e}].`);if(typeof l=="object"&&l!==null)s=l;else if(typeof l<"u")throw new TypeError("'options' must be an object.")}else if(typeof u<"u")throw new TypeError("'byteLength' must be a number.")}else if(typeof a<"u")throw new TypeError("'options' must be an object.");p=new Uint8Array(t,e,r)}else throw new TypeError("Unexpected argument[0]: must be 'path' or 'buffer'.");const f=(s.executionProviders||[]).map(t=>typeof t=="string"?t:t.name),o=await(await resolveBackend(f)).createSessionHandler(p,s);return new dn(o)}startProfiling(){this.handler.startProfiling()}endProfiling(){this.handler.endProfiling()}get 
inputNames(){return this.handler.inputNames}get outputNames(){return this.handler.outputNames}};const InferenceSession$1=InferenceSession$2;var lib=Object.freeze({__proto__:null,InferenceSession:InferenceSession$1,Tensor:Tensor$1,env:env$1,registerBackend}),require$$0=getAugmentedNamespace(lib);/*!
-* ONNX Runtime Web v1.14.0
-* Copyright (c) Microsoft Corporation. All rights reserved.
-* Licensed under the MIT License.
-*/(function(module,exports){(function(_,n){module.exports=n(require$$0)})(self,__WEBPACK_EXTERNAL_MODULE__1670__=>(()=>{var __webpack_modules__={3474:(_,n,a)=>{var u,l=(u=(u=typeof document<"u"&&document.currentScript?document.currentScript.src:void 0)||"/index.js",function(p){function s(){return X.buffer!=ee&&Oe(X.buffer),ue}function h(){return X.buffer!=ee&&Oe(X.buffer),Se}function f(){return X.buffer!=ee&&Oe(X.buffer),ve}function c(){return X.buffer!=ee&&Oe(X.buffer),oe}function o(){return X.buffer!=ee&&Oe(X.buffer),ye}var t,e,r;p=p||{},t||(t=p!==void 0?p:{}),t.ready=new Promise(function(T,E){e=T,r=E});var i,d,g,m,b,y,w=Object.assign({},t),v="./this.program",S=(T,E)=>{throw E},A=typeof window=="object",O=typeof importScripts=="function",x=typeof process=="object"&&typeof process.versions=="object"&&typeof process.versions.node=="string",I=t.ENVIRONMENT_IS_PTHREAD||!1,N="";function R(T){return t.locateFile?t.locateFile(T,N):N+T}if(x){let T;N=O?a(908).dirname(N)+"/":"//",y=()=>{b||(m=a(1384),b=a(908))},i=function(E,k){return y(),E=b.normalize(E),m.readFileSync(E,k?void 0:"utf8")},g=E=>((E=i(E,!0)).buffer||(E=new Uint8Array(E)),E),d=(E,k,C)=>{y(),E=b.normalize(E),m.readFile(E,function(B,V){B?C(B):k(V.buffer)})},1{if(qe())throw process.exitCode=E,k;k instanceof Qe||j("exiting due to exception: "+k),process.exit(E)},t.inspect=function(){return"[Emscripten Module object]"};try{T=a(9925)}catch(E){throw console.error('The "worker_threads" module is not supported in this node.js build - perhaps a newer version is needed?'),E}a.g.Worker=T.Worker}else(A||O)&&(O?N=self.location.href:typeof document<"u"&&document.currentScript&&(N=document.currentScript.src),u&&(N=u),N=N.indexOf("blob:")!==0?N.substr(0,N.replace(/[?#].*/,"").lastIndexOf("/")+1):"",x||(i=T=>{var E=new XMLHttpRequest;return E.open("GET",T,!1),E.send(null),E.responseText},O&&(g=T=>{var E=new XMLHttpRequest;return E.open("GET",T,!1),E.responseType="arraybuffer",E.send(null),new Uint8Array(E.response)}),d=(T,E,k)=>{var C=new XMLHttpRequest;C.open("GET",T,!0),C.responseType="arraybuffer",C.onload=()=>{C.status==200||C.status==0&&C.response?E(C.response):k()},C.onerror=k,C.send(null)}));x&&typeof performance>"u"&&(a.g.performance=a(6953).performance);var L=console.log.bind(console),$=console.warn.bind(console);x&&(y(),L=T=>m.writeSync(1,T+`
-`),$=T=>m.writeSync(2,T+`
-`));var G,D=t.print||L,j=t.printErr||$;Object.assign(t,w),w=null,t.thisProgram&&(v=t.thisProgram),t.quit&&(S=t.quit),t.wasmBinary&&(G=t.wasmBinary);var Z=t.noExitRuntime||!1;typeof WebAssembly!="object"&&fe("no native wasm support detected");var X,J,ee,ue,Se,ve,oe,ye,be=!1,ke=typeof TextDecoder<"u"?new TextDecoder("utf8"):void 0;function Fe(T,E,k){var C=(E>>>=0)+k;for(k=E;T[k]&&!(k>=C);)++k;if(16(B=(240&B)==224?(15&B)<<12|V<<6|K:(7&B)<<18|V<<12|K<<6|63&T[E++])?C+=String.fromCharCode(B):(B-=65536,C+=String.fromCharCode(55296|B>>10,56320|1023&B))}}else C+=String.fromCharCode(B)}return C}function xe(T,E){return(T>>>=0)?Fe(h(),T,E):""}function Ne(T,E,k,C){if(!(0>>=0;C=k+C-1;for(var V=0;V=K&&(K=65536+((1023&K)<<10)|1023&T.charCodeAt(++V)),127>=K){if(k>=C)break;E[k++>>>0]=K}else{if(2047>=K){if(k+1>=C)break;E[k++>>>0]=192|K>>6}else{if(65535>=K){if(k+2>=C)break;E[k++>>>0]=224|K>>12}else{if(k+3>=C)break;E[k++>>>0]=240|K>>18,E[k++>>>0]=128|K>>12&63}E[k++>>>0]=128|K>>6&63}E[k++>>>0]=128|63&K}}return E[k>>>0]=0,k-B}function Ce(T){for(var E=0,k=0;k=C?E++:2047>=C?E+=2:55296<=C&&57343>=C?(E+=4,++k):E+=3}return E}function Oe(T){ee=T,t.HEAP8=ue=new Int8Array(T),t.HEAP16=new Int16Array(T),t.HEAP32=ve=new Int32Array(T),t.HEAPU8=Se=new Uint8Array(T),t.HEAPU16=new Uint16Array(T),t.HEAPU32=oe=new Uint32Array(T),t.HEAPF32=new Float32Array(T),t.HEAPF64=ye=new Float64Array(T)}I&&(ee=t.buffer);var Ae=t.INITIAL_MEMORY||16777216;if(I)X=t.wasmMemory,ee=t.buffer;else if(t.wasmMemory)X=t.wasmMemory;else if(!((X=new WebAssembly.Memory({initial:Ae/65536,maximum:65536,shared:!0})).buffer instanceof SharedArrayBuffer))throw j("requested a shared WebAssembly.Memory but the returned buffer is not a SharedArrayBuffer, indicating that while the browser has SharedArrayBuffer it does not have WebAssembly threads support - you may need to set a flag"),x&&console.log("(on node you may need: --experimental-wasm-threads --experimental-wasm-bulk-memory and also use a recent version)"),Error("bad memory");X&&(ee=X.buffer),Ae=ee.byteLength,Oe(ee);var Be,Ge=[],Ve=[],Xe=[],Ze=[];function qe(){return Z||!1}function Ue(){var T=t.preRun.shift();Ge.unshift(T)}var Pe,je=0,Ye=null;function fe(T){throw I?postMessage({cmd:"onAbort",arg:T}):t.onAbort&&t.onAbort(T),j(T="Aborted("+T+")"),be=!0,T=new WebAssembly.RuntimeError(T+". 
Build with -sASSERTIONS for more info."),r(T),T}function pt(){return Pe.startsWith("data:application/octet-stream;base64,")}function lt(){var T=Pe;try{if(T==Pe&&G)return new Uint8Array(G);if(g)return g(T);throw"both async and sync fetching of the wasm failed"}catch(E){fe(E)}}Pe="ort-wasm-threaded.wasm",pt()||(Pe=R(Pe));var Pt={};function Qe(T){this.name="ExitStatus",this.message="Program terminated with exit("+T+")",this.status=T}function ct(T){(T=re.Vb[T])||fe(),re.mc(T)}function dt(T){var E=re.Cc();if(!E)return 6;re.ac.push(E),re.Vb[T.Ub]=E,E.Ub=T.Ub;var k={cmd:"run",start_routine:T.Ic,arg:T.zc,pthread_ptr:T.Ub};return E.$b=()=>{k.time=performance.now(),E.postMessage(k,T.Nc)},E.loaded&&(E.$b(),delete E.$b),0}function Re(T){if(I)return Q(1,1,T);qe()||(re.oc(),t.onExit&&t.onExit(T),be=!0),S(T,new Qe(T))}function it(T,E){if(!E&&I)throw kt(T),"unwind";qe()||I||(Wt(),rt(Xe),qt(0),$t[1].length&&Ft(1,10),$t[2].length&&Ft(2,10),re.oc()),Re(T)}var re={Yb:[],ac:[],qc:[],Vb:{},fc:function(){I&&re.Ec()},Pc:function(){},Ec:function(){re.receiveObjectTransfer=re.Gc,re.threadInitTLS=re.pc,re.setExitStatus=re.nc,Z=!1},nc:function(){},oc:function(){for(var T of Object.values(re.Vb))re.mc(T);for(T of re.Yb)T.terminate();re.Yb=[]},mc:function(T){var E=T.Ub;delete re.Vb[E],re.Yb.push(T),re.ac.splice(re.ac.indexOf(T),1),T.Ub=0,Rt(E)},Gc:function(){},pc:function(){re.qc.forEach(T=>T())},Fc:function(T,E){T.onmessage=k=>{var C=(k=k.data).cmd;if(T.Ub&&(re.Bc=T.Ub),k.targetThread&&k.targetThread!=Dt()){var B=re.Vb[k.Qc];B?B.postMessage(k,k.transferList):j('Internal error! Worker sent a message "'+C+'" to target pthread '+k.targetThread+", but that thread no longer exists!")}else C==="processProxyingQueue"?F(k.queue):C==="spawnThread"?dt(k):C==="cleanupThread"?ct(k.thread):C==="killThread"?(k=k.thread,C=re.Vb[k],delete re.Vb[k],C.terminate(),Rt(k),re.ac.splice(re.ac.indexOf(C),1),C.Ub=0):C==="cancelThread"?re.Vb[k.thread].postMessage({cmd:"cancel"}):C==="loaded"?(T.loaded=!0,E&&E(T),T.$b&&(T.$b(),delete T.$b)):C==="print"?D("Thread "+k.threadId+": "+k.text):C==="printErr"?j("Thread "+k.threadId+": "+k.text):C==="alert"?alert("Thread "+k.threadId+": "+k.text):k.target==="setimmediate"?T.postMessage(k):C==="onAbort"?t.onAbort&&t.onAbort(k.arg):C&&j("worker sent an unknown command "+C);re.Bc=void 0},T.onerror=k=>{throw j("worker sent an error! 
"+k.filename+":"+k.lineno+": "+k.message),k},x&&(T.on("message",function(k){T.onmessage({data:k})}),T.on("error",function(k){T.onerror(k)}),T.on("detachedExit",function(){})),T.postMessage({cmd:"load",urlOrBlob:t.mainScriptUrlOrBlob||u,wasmMemory:X,wasmModule:J})},yc:function(){var T=R("ort-wasm-threaded.worker.js");re.Yb.push(new Worker(T))},Cc:function(){return re.Yb.length==0&&(re.yc(),re.Fc(re.Yb[0])),re.Yb.pop()}};function rt(T){for(;0>2>>>0];T=f()[T+48>>2>>>0],Zt(E,E-T),ce(E)};var Je=[];function we(T){var E=Je[T];return E||(T>=Je.length&&(Je.length=T+1),Je[T]=E=Be.get(T)),E}t.invokeEntryPoint=function(T,E){T=we(T)(E),qe()?re.nc(T):Kt(T)};var ot,ft,st=[],ae=0,ie=0;function se(T){this.Zb=T,this.Sb=T-24,this.xc=function(E){c()[this.Sb+4>>2>>>0]=E},this.bc=function(){return c()[this.Sb+4>>2>>>0]},this.wc=function(E){c()[this.Sb+8>>2>>>0]=E},this.Dc=function(){return c()[this.Sb+8>>2>>>0]},this.rc=function(){f()[this.Sb>>2>>>0]=0},this.hc=function(E){E=E?1:0,s()[this.Sb+12>>0>>>0]=E},this.uc=function(){return s()[this.Sb+12>>0>>>0]!=0},this.ic=function(E){E=E?1:0,s()[this.Sb+13>>0>>>0]=E},this.kc=function(){return s()[this.Sb+13>>0>>>0]!=0},this.fc=function(E,k){this.cc(0),this.xc(E),this.wc(k),this.rc(),this.hc(!1),this.ic(!1)},this.sc=function(){Atomics.add(f(),this.Sb>>2,1)},this.Hc=function(){return Atomics.sub(f(),this.Sb>>2,1)===1},this.cc=function(E){c()[this.Sb+16>>2>>>0]=E},this.tc=function(){return c()[this.Sb+16>>2>>>0]},this.vc=function(){if(Qt(this.bc()))return c()[this.Zb>>2>>>0];var E=this.tc();return E!==0?E:this.Zb}}function gt(T){return Vt(new se(T).Sb)}function at(T,E,k,C){return I?Q(3,1,T,E,k,C):mt(T,E,k,C)}function mt(T,E,k,C){if(typeof SharedArrayBuffer>"u")return j("Current environment does not support SharedArrayBuffer, pthreads are not available!"),6;var B=[];return I&&B.length===0?at(T,E,k,C):(T={Ic:k,Ub:T,zc:C,Nc:B},I?(T.Oc="spawnThread",postMessage(T,B),0):dt(T))}function bt(T,E,k){return I?Q(4,1,T,E,k):0}function _t(T,E){if(I)return Q(5,1,T,E)}function yt(T,E){if(I)return Q(6,1,T,E)}function wt(T,E,k){if(I)return Q(7,1,T,E,k)}function vt(T,E,k){return I?Q(8,1,T,E,k):0}function xt(T,E){if(I)return Q(9,1,T,E)}function Tt(T,E,k){if(I)return Q(10,1,T,E,k)}function St(T,E,k,C){if(I)return Q(11,1,T,E,k,C)}function At(T,E,k,C){if(I)return Q(12,1,T,E,k,C)}function Ot(T,E,k,C){if(I)return Q(13,1,T,E,k,C)}function Et(T){if(I)return Q(14,1,T)}function P(T,E){if(I)return Q(15,1,T,E)}function M(T,E,k){if(I)return Q(16,1,T,E,k)}function F(T){Atomics.store(f(),T>>2,1),Dt()&&Yt(T),Atomics.compareExchange(f(),T>>2,1,0)}function z(T){return c()[T>>>2]+4294967296*f()[T+4>>>2]}function U(T,E,k,C,B,V){return I?Q(17,1,T,E,k,C,B,V):-52}function H(T,E,k,C,B,V){if(I)return Q(18,1,T,E,k,C,B,V)}function Y(T){var E=Ce(T)+1,k=Lt(E);return k&&Ne(T,s(),k,E),k}function te(T,E,k){function C(ge){return(ge=ge.toTimeString().match(/\(([A-Za-z ]+)\)$/))?ge[1]:"GMT"}if(I)return Q(19,1,T,E,k);var B=new Date().getFullYear(),V=new Date(B,0,1),K=new Date(B,6,1);B=V.getTimezoneOffset();var ne=K.getTimezoneOffset(),pe=Math.max(B,ne);f()[T>>2>>>0]=60*pe,f()[E>>2>>>0]=+(B!=ne),T=C(V),E=C(K),T=Y(T),E=Y(E),ne>2>>>0]=T,c()[k+4>>2>>>0]=E):(c()[k>>2>>>0]=E,c()[k+4>>2>>>0]=T)}function Q(T,E){var k=arguments.length-2,C=arguments;return It(()=>{for(var B=jt(8*k),V=B>>3,K=0;K>>0]=ne}return Xt(T,k,B,E)})}t.executeNotifiedProxyingQueue=F,ft=x?()=>{var T=process.hrtime();return 1e3*T[0]+T[1]/1e6}:I?()=>performance.now()-t.__performance_now_clock_drift:()=>performance.now();var le,Te=[],Le={};function $e(){if(!le){var 
T,E={USER:"web_user",LOGNAME:"web_user",PATH:"/",PWD:"/",HOME:"/home/web_user",LANG:(typeof navigator=="object"&&navigator.languages&&navigator.languages[0]||"C").replace("-","_")+".UTF-8",_:v||"./this.program"};for(T in Le)Le[T]===void 0?delete E[T]:E[T]=Le[T];var k=[];for(T in E)k.push(T+"="+E[T]);le=k}return le}function q(T,E){if(I)return Q(20,1,T,E);var k=0;return $e().forEach(function(C,B){var V=E+k;for(B=c()[T+4*B>>2>>>0]=V,V=0;V>0>>>0]=C.charCodeAt(V);s()[B>>0>>>0]=0,k+=C.length+1}),0}function me(T,E){if(I)return Q(21,1,T,E);var k=$e();c()[T>>2>>>0]=k.length;var C=0;return k.forEach(function(B){C+=B.length+1}),c()[E>>2>>>0]=C,0}function Ee(T){return I?Q(22,1,T):52}function We(T,E,k,C){return I?Q(23,1,T,E,k,C):52}function et(T,E,k,C,B){return I?Q(24,1,T,E,k,C,B):70}var $t=[null,[],[]];function Ft(T,E){var k=$t[T];E===0||E===10?((T===1?D:j)(Fe(k,0)),k.length=0):k.push(E)}function zt(T,E,k,C){if(I)return Q(25,1,T,E,k,C);for(var B=0,V=0;V>2>>>0],ne=c()[E+4>>2>>>0];E+=8;for(var pe=0;pe>>0]);B+=ne}return c()[C>>2>>>0]=B,0}var ze=0;function Mt(T){return T%4==0&&(T%100!=0||T%400==0)}var Bt=[31,29,31,30,31,30,31,31,30,31,30,31],Ut=[31,28,31,30,31,30,31,31,30,31,30,31];function Gt(T,E,k,C){function B(W,_e,Me){for(W=typeof W=="number"?W.toString():W||"";W.length<_e;)W=Me[0]+W;return W}function V(W,_e){return B(W,_e,"0")}function K(W,_e){function Me(ht){return 0>ht?-1:0tt-W.getDate())){W.setDate(W.getDate()+_e);break}_e-=tt-W.getDate()+1,W.setDate(1),11>Me?W.setMonth(Me+1):(W.setMonth(0),W.setFullYear(W.getFullYear()+1))}return Me=new Date(W.getFullYear()+1,0,4),_e=ne(new Date(W.getFullYear(),0,4)),Me=ne(Me),0>=K(_e,W)?0>=K(Me,W)?W.getFullYear()+1:W.getFullYear():W.getFullYear()-1}var ge=f()[C+40>>2>>>0];for(var De in C={Lc:f()[C>>2>>>0],Kc:f()[C+4>>2>>>0],dc:f()[C+8>>2>>>0],jc:f()[C+12>>2>>>0],ec:f()[C+16>>2>>>0],Xb:f()[C+20>>2>>>0],Tb:f()[C+24>>2>>>0],Wb:f()[C+28>>2>>>0],Rc:f()[C+32>>2>>>0],Jc:f()[C+36>>2>>>0],Mc:ge?xe(ge):""},k=xe(k),ge={"%c":"%a %b %d %H:%M:%S %Y","%D":"%m/%d/%y","%F":"%Y-%m-%d","%h":"%b","%r":"%I:%M:%S %p","%R":"%H:%M","%T":"%H:%M:%S","%x":"%m/%d/%y","%X":"%H:%M:%S","%Ec":"%c","%EC":"%C","%Ex":"%m/%d/%y","%EX":"%H:%M:%S","%Ey":"%y","%EY":"%Y","%Od":"%d","%Oe":"%e","%OH":"%H","%OI":"%I","%Om":"%m","%OM":"%M","%OS":"%S","%Ou":"%u","%OU":"%U","%OV":"%V","%Ow":"%w","%OW":"%W","%Oy":"%y"})k=k.replace(new RegExp(De,"g"),ge[De]);var Ke="Sunday Monday Tuesday Wednesday Thursday Friday Saturday".split(" "),He="January February March April May June July August September October November December".split(" ");for(De in ge={"%a":function(W){return Ke[W.Tb].substring(0,3)},"%A":function(W){return Ke[W.Tb]},"%b":function(W){return He[W.ec].substring(0,3)},"%B":function(W){return He[W.ec]},"%C":function(W){return V((W.Xb+1900)/100|0,2)},"%d":function(W){return V(W.jc,2)},"%e":function(W){return B(W.jc,2," ")},"%g":function(W){return pe(W).toString().substring(2)},"%G":function(W){return pe(W)},"%H":function(W){return V(W.dc,2)},"%I":function(W){return(W=W.dc)==0?W=12:12W.dc?"AM":"PM"},"%S":function(W){return V(W.Lc,2)},"%t":function(){return" "},"%u":function(W){return W.Tb||7},"%U":function(W){return V(Math.floor((W.Wb+7-W.Tb)/7),2)},"%V":function(W){var _e=Math.floor((W.Wb+7-(W.Tb+6)%7)/7);if(2>=(W.Tb+371-W.Wb-2)%7&&_e++,_e)_e==53&&((Me=(W.Tb+371-W.Wb)%7)==4||Me==3&&Mt(W.Xb)||(_e=1));else{_e=52;var Me=(W.Tb+7-W.Wb-1)%7;(Me==4||Me==5&&Mt(W.Xb%400-1))&&_e++}return V(_e,2)},"%w":function(W){return W.Tb},"%W":function(W){return 
V(Math.floor((W.Wb+7-(W.Tb+6)%7)/7),2)},"%y":function(W){return(W.Xb+1900).toString().substring(2)},"%Y":function(W){return W.Xb+1900},"%z":function(W){var _e=0<=(W=W.Jc);return W=Math.abs(W)/60,(_e?"+":"-")+("0000"+(W/60*100+W%60)).slice(-4)},"%Z":function(W){return W.Mc},"%%":function(){return"%"}},k=k.replace(/%%/g,"\0\0"),ge)k.includes(De)&&(k=k.replace(new RegExp(De,"g"),ge[De](C)));return De=function(W){var _e=Array(Ce(W)+1);return Ne(W,_e,0,_e.length),_e}(k=k.replace(/\0\0/g,"%")),De.length>E?0:(function(W,_e){s().set(W,_e>>>0)}(De,T),De.length-1)}re.fc();var hn=[null,Re,kt,at,bt,_t,yt,wt,vt,xt,Tt,St,At,Ot,Et,P,M,U,H,te,q,me,Ee,We,et,zt],pn={b:function(T){return Lt(T+24)+24},n:function(T){return(T=new se(T)).uc()||(T.hc(!0),ae--),T.ic(!1),st.push(T),T.sc(),T.vc()},ma:function(T){throw j("Unexpected exception thrown, this is not properly supported - aborting"),be=!0,T},x:function(){he(0);var T=st.pop();if(T.Hc()&&!T.kc()){var E=T.Dc();E&&we(E)(T.Zb),gt(T.Zb)}ie=0},e:function(){var T=ie;if(!T)return ze=0;var E=new se(T);E.cc(T);var k=E.bc();if(!k)return ze=0,T;for(var C=Array.prototype.slice.call(arguments),B=0;BF(C));else if(I)postMessage({targetThread:T,cmd:"processProxyingQueue",queue:C});else{if(!(T=re.Vb[T]))return;T.postMessage({cmd:"processProxyingQueue",queue:C})}return 1},Ea:function(){return-1},Pa:function(T,E){T=new Date(1e3*z(T)),f()[E>>2>>>0]=T.getUTCSeconds(),f()[E+4>>2>>>0]=T.getUTCMinutes(),f()[E+8>>2>>>0]=T.getUTCHours(),f()[E+12>>2>>>0]=T.getUTCDate(),f()[E+16>>2>>>0]=T.getUTCMonth(),f()[E+20>>2>>>0]=T.getUTCFullYear()-1900,f()[E+24>>2>>>0]=T.getUTCDay(),T=(T.getTime()-Date.UTC(T.getUTCFullYear(),0,1,0,0,0,0))/864e5|0,f()[E+28>>2>>>0]=T},Qa:function(T,E){T=new Date(1e3*z(T)),f()[E>>2>>>0]=T.getSeconds(),f()[E+4>>2>>>0]=T.getMinutes(),f()[E+8>>2>>>0]=T.getHours(),f()[E+12>>2>>>0]=T.getDate(),f()[E+16>>2>>>0]=T.getMonth(),f()[E+20>>2>>>0]=T.getFullYear()-1900,f()[E+24>>2>>>0]=T.getDay();var k=new Date(T.getFullYear(),0,1),C=(T.getTime()-k.getTime())/864e5|0;f()[E+28>>2>>>0]=C,f()[E+36>>2>>>0]=-60*T.getTimezoneOffset(),C=new Date(T.getFullYear(),6,1).getTimezoneOffset(),T=0|(C!=(k=k.getTimezoneOffset())&&T.getTimezoneOffset()==Math.min(k,C)),f()[E+32>>2>>>0]=T},Ra:function(T){var E=new Date(f()[T+20>>2>>>0]+1900,f()[T+16>>2>>>0],f()[T+12>>2>>>0],f()[T+8>>2>>>0],f()[T+4>>2>>>0],f()[T>>2>>>0],0),k=f()[T+32>>2>>>0],C=E.getTimezoneOffset(),B=new Date(E.getFullYear(),0,1),V=new Date(E.getFullYear(),6,1).getTimezoneOffset(),K=B.getTimezoneOffset(),ne=Math.min(K,V);return 0>k?f()[T+32>>2>>>0]=+(V!=K&&ne==C):0>2>>>0]=E.getDay(),k=(E.getTime()-B.getTime())/864e5|0,f()[T+28>>2>>>0]=k,f()[T>>2>>>0]=E.getSeconds(),f()[T+4>>2>>>0]=E.getMinutes(),f()[T+8>>2>>>0]=E.getHours(),f()[T+12>>2>>>0]=E.getDate(),f()[T+16>>2>>>0]=E.getMonth(),E.getTime()/1e3|0},Aa:U,Ba:H,Sa:function T(E,k,C){T.Ac||(T.Ac=!0,te(E,k,C))},y:function(){fe("")},U:function(){if(!x&&!O){var T="Blocking on the main thread is very dangerous, see https://emscripten.org/docs/porting/pthreads.html#blocking-on-the-main-browser-thread";ot||(ot={}),ot[T]||(ot[T]=1,x&&(T="warning: "+T),j(T))}},ra:function(){return 4294901760},B:ft,Ia:function(T,E,k){h().copyWithin(T>>>0,E>>>0,E+k>>>0)},F:function(){return x?a(3993).cpus().length:navigator.hardwareConcurrency},Da:function(T,E,k){Te.length=E,k>>=3;for(var C=0;C>>0];return(0>T?Pt[-T-1]:hn[T]).apply(null,Te)},qa:function(T){var E=h().length;if((T>>>=0)<=E||4294901760=k;k*=2){var C=E*(1+.2/k);C=Math.min(C,T+100663296);var 
B=Math;C=Math.max(T,C),B=B.min.call(B,4294901760,C+(65536-C%65536)%65536);e:{try{X.grow(B-ee.byteLength+65535>>>16),Oe(X.buffer);var V=1;break e}catch{}V=void 0}if(V)return!0}return!1},Na:function(){throw"unwind"},Ga:q,Ha:me,J:it,I:Ee,S:We,ga:et,R:zt,d:function(){return ze},na:function T(E,k){T.lc||(T.lc=function(){if(typeof crypto=="object"&&typeof crypto.getRandomValues=="function"){var B=new Uint8Array(1);return()=>(crypto.getRandomValues(B),B[0])}if(x)try{var V=a(Object(function(){var K=new Error("Cannot find module 'crypto'");throw K.code="MODULE_NOT_FOUND",K}()));return()=>V.randomBytes(1)[0]}catch{}return()=>fe("randomDevice")}());for(var C=0;C>0>>>0]=T.lc();return 0},ia:function(T,E,k){var C=de();try{return we(T)(E,k)}catch(B){if(ce(C),B!==B+0)throw B;he(1,0)}},ja:function(T,E,k){var C=de();try{return we(T)(E,k)}catch(B){if(ce(C),B!==B+0)throw B;he(1,0)}},K:function(T){var E=de();try{return we(T)()}catch(k){if(ce(E),k!==k+0)throw k;he(1,0)}},f:function(T,E){var k=de();try{return we(T)(E)}catch(C){if(ce(k),C!==C+0)throw C;he(1,0)}},P:function(T,E,k){var C=de();try{return we(T)(E,k)}catch(B){if(ce(C),B!==B+0)throw B;he(1,0)}},Q:function(T,E,k){var C=de();try{return we(T)(E,k)}catch(B){if(ce(C),B!==B+0)throw B;he(1,0)}},k:function(T,E,k){var C=de();try{return we(T)(E,k)}catch(B){if(ce(C),B!==B+0)throw B;he(1,0)}},p:function(T,E,k,C){var B=de();try{return we(T)(E,k,C)}catch(V){if(ce(B),V!==V+0)throw V;he(1,0)}},q:function(T,E,k,C,B){var V=de();try{return we(T)(E,k,C,B)}catch(K){if(ce(V),K!==K+0)throw K;he(1,0)}},N:function(T,E,k,C,B,V){var K=de();try{return we(T)(E,k,C,B,V)}catch(ne){if(ce(K),ne!==ne+0)throw ne;he(1,0)}},s:function(T,E,k,C,B,V){var K=de();try{return we(T)(E,k,C,B,V)}catch(ne){if(ce(K),ne!==ne+0)throw ne;he(1,0)}},w:function(T,E,k,C,B,V,K){var ne=de();try{return we(T)(E,k,C,B,V,K)}catch(pe){if(ce(ne),pe!==pe+0)throw pe;he(1,0)}},L:function(T,E,k,C,B,V,K,ne){var pe=de();try{return we(T)(E,k,C,B,V,K,ne)}catch(ge){if(ce(pe),ge!==ge+0)throw ge;he(1,0)}},E:function(T,E,k,C,B,V,K,ne,pe,ge,De,Ke){var He=de();try{return we(T)(E,k,C,B,V,K,ne,pe,ge,De,Ke)}catch(W){if(ce(He),W!==W+0)throw W;he(1,0)}},aa:function(T,E,k,C,B,V,K,ne){var pe=de();try{return un(T,E,k,C,B,V,K,ne)}catch(ge){if(ce(pe),ge!==ge+0)throw ge;he(1,0)}},_:function(T,E,k,C,B,V,K){var ne=de();try{return en(T,E,k,C,B,V,K)}catch(pe){if(ce(ne),pe!==pe+0)throw pe;he(1,0)}},Z:function(T,E,k,C,B){var V=de();try{return ln(T,E,k,C,B)}catch(K){if(ce(V),K!==K+0)throw K;he(1,0)}},ca:function(T,E,k,C){var B=de();try{return sn(T,E,k,C)}catch(V){if(ce(B),V!==V+0)throw V;he(1,0)}},$:function(T){var E=de();try{return Jt(T)}catch(k){if(ce(E),k!==k+0)throw k;he(1,0)}},ba:function(T,E){var k=de();try{return an(T,E)}catch(C){if(ce(k),C!==C+0)throw C;he(1,0)}},Y:function(T,E,k){var C=de();try{return tn(T,E,k)}catch(B){if(ce(C),B!==B+0)throw B;he(1,0)}},g:function(T){var E=de();try{we(T)()}catch(k){if(ce(E),k!==k+0)throw k;he(1,0)}},r:function(T,E){var k=de();try{we(T)(E)}catch(C){if(ce(k),C!==C+0)throw C;he(1,0)}},i:function(T,E,k){var C=de();try{we(T)(E,k)}catch(B){if(ce(C),B!==B+0)throw B;he(1,0)}},ha:function(T,E,k,C){var B=de();try{we(T)(E,k,C)}catch(V){if(ce(B),V!==V+0)throw V;he(1,0)}},m:function(T,E,k,C){var B=de();try{we(T)(E,k,C)}catch(V){if(ce(B),V!==V+0)throw V;he(1,0)}},v:function(T,E,k,C,B){var V=de();try{we(T)(E,k,C,B)}catch(K){if(ce(V),K!==K+0)throw K;he(1,0)}},u:function(T,E,k,C,B,V){var K=de();try{we(T)(E,k,C,B,V)}catch(ne){if(ce(K),ne!==ne+0)throw ne;he(1,0)}},O:function(T,E,k,C,B,V,K){var 
ne=de();try{we(T)(E,k,C,B,V,K)}catch(pe){if(ce(ne),pe!==pe+0)throw pe;he(1,0)}},A:function(T,E,k,C,B,V,K,ne){var pe=de();try{we(T)(E,k,C,B,V,K,ne)}catch(ge){if(ce(pe),ge!==ge+0)throw ge;he(1,0)}},ka:function(T,E,k,C,B,V,K,ne,pe){var ge=de();try{we(T)(E,k,C,B,V,K,ne,pe)}catch(De){if(ce(ge),De!==De+0)throw De;he(1,0)}},C:function(T,E,k,C,B,V,K,ne,pe,ge,De){var Ke=de();try{we(T)(E,k,C,B,V,K,ne,pe,ge,De)}catch(He){if(ce(Ke),He!==He+0)throw He;he(1,0)}},D:function(T,E,k,C,B,V,K,ne,pe,ge,De,Ke,He,W,_e,Me){var tt=de();try{we(T)(E,k,C,B,V,K,ne,pe,ge,De,Ke,He,W,_e,Me)}catch(ht){if(ce(tt),ht!==ht+0)throw ht;he(1,0)}},fa:function(T,E,k,C,B,V,K,ne){var pe=de();try{nn(T,E,k,C,B,V,K,ne)}catch(ge){if(ce(pe),ge!==ge+0)throw ge;he(1,0)}},da:function(T,E,k,C,B,V,K,ne,pe,ge,De,Ke){var He=de();try{on(T,E,k,C,B,V,K,ne,pe,ge,De,Ke)}catch(W){if(ce(He),W!==W+0)throw W;he(1,0)}},ea:function(T,E,k,C,B,V){var K=de();try{rn(T,E,k,C,B,V)}catch(ne){if(ce(K),ne!==ne+0)throw ne;he(1,0)}},o:function(T){return T},a:X||t.wasmMemory,G:function(T){ze=T},la:Gt,z:function(T,E,k,C){return Gt(T,E,k,C)}};(function(){function T(B,V){t.asm=B.exports,re.qc.push(t.asm.sb),Be=t.asm.ub,Ve.unshift(t.asm.Va),J=V,I||(je--,t.monitorRunDependencies&&t.monitorRunDependencies(je),je==0&&Ye&&(B=Ye,Ye=null,B()))}function E(B){T(B.instance,B.module)}function k(B){return function(){if(!G&&(A||O)){if(typeof fetch=="function"&&!Pe.startsWith("file://"))return fetch(Pe,{credentials:"same-origin"}).then(function(V){if(!V.ok)throw"failed to load wasm binary file at '"+Pe+"'";return V.arrayBuffer()}).catch(function(){return lt()});if(d)return new Promise(function(V,K){d(Pe,function(ne){V(new Uint8Array(ne))},K)})}return Promise.resolve().then(function(){return lt()})}().then(function(V){return WebAssembly.instantiate(V,C)}).then(function(V){return V}).then(B,function(V){j("failed to asynchronously prepare wasm: "+V),fe(V)})}var C={a:pn};if(I||(je++,t.monitorRunDependencies&&t.monitorRunDependencies(je)),t.instantiateWasm)try{return t.instantiateWasm(C,T)}catch(B){return j("Module.instantiateWasm callback failed with error: "+B),!1}(G||typeof WebAssembly.instantiateStreaming!="function"||pt()||Pe.startsWith("file://")||x||typeof fetch!="function"?k(E):fetch(Pe,{credentials:"same-origin"}).then(function(B){return WebAssembly.instantiateStreaming(B,C).then(E,function(V){return j("wasm streaming compile failed: "+V),j("falling back to ArrayBuffer 
instantiation"),k(E)})})).catch(r)})(),t.___wasm_call_ctors=function(){return(t.___wasm_call_ctors=t.asm.Va).apply(null,arguments)},t._OrtInit=function(){return(t._OrtInit=t.asm.Wa).apply(null,arguments)},t._OrtCreateSessionOptions=function(){return(t._OrtCreateSessionOptions=t.asm.Xa).apply(null,arguments)},t._OrtAppendExecutionProvider=function(){return(t._OrtAppendExecutionProvider=t.asm.Ya).apply(null,arguments)},t._OrtAddSessionConfigEntry=function(){return(t._OrtAddSessionConfigEntry=t.asm.Za).apply(null,arguments)},t._OrtReleaseSessionOptions=function(){return(t._OrtReleaseSessionOptions=t.asm._a).apply(null,arguments)},t._OrtCreateSession=function(){return(t._OrtCreateSession=t.asm.$a).apply(null,arguments)},t._OrtReleaseSession=function(){return(t._OrtReleaseSession=t.asm.ab).apply(null,arguments)},t._OrtGetInputCount=function(){return(t._OrtGetInputCount=t.asm.bb).apply(null,arguments)},t._OrtGetOutputCount=function(){return(t._OrtGetOutputCount=t.asm.cb).apply(null,arguments)},t._OrtGetInputName=function(){return(t._OrtGetInputName=t.asm.db).apply(null,arguments)},t._OrtGetOutputName=function(){return(t._OrtGetOutputName=t.asm.eb).apply(null,arguments)},t._OrtFree=function(){return(t._OrtFree=t.asm.fb).apply(null,arguments)},t._OrtCreateTensor=function(){return(t._OrtCreateTensor=t.asm.gb).apply(null,arguments)},t._OrtGetTensorData=function(){return(t._OrtGetTensorData=t.asm.hb).apply(null,arguments)},t._OrtReleaseTensor=function(){return(t._OrtReleaseTensor=t.asm.ib).apply(null,arguments)},t._OrtCreateRunOptions=function(){return(t._OrtCreateRunOptions=t.asm.jb).apply(null,arguments)},t._OrtAddRunConfigEntry=function(){return(t._OrtAddRunConfigEntry=t.asm.kb).apply(null,arguments)},t._OrtReleaseRunOptions=function(){return(t._OrtReleaseRunOptions=t.asm.lb).apply(null,arguments)},t._OrtRun=function(){return(t._OrtRun=t.asm.mb).apply(null,arguments)},t._OrtEndProfiling=function(){return(t._OrtEndProfiling=t.asm.nb).apply(null,arguments)};var Dt=t._pthread_self=function(){return(Dt=t._pthread_self=t.asm.ob).apply(null,arguments)},Lt=t._malloc=function(){return(Lt=t._malloc=t.asm.pb).apply(null,arguments)},Vt=t._free=function(){return(Vt=t._free=t.asm.qb).apply(null,arguments)},qt=t._fflush=function(){return(qt=t._fflush=t.asm.rb).apply(null,arguments)};t.__emscripten_tls_init=function(){return(t.__emscripten_tls_init=t.asm.sb).apply(null,arguments)};var Wt=t.___funcs_on_exit=function(){return(Wt=t.___funcs_on_exit=t.asm.tb).apply(null,arguments)},Ht=t.__emscripten_thread_init=function(){return(Ht=t.__emscripten_thread_init=t.asm.vb).apply(null,arguments)};t.__emscripten_thread_crashed=function(){return(t.__emscripten_thread_crashed=t.asm.wb).apply(null,arguments)};var 
Ct,Xt=t._emscripten_run_in_main_runtime_thread_js=function(){return(Xt=t._emscripten_run_in_main_runtime_thread_js=t.asm.xb).apply(null,arguments)},Yt=t.__emscripten_proxy_execute_task_queue=function(){return(Yt=t.__emscripten_proxy_execute_task_queue=t.asm.yb).apply(null,arguments)},Rt=t.__emscripten_thread_free_data=function(){return(Rt=t.__emscripten_thread_free_data=t.asm.zb).apply(null,arguments)},Kt=t.__emscripten_thread_exit=function(){return(Kt=t.__emscripten_thread_exit=t.asm.Ab).apply(null,arguments)},he=t._setThrew=function(){return(he=t._setThrew=t.asm.Bb).apply(null,arguments)},Zt=t._emscripten_stack_set_limits=function(){return(Zt=t._emscripten_stack_set_limits=t.asm.Cb).apply(null,arguments)},de=t.stackSave=function(){return(de=t.stackSave=t.asm.Db).apply(null,arguments)},ce=t.stackRestore=function(){return(ce=t.stackRestore=t.asm.Eb).apply(null,arguments)},jt=t.stackAlloc=function(){return(jt=t.stackAlloc=t.asm.Fb).apply(null,arguments)},Nt=t.___cxa_can_catch=function(){return(Nt=t.___cxa_can_catch=t.asm.Gb).apply(null,arguments)},Qt=t.___cxa_is_pointer_type=function(){return(Qt=t.___cxa_is_pointer_type=t.asm.Hb).apply(null,arguments)},Jt=t.dynCall_j=function(){return(Jt=t.dynCall_j=t.asm.Ib).apply(null,arguments)},en=t.dynCall_iiiiij=function(){return(en=t.dynCall_iiiiij=t.asm.Jb).apply(null,arguments)},tn=t.dynCall_jii=function(){return(tn=t.dynCall_jii=t.asm.Kb).apply(null,arguments)},nn=t.dynCall_viiiiij=function(){return(nn=t.dynCall_viiiiij=t.asm.Lb).apply(null,arguments)},rn=t.dynCall_vjji=function(){return(rn=t.dynCall_vjji=t.asm.Mb).apply(null,arguments)},on=t.dynCall_viiijjjii=function(){return(on=t.dynCall_viiijjjii=t.asm.Nb).apply(null,arguments)},sn=t.dynCall_iij=function(){return(sn=t.dynCall_iij=t.asm.Ob).apply(null,arguments)},an=t.dynCall_ji=function(){return(an=t.dynCall_ji=t.asm.Pb).apply(null,arguments)},un=t.dynCall_iiiiiij=function(){return(un=t.dynCall_iiiiiij=t.asm.Qb).apply(null,arguments)},ln=t.dynCall_iiij=function(){return(ln=t.dynCall_iiij=t.asm.Rb).apply(null,arguments)};function cn(){function T(){if(!Ct&&(Ct=!0,t.calledRun=!0,!be)&&(I||rt(Ve),e(t),t.onRuntimeInitialized&&t.onRuntimeInitialized(),!I)){if(t.postRun)for(typeof t.postRun=="function"&&(t.postRun=[t.postRun]);t.postRun.length;){var E=t.postRun.shift();Ze.unshift(E)}rt(Ze)}}if(!(0{var u,l=(u=(u=typeof document<"u"&&document.currentScript?document.currentScript.src:void 0)||"/index.js",function(p){var s,h,f;p=p||{},s||(s=p!==void 0?p:{}),s.ready=new Promise(function(P,M){h=P,f=M});var c,o,t,e,r,i,d=Object.assign({},s),g="./this.program",m=(P,M)=>{throw M},b=typeof window=="object",y=typeof importScripts=="function",w=typeof process=="object"&&typeof process.versions=="object"&&typeof process.versions.node=="string",v="";w?(v=y?a(908).dirname(v)+"/":"//",i=()=>{r||(e=a(1384),r=a(908))},c=function(P,M){return i(),P=r.normalize(P),e.readFileSync(P,M?void 0:"utf8")},t=P=>((P=c(P,!0)).buffer||(P=new Uint8Array(P)),P),o=(P,M,F)=>{i(),P=r.normalize(P),e.readFile(P,function(z,U){z?F(z):M(U.buffer)})},1{if(x||0{var M=new XMLHttpRequest;return M.open("GET",P,!1),M.send(null),M.responseText},y&&(t=P=>{var M=new XMLHttpRequest;return M.open("GET",P,!1),M.responseType="arraybuffer",M.send(null),new Uint8Array(M.response)}),o=(P,M,F)=>{var z=new XMLHttpRequest;z.open("GET",P,!0),z.responseType="arraybuffer",z.onload=()=>{z.status==200||z.status==0&&z.response?M(z.response):F()},z.onerror=F,z.send(null)});var 
S,A=s.print||console.log.bind(console),O=s.printErr||console.warn.bind(console);Object.assign(s,d),d=null,s.thisProgram&&(g=s.thisProgram),s.quit&&(m=s.quit),s.wasmBinary&&(S=s.wasmBinary);var x=s.noExitRuntime||!1;typeof WebAssembly!="object"&&Oe("no native wasm support detected");var I,N,R,L,$,G,D=!1,j=typeof TextDecoder<"u"?new TextDecoder("utf8"):void 0;function Z(P,M,F){var z=(M>>>=0)+F;for(F=M;P[F]&&!(F>=z);)++F;if(16(U=(240&U)==224?(15&U)<<12|H<<6|Y:(7&U)<<18|H<<12|Y<<6|63&P[M++])?z+=String.fromCharCode(U):(U-=65536,z+=String.fromCharCode(55296|U>>10,56320|1023&U))}}else z+=String.fromCharCode(U)}return z}function X(P,M){return(P>>>=0)?Z(L,P,M):""}function J(P,M,F,z){if(!(0>>=0;z=F+z-1;for(var H=0;H=Y&&(Y=65536+((1023&Y)<<10)|1023&P.charCodeAt(++H)),127>=Y){if(F>=z)break;M[F++>>>0]=Y}else{if(2047>=Y){if(F+1>=z)break;M[F++>>>0]=192|Y>>6}else{if(65535>=Y){if(F+2>=z)break;M[F++>>>0]=224|Y>>12}else{if(F+3>=z)break;M[F++>>>0]=240|Y>>18,M[F++>>>0]=128|Y>>12&63}M[F++>>>0]=128|Y>>6&63}M[F++>>>0]=128|63&Y}}return M[F>>>0]=0,F-U}function ee(P){for(var M=0,F=0;F=z?M++:2047>=z?M+=2:55296<=z&&57343>=z?(M+=4,++F):M+=3}return M}function ue(){var P=I.buffer;N=P,s.HEAP8=R=new Int8Array(P),s.HEAP16=new Int16Array(P),s.HEAP32=$=new Int32Array(P),s.HEAPU8=L=new Uint8Array(P),s.HEAPU16=new Uint16Array(P),s.HEAPU32=G=new Uint32Array(P),s.HEAPF32=new Float32Array(P),s.HEAPF64=new Float64Array(P)}var Se,ve=[],oe=[],ye=[],be=[],ke=0;function Fe(){var P=s.preRun.shift();ve.unshift(P)}var xe,Ne=0,Ce=null;function Oe(P){throw s.onAbort&&s.onAbort(P),O(P="Aborted("+P+")"),D=!0,P=new WebAssembly.RuntimeError(P+". Build with -sASSERTIONS for more info."),f(P),P}function Ae(){return xe.startsWith("data:application/octet-stream;base64,")}if(xe="ort-wasm.wasm",!Ae()){var Be=xe;xe=s.locateFile?s.locateFile(Be,v):v+Be}function Ge(){var P=xe;try{if(P==xe&&S)return new Uint8Array(S);if(t)return t(P);throw"both async and sync fetching of the wasm failed"}catch(M){Oe(M)}}function Ve(P){this.name="ExitStatus",this.message="Program terminated with exit("+P+")",this.status=P}function Xe(P){for(;0>2>>>0]=M},this.Eb=function(){return G[this.zb+4>>2>>>0]},this.Sb=function(M){G[this.zb+8>>2>>>0]=M},this.Wb=function(){return G[this.zb+8>>2>>>0]},this.Tb=function(){$[this.zb>>2>>>0]=0},this.Ib=function(M){R[this.zb+12>>0>>>0]=M?1:0},this.Pb=function(){return R[this.zb+12>>0>>>0]!=0},this.Jb=function(M){R[this.zb+13>>0>>>0]=M?1:0},this.Lb=function(){return R[this.zb+13>>0>>>0]!=0},this.Rb=function(M,F){this.Fb(0),this.Ub(M),this.Sb(F),this.Tb(),this.Ib(!1),this.Jb(!1)},this.Nb=function(){$[this.zb>>2>>>0]+=1},this.Xb=function(){var M=$[this.zb>>2>>>0];return $[this.zb>>2>>>0]=M-1,M===1},this.Fb=function(M){G[this.zb+16>>2>>>0]=M},this.Ob=function(){return G[this.zb+16>>2>>>0]},this.Qb=function(){if(mt(this.Eb()))return G[this.Db>>2>>>0];var M=this.Ob();return M!==0?M:this.Db}}function je(P){return ot(new Pe(P).zb)}var Ye=[];function fe(P){var M=Ye[P];return M||(P>=Ye.length&&(Ye.length=P+1),Ye[P]=M=Se.get(P)),M}function pt(P){var M=ee(P)+1,F=we(M);return F&&J(P,R,F,M),F}var lt={};function Pt(){if(!Qe){var P,M={USER:"web_user",LOGNAME:"web_user",PATH:"/",PWD:"/",HOME:"/home/web_user",LANG:(typeof navigator=="object"&&navigator.languages&&navigator.languages[0]||"C").replace("-","_")+".UTF-8",_:g||"./this.program"};for(P in lt)lt[P]===void 0?delete M[P]:M[P]=lt[P];var F=[];for(P in M)F.push(P+"="+M[P]);Qe=F}return Qe}var Qe,ct=[null,[],[]];function dt(P,M){var F=ct[P];M===0||M===10?((P===1?A:O)(Z(F,0)),F.length=0):F.push(M)}var 
Re=0;function it(P){return P%4==0&&(P%100!=0||P%400==0)}var re=[31,29,31,30,31,30,31,31,30,31,30,31],rt=[31,28,31,30,31,30,31,31,30,31,30,31];function It(P,M,F,z){function U(q,me,Ee){for(q=typeof q=="number"?q.toString():q||"";q.lengthet?-1:0We-q.getDate())){q.setDate(q.getDate()+me);break}me-=We-q.getDate()+1,q.setDate(1),11>Ee?q.setMonth(Ee+1):(q.setMonth(0),q.setFullYear(q.getFullYear()+1))}return Ee=new Date(q.getFullYear()+1,0,4),me=te(new Date(q.getFullYear(),0,4)),Ee=te(Ee),0>=Y(me,q)?0>=Y(Ee,q)?q.getFullYear()+1:q.getFullYear():q.getFullYear()-1}var le=$[z+40>>2>>>0];for(var Te in z={$b:$[z>>2>>>0],Zb:$[z+4>>2>>>0],Gb:$[z+8>>2>>>0],Kb:$[z+12>>2>>>0],Hb:$[z+16>>2>>>0],Cb:$[z+20>>2>>>0],Ab:$[z+24>>2>>>0],Bb:$[z+28>>2>>>0],bc:$[z+32>>2>>>0],Yb:$[z+36>>2>>>0],ac:le?X(le):""},F=X(F),le={"%c":"%a %b %d %H:%M:%S %Y","%D":"%m/%d/%y","%F":"%Y-%m-%d","%h":"%b","%r":"%I:%M:%S %p","%R":"%H:%M","%T":"%H:%M:%S","%x":"%m/%d/%y","%X":"%H:%M:%S","%Ec":"%c","%EC":"%C","%Ex":"%m/%d/%y","%EX":"%H:%M:%S","%Ey":"%y","%EY":"%Y","%Od":"%d","%Oe":"%e","%OH":"%H","%OI":"%I","%Om":"%m","%OM":"%M","%OS":"%S","%Ou":"%u","%OU":"%U","%OV":"%V","%Ow":"%w","%OW":"%W","%Oy":"%y"})F=F.replace(new RegExp(Te,"g"),le[Te]);var Le="Sunday Monday Tuesday Wednesday Thursday Friday Saturday".split(" "),$e="January February March April May June July August September October November December".split(" ");for(Te in le={"%a":function(q){return Le[q.Ab].substring(0,3)},"%A":function(q){return Le[q.Ab]},"%b":function(q){return $e[q.Hb].substring(0,3)},"%B":function(q){return $e[q.Hb]},"%C":function(q){return H((q.Cb+1900)/100|0,2)},"%d":function(q){return H(q.Kb,2)},"%e":function(q){return U(q.Kb,2," ")},"%g":function(q){return Q(q).toString().substring(2)},"%G":function(q){return Q(q)},"%H":function(q){return H(q.Gb,2)},"%I":function(q){return(q=q.Gb)==0?q=12:12q.Gb?"AM":"PM"},"%S":function(q){return H(q.$b,2)},"%t":function(){return" "},"%u":function(q){return q.Ab||7},"%U":function(q){return H(Math.floor((q.Bb+7-q.Ab)/7),2)},"%V":function(q){var me=Math.floor((q.Bb+7-(q.Ab+6)%7)/7);if(2>=(q.Ab+371-q.Bb-2)%7&&me++,me)me==53&&((Ee=(q.Ab+371-q.Bb)%7)==4||Ee==3&&it(q.Cb)||(me=1));else{me=52;var Ee=(q.Ab+7-q.Bb-1)%7;(Ee==4||Ee==5&&it(q.Cb%400-1))&&me++}return H(me,2)},"%w":function(q){return q.Ab},"%W":function(q){return H(Math.floor((q.Bb+7-(q.Ab+6)%7)/7),2)},"%y":function(q){return(q.Cb+1900).toString().substring(2)},"%Y":function(q){return q.Cb+1900},"%z":function(q){var me=0<=(q=q.Yb);return q=Math.abs(q)/60,(me?"+":"-")+("0000"+(q/60*100+q%60)).slice(-4)},"%Z":function(q){return q.ac},"%%":function(){return"%"}},F=F.replace(/%%/g,"\0\0"),le)F.includes(Te)&&(F=F.replace(new RegExp(Te,"g"),le[Te](z)));return Te=function(q){var me=Array(ee(q)+1);return J(q,me,0,me.length),me}(F=F.replace(/\0\0/g,"%")),Te.length>M?0:(R.set(Te,P>>>0),Te.length-1)}var kt={a:function(P){return we(P+24)+24},m:function(P){return(P=new Pe(P)).Pb()||(P.Ib(!0),qe--),P.Jb(!1),Ze.push(P),P.Nb(),P.Qb()},ia:function(P){throw O("Unexpected exception thrown, this is not properly supported - aborting"),D=!0,P},w:function(){ae(0);var P=Ze.pop();if(P.Xb()&&!P.Lb()){var M=P.Wb();M&&fe(M)(P.Db),je(P.Db)}Ue=0},d:function(){var P=Ue;if(!P)return Re=0;var M=new Pe(P);M.Fb(P);var F=M.Eb();if(!F)return Re=0,P;for(var 
z=Array.prototype.slice.call(arguments),U=0;U>>2]+4294967296*$[P+4>>>2])),$[M>>2>>>0]=P.getUTCSeconds(),$[M+4>>2>>>0]=P.getUTCMinutes(),$[M+8>>2>>>0]=P.getUTCHours(),$[M+12>>2>>>0]=P.getUTCDate(),$[M+16>>2>>>0]=P.getUTCMonth(),$[M+20>>2>>>0]=P.getUTCFullYear()-1900,$[M+24>>2>>>0]=P.getUTCDay(),$[M+28>>2>>>0]=(P.getTime()-Date.UTC(P.getUTCFullYear(),0,1,0,0,0,0))/864e5|0},Ea:function(P,M){P=new Date(1e3*(G[P>>>2]+4294967296*$[P+4>>>2])),$[M>>2>>>0]=P.getSeconds(),$[M+4>>2>>>0]=P.getMinutes(),$[M+8>>2>>>0]=P.getHours(),$[M+12>>2>>>0]=P.getDate(),$[M+16>>2>>>0]=P.getMonth(),$[M+20>>2>>>0]=P.getFullYear()-1900,$[M+24>>2>>>0]=P.getDay();var F=new Date(P.getFullYear(),0,1);$[M+28>>2>>>0]=(P.getTime()-F.getTime())/864e5|0,$[M+36>>2>>>0]=-60*P.getTimezoneOffset();var z=new Date(P.getFullYear(),6,1).getTimezoneOffset();F=F.getTimezoneOffset(),$[M+32>>2>>>0]=0|(z!=F&&P.getTimezoneOffset()==Math.min(F,z))},Fa:function(P){var M=new Date($[P+20>>2>>>0]+1900,$[P+16>>2>>>0],$[P+12>>2>>>0],$[P+8>>2>>>0],$[P+4>>2>>>0],$[P>>2>>>0],0),F=$[P+32>>2>>>0],z=M.getTimezoneOffset(),U=new Date(M.getFullYear(),0,1),H=new Date(M.getFullYear(),6,1).getTimezoneOffset(),Y=U.getTimezoneOffset(),te=Math.min(Y,H);return 0>F?$[P+32>>2>>>0]=+(H!=Y&&te==z):0>2>>>0]=M.getDay(),$[P+28>>2>>>0]=(M.getTime()-U.getTime())/864e5|0,$[P>>2>>>0]=M.getSeconds(),$[P+4>>2>>>0]=M.getMinutes(),$[P+8>>2>>>0]=M.getHours(),$[P+12>>2>>>0]=M.getDate(),$[P+16>>2>>>0]=M.getMonth(),M.getTime()/1e3|0},sa:function(){return-52},ta:function(){},Ga:function P(M,F,z){P.Vb||(P.Vb=!0,function(U,H,Y){function te($e){return($e=$e.toTimeString().match(/\(([A-Za-z ]+)\)$/))?$e[1]:"GMT"}var Q=new Date().getFullYear(),le=new Date(Q,0,1),Te=new Date(Q,6,1);Q=le.getTimezoneOffset();var Le=Te.getTimezoneOffset();$[U>>2>>>0]=60*Math.max(Q,Le),$[H>>2>>>0]=+(Q!=Le),U=te(le),H=te(Te),U=pt(U),H=pt(H),Le>2>>>0]=U,G[Y+4>>2>>>0]=H):(G[Y>>2>>>0]=H,G[Y+4>>2>>>0]=U)}(M,F,z))},B:function(){Oe("")},ma:function(){return 4294901760},I:w?()=>{var P=process.hrtime();return 1e3*P[0]+P[1]/1e6}:()=>performance.now(),xa:function(P,M,F){L.copyWithin(P>>>0,M>>>0,M+F>>>0)},G:function(P){var M=L.length;if(4294901760<(P>>>=0))return!1;for(var F=1;4>=F;F*=2){var z=M*(1+.2/F);z=Math.min(z,P+100663296);var U=Math;z=Math.max(P,z),U=U.min.call(U,4294901760,z+(65536-z%65536)%65536);e:{try{I.grow(U-N.byteLength+65535>>>16),ue();var H=1;break e}catch{}H=void 0}if(H)return!0}return!1},va:function(P,M){var F=0;return Pt().forEach(function(z,U){var H=M+F;for(U=G[P+4*U>>2>>>0]=H,H=0;H>0>>>0]=z.charCodeAt(H);R[U>>0>>>0]=0,F+=z.length+1}),0},wa:function(P,M){var F=Pt();G[P>>2>>>0]=F.length;var z=0;return F.forEach(function(U){z+=U.length+1}),G[M>>2>>>0]=z,0},ba:function(P){x||0>2>>>0],te=G[M+4>>2>>>0];M+=8;for(var Q=0;Q>>0]);U+=te}return G[z>>2>>>0]=U,0},c:function(){return Re},ja:function P(M,F){P.Mb||(P.Mb=function(){if(typeof crypto=="object"&&typeof crypto.getRandomValues=="function"){var U=new Uint8Array(1);return()=>(crypto.getRandomValues(U),U[0])}if(w)try{var H=a(Object(function(){var Y=new Error("Cannot find module 'crypto'");throw Y.code="MODULE_NOT_FOUND",Y}()));return()=>H.randomBytes(1)[0]}catch{}return()=>Oe("randomDevice")}());for(var z=0;z>0>>>0]=P.Mb();return 0},ea:function(P,M,F){var z=ie();try{return fe(P)(M,F)}catch(U){if(se(z),U!==U+0)throw U;ae(1,0)}},fa:function(P,M,F){var z=ie();try{return fe(P)(M,F)}catch(U){if(se(z),U!==U+0)throw U;ae(1,0)}},J:function(P){var M=ie();try{return fe(P)()}catch(F){if(se(M),F!==F+0)throw F;ae(1,0)}},e:function(P,M){var F=ie();try{return 
fe(P)(M)}catch(z){if(se(F),z!==z+0)throw z;ae(1,0)}},N:function(P,M,F){var z=ie();try{return fe(P)(M,F)}catch(U){if(se(z),U!==U+0)throw U;ae(1,0)}},O:function(P,M,F){var z=ie();try{return fe(P)(M,F)}catch(U){if(se(z),U!==U+0)throw U;ae(1,0)}},j:function(P,M,F){var z=ie();try{return fe(P)(M,F)}catch(U){if(se(z),U!==U+0)throw U;ae(1,0)}},o:function(P,M,F,z){var U=ie();try{return fe(P)(M,F,z)}catch(H){if(se(U),H!==H+0)throw H;ae(1,0)}},p:function(P,M,F,z,U){var H=ie();try{return fe(P)(M,F,z,U)}catch(Y){if(se(H),Y!==Y+0)throw Y;ae(1,0)}},M:function(P,M,F,z,U,H){var Y=ie();try{return fe(P)(M,F,z,U,H)}catch(te){if(se(Y),te!==te+0)throw te;ae(1,0)}},r:function(P,M,F,z,U,H){var Y=ie();try{return fe(P)(M,F,z,U,H)}catch(te){if(se(Y),te!==te+0)throw te;ae(1,0)}},v:function(P,M,F,z,U,H,Y){var te=ie();try{return fe(P)(M,F,z,U,H,Y)}catch(Q){if(se(te),Q!==Q+0)throw Q;ae(1,0)}},K:function(P,M,F,z,U,H,Y,te){var Q=ie();try{return fe(P)(M,F,z,U,H,Y,te)}catch(le){if(se(Q),le!==le+0)throw le;ae(1,0)}},D:function(P,M,F,z,U,H,Y,te,Q,le,Te,Le){var $e=ie();try{return fe(P)(M,F,z,U,H,Y,te,Q,le,Te,Le)}catch(q){if(se($e),q!==q+0)throw q;ae(1,0)}},X:function(P,M,F,z,U,H,Y,te){var Q=ie();try{return At(P,M,F,z,U,H,Y,te)}catch(le){if(se(Q),le!==le+0)throw le;ae(1,0)}},V:function(P,M,F,z,U,H,Y){var te=ie();try{return _t(P,M,F,z,U,H,Y)}catch(Q){if(se(te),Q!==Q+0)throw Q;ae(1,0)}},U:function(P,M,F,z,U){var H=ie();try{return Ot(P,M,F,z,U)}catch(Y){if(se(H),Y!==Y+0)throw Y;ae(1,0)}},Z:function(P,M,F,z){var U=ie();try{return Tt(P,M,F,z)}catch(H){if(se(U),H!==H+0)throw H;ae(1,0)}},W:function(P){var M=ie();try{return bt(P)}catch(F){if(se(M),F!==F+0)throw F;ae(1,0)}},Y:function(P,M){var F=ie();try{return St(P,M)}catch(z){if(se(F),z!==z+0)throw z;ae(1,0)}},T:function(P,M,F){var z=ie();try{return yt(P,M,F)}catch(U){if(se(z),U!==U+0)throw U;ae(1,0)}},f:function(P){var M=ie();try{fe(P)()}catch(F){if(se(M),F!==F+0)throw F;ae(1,0)}},q:function(P,M){var F=ie();try{fe(P)(M)}catch(z){if(se(F),z!==z+0)throw z;ae(1,0)}},h:function(P,M,F){var z=ie();try{fe(P)(M,F)}catch(U){if(se(z),U!==U+0)throw U;ae(1,0)}},da:function(P,M,F,z){var U=ie();try{fe(P)(M,F,z)}catch(H){if(se(U),H!==H+0)throw H;ae(1,0)}},l:function(P,M,F,z){var U=ie();try{fe(P)(M,F,z)}catch(H){if(se(U),H!==H+0)throw H;ae(1,0)}},t:function(P,M,F,z,U){var H=ie();try{fe(P)(M,F,z,U)}catch(Y){if(se(H),Y!==Y+0)throw Y;ae(1,0)}},u:function(P,M,F,z,U,H){var Y=ie();try{fe(P)(M,F,z,U,H)}catch(te){if(se(Y),te!==te+0)throw te;ae(1,0)}},x:function(P,M,F,z,U,H,Y){var te=ie();try{fe(P)(M,F,z,U,H,Y)}catch(Q){if(se(te),Q!==Q+0)throw Q;ae(1,0)}},z:function(P,M,F,z,U,H,Y,te){var Q=ie();try{fe(P)(M,F,z,U,H,Y,te)}catch(le){if(se(Q),le!==le+0)throw le;ae(1,0)}},ga:function(P,M,F,z,U,H,Y,te,Q){var le=ie();try{fe(P)(M,F,z,U,H,Y,te,Q)}catch(Te){if(se(le),Te!==Te+0)throw Te;ae(1,0)}},A:function(P,M,F,z,U,H,Y,te,Q,le,Te){var Le=ie();try{fe(P)(M,F,z,U,H,Y,te,Q,le,Te)}catch($e){if(se(Le),$e!==$e+0)throw $e;ae(1,0)}},C:function(P,M,F,z,U,H,Y,te,Q,le,Te,Le,$e,q,me,Ee){var We=ie();try{fe(P)(M,F,z,U,H,Y,te,Q,le,Te,Le,$e,q,me,Ee)}catch(et){if(se(We),et!==et+0)throw et;ae(1,0)}},aa:function(P,M,F,z,U,H,Y,te){var Q=ie();try{wt(P,M,F,z,U,H,Y,te)}catch(le){if(se(Q),le!==le+0)throw le;ae(1,0)}},_:function(P,M,F,z,U,H,Y,te,Q,le,Te,Le){var $e=ie();try{xt(P,M,F,z,U,H,Y,te,Q,le,Te,Le)}catch(q){if(se($e),q!==q+0)throw q;ae(1,0)}},$:function(P,M,F,z,U,H){var Y=ie();try{vt(P,M,F,z,U,H)}catch(te){if(se(Y),te!==te+0)throw te;ae(1,0)}},n:function(P){return P},F:function(P){Re=P},ha:It,y:function(P,M,F,z){return 
It(P,M,F,z)}};(function(){function P(U){s.asm=U.exports,I=s.asm.Ka,ue(),Se=s.asm.ib,oe.unshift(s.asm.La),Ne--,s.monitorRunDependencies&&s.monitorRunDependencies(Ne),Ne==0&&Ce&&(U=Ce,Ce=null,U())}function M(U){P(U.instance)}function F(U){return function(){if(!S&&(b||y)){if(typeof fetch=="function"&&!xe.startsWith("file://"))return fetch(xe,{credentials:"same-origin"}).then(function(H){if(!H.ok)throw"failed to load wasm binary file at '"+xe+"'";return H.arrayBuffer()}).catch(function(){return Ge()});if(o)return new Promise(function(H,Y){o(xe,function(te){H(new Uint8Array(te))},Y)})}return Promise.resolve().then(function(){return Ge()})}().then(function(H){return WebAssembly.instantiate(H,z)}).then(function(H){return H}).then(U,function(H){O("failed to asynchronously prepare wasm: "+H),Oe(H)})}var z={a:kt};if(Ne++,s.monitorRunDependencies&&s.monitorRunDependencies(Ne),s.instantiateWasm)try{return s.instantiateWasm(z,P)}catch(U){return O("Module.instantiateWasm callback failed with error: "+U),!1}(S||typeof WebAssembly.instantiateStreaming!="function"||Ae()||xe.startsWith("file://")||w||typeof fetch!="function"?F(M):fetch(xe,{credentials:"same-origin"}).then(function(U){return WebAssembly.instantiateStreaming(U,z).then(M,function(H){return O("wasm streaming compile failed: "+H),O("falling back to ArrayBuffer instantiation"),F(M)})})).catch(f)})(),s.___wasm_call_ctors=function(){return(s.___wasm_call_ctors=s.asm.La).apply(null,arguments)},s._OrtInit=function(){return(s._OrtInit=s.asm.Ma).apply(null,arguments)},s._OrtCreateSessionOptions=function(){return(s._OrtCreateSessionOptions=s.asm.Na).apply(null,arguments)},s._OrtAppendExecutionProvider=function(){return(s._OrtAppendExecutionProvider=s.asm.Oa).apply(null,arguments)},s._OrtAddSessionConfigEntry=function(){return(s._OrtAddSessionConfigEntry=s.asm.Pa).apply(null,arguments)},s._OrtReleaseSessionOptions=function(){return(s._OrtReleaseSessionOptions=s.asm.Qa).apply(null,arguments)},s._OrtCreateSession=function(){return(s._OrtCreateSession=s.asm.Ra).apply(null,arguments)},s._OrtReleaseSession=function(){return(s._OrtReleaseSession=s.asm.Sa).apply(null,arguments)},s._OrtGetInputCount=function(){return(s._OrtGetInputCount=s.asm.Ta).apply(null,arguments)},s._OrtGetOutputCount=function(){return(s._OrtGetOutputCount=s.asm.Ua).apply(null,arguments)},s._OrtGetInputName=function(){return(s._OrtGetInputName=s.asm.Va).apply(null,arguments)},s._OrtGetOutputName=function(){return(s._OrtGetOutputName=s.asm.Wa).apply(null,arguments)},s._OrtFree=function(){return(s._OrtFree=s.asm.Xa).apply(null,arguments)},s._OrtCreateTensor=function(){return(s._OrtCreateTensor=s.asm.Ya).apply(null,arguments)},s._OrtGetTensorData=function(){return(s._OrtGetTensorData=s.asm.Za).apply(null,arguments)},s._OrtReleaseTensor=function(){return(s._OrtReleaseTensor=s.asm._a).apply(null,arguments)},s._OrtCreateRunOptions=function(){return(s._OrtCreateRunOptions=s.asm.$a).apply(null,arguments)},s._OrtAddRunConfigEntry=function(){return(s._OrtAddRunConfigEntry=s.asm.ab).apply(null,arguments)},s._OrtReleaseRunOptions=function(){return(s._OrtReleaseRunOptions=s.asm.bb).apply(null,arguments)},s._OrtRun=function(){return(s._OrtRun=s.asm.cb).apply(null,arguments)},s._OrtEndProfiling=function(){return(s._OrtEndProfiling=s.asm.db).apply(null,arguments)};var 
Je,we=s._malloc=function(){return(we=s._malloc=s.asm.eb).apply(null,arguments)},ot=s._free=function(){return(ot=s._free=s.asm.fb).apply(null,arguments)},ft=s._fflush=function(){return(ft=s._fflush=s.asm.gb).apply(null,arguments)},st=s.___funcs_on_exit=function(){return(st=s.___funcs_on_exit=s.asm.hb).apply(null,arguments)},ae=s._setThrew=function(){return(ae=s._setThrew=s.asm.jb).apply(null,arguments)},ie=s.stackSave=function(){return(ie=s.stackSave=s.asm.kb).apply(null,arguments)},se=s.stackRestore=function(){return(se=s.stackRestore=s.asm.lb).apply(null,arguments)},gt=s.stackAlloc=function(){return(gt=s.stackAlloc=s.asm.mb).apply(null,arguments)},at=s.___cxa_can_catch=function(){return(at=s.___cxa_can_catch=s.asm.nb).apply(null,arguments)},mt=s.___cxa_is_pointer_type=function(){return(mt=s.___cxa_is_pointer_type=s.asm.ob).apply(null,arguments)},bt=s.dynCall_j=function(){return(bt=s.dynCall_j=s.asm.pb).apply(null,arguments)},_t=s.dynCall_iiiiij=function(){return(_t=s.dynCall_iiiiij=s.asm.qb).apply(null,arguments)},yt=s.dynCall_jii=function(){return(yt=s.dynCall_jii=s.asm.rb).apply(null,arguments)},wt=s.dynCall_viiiiij=function(){return(wt=s.dynCall_viiiiij=s.asm.sb).apply(null,arguments)},vt=s.dynCall_vjji=function(){return(vt=s.dynCall_vjji=s.asm.tb).apply(null,arguments)},xt=s.dynCall_viiijjjii=function(){return(xt=s.dynCall_viiijjjii=s.asm.ub).apply(null,arguments)},Tt=s.dynCall_iij=function(){return(Tt=s.dynCall_iij=s.asm.vb).apply(null,arguments)},St=s.dynCall_ji=function(){return(St=s.dynCall_ji=s.asm.wb).apply(null,arguments)},At=s.dynCall_iiiiiij=function(){return(At=s.dynCall_iiiiiij=s.asm.xb).apply(null,arguments)},Ot=s.dynCall_iiij=function(){return(Ot=s.dynCall_iiij=s.asm.yb).apply(null,arguments)};function Et(){function P(){if(!Je&&(Je=!0,s.calledRun=!0,!D)){if(Xe(oe),h(s),s.onRuntimeInitialized&&s.onRuntimeInitialized(),s.postRun)for(typeof s.postRun=="function"&&(s.postRun=[s.postRun]);s.postRun.length;){var M=s.postRun.shift();be.unshift(M)}Xe(be)}}if(!(0{_.exports=function(n,a){for(var u=new Array(arguments.length-1),l=0,p=2,s=!0;p{var a=n;a.length=function(h){var f=h.length;if(!f)return 0;for(var c=0;--f%4>1&&h.charAt(f)==="=";)++c;return Math.ceil(3*h.length)/4-c};for(var u=new Array(64),l=new Array(123),p=0;p<64;)l[u[p]=p<26?p+65:p<52?p+71:p<62?p-4:p-59|43]=p++;a.encode=function(h,f,c){for(var o,t=null,e=[],r=0,i=0;f>2],o=(3&d)<<4,i=1;break;case 1:e[r++]=u[o|d>>4],o=(15&d)<<2,i=2;break;case 2:e[r++]=u[o|d>>6],e[r++]=u[63&d],i=0}r>8191&&((t||(t=[])).push(String.fromCharCode.apply(String,e)),r=0)}return i&&(e[r++]=u[o],e[r++]=61,i===1&&(e[r++]=61)),t?(r&&t.push(String.fromCharCode.apply(String,e.slice(0,r))),t.join("")):String.fromCharCode.apply(String,e.slice(0,r))};var s="invalid encoding";a.decode=function(h,f,c){for(var o,t=c,e=0,r=0;r1)break;if((i=l[i])===void 0)throw Error(s);switch(e){case 0:o=i,e=1;break;case 1:f[c++]=o<<2|(48&i)>>4,o=i,e=2;break;case 2:f[c++]=(15&o)<<4|(60&i)>>2,o=i,e=3;break;case 3:f[c++]=(3&o)<<6|i,e=0}}if(e===1)throw Error(s);return c-t},a.test=function(h){return/^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$/.test(h)}},9211:_=>{function n(){this._listeners={}}_.exports=n,n.prototype.on=function(a,u,l){return(this._listeners[a]||(this._listeners[a]=[])).push({fn:u,ctx:l||this}),this},n.prototype.off=function(a,u){if(a===void 0)this._listeners={};else if(u===void 0)this._listeners[a]=[];else for(var l=this._listeners[a],p=0;p{function n(s){return typeof Float32Array<"u"?function(){var h=new Float32Array([-0]),f=new 
Uint8Array(h.buffer),c=f[3]===128;function o(i,d,g){h[0]=i,d[g]=f[0],d[g+1]=f[1],d[g+2]=f[2],d[g+3]=f[3]}function t(i,d,g){h[0]=i,d[g]=f[3],d[g+1]=f[2],d[g+2]=f[1],d[g+3]=f[0]}function e(i,d){return f[0]=i[d],f[1]=i[d+1],f[2]=i[d+2],f[3]=i[d+3],h[0]}function r(i,d){return f[3]=i[d],f[2]=i[d+1],f[1]=i[d+2],f[0]=i[d+3],h[0]}s.writeFloatLE=c?o:t,s.writeFloatBE=c?t:o,s.readFloatLE=c?e:r,s.readFloatBE=c?r:e}():function(){function h(c,o,t,e){var r=o<0?1:0;if(r&&(o=-o),o===0)c(1/o>0?0:2147483648,t,e);else if(isNaN(o))c(2143289344,t,e);else if(o>34028234663852886e22)c((r<<31|2139095040)>>>0,t,e);else if(o<11754943508222875e-54)c((r<<31|Math.round(o/1401298464324817e-60))>>>0,t,e);else{var i=Math.floor(Math.log(o)/Math.LN2);c((r<<31|i+127<<23|8388607&Math.round(o*Math.pow(2,-i)*8388608))>>>0,t,e)}}function f(c,o,t){var e=c(o,t),r=2*(e>>31)+1,i=e>>>23&255,d=8388607&e;return i===255?d?NaN:r*(1/0):i===0?1401298464324817e-60*r*d:r*Math.pow(2,i-150)*(d+8388608)}s.writeFloatLE=h.bind(null,a),s.writeFloatBE=h.bind(null,u),s.readFloatLE=f.bind(null,l),s.readFloatBE=f.bind(null,p)}(),typeof Float64Array<"u"?function(){var h=new Float64Array([-0]),f=new Uint8Array(h.buffer),c=f[7]===128;function o(i,d,g){h[0]=i,d[g]=f[0],d[g+1]=f[1],d[g+2]=f[2],d[g+3]=f[3],d[g+4]=f[4],d[g+5]=f[5],d[g+6]=f[6],d[g+7]=f[7]}function t(i,d,g){h[0]=i,d[g]=f[7],d[g+1]=f[6],d[g+2]=f[5],d[g+3]=f[4],d[g+4]=f[3],d[g+5]=f[2],d[g+6]=f[1],d[g+7]=f[0]}function e(i,d){return f[0]=i[d],f[1]=i[d+1],f[2]=i[d+2],f[3]=i[d+3],f[4]=i[d+4],f[5]=i[d+5],f[6]=i[d+6],f[7]=i[d+7],h[0]}function r(i,d){return f[7]=i[d],f[6]=i[d+1],f[5]=i[d+2],f[4]=i[d+3],f[3]=i[d+4],f[2]=i[d+5],f[1]=i[d+6],f[0]=i[d+7],h[0]}s.writeDoubleLE=c?o:t,s.writeDoubleBE=c?t:o,s.readDoubleLE=c?e:r,s.readDoubleBE=c?r:e}():function(){function h(c,o,t,e,r,i){var d=e<0?1:0;if(d&&(e=-e),e===0)c(0,r,i+o),c(1/e>0?0:2147483648,r,i+t);else if(isNaN(e))c(0,r,i+o),c(2146959360,r,i+t);else if(e>17976931348623157e292)c(0,r,i+o),c((d<<31|2146435072)>>>0,r,i+t);else{var g;if(e<22250738585072014e-324)c((g=e/5e-324)>>>0,r,i+o),c((d<<31|g/4294967296)>>>0,r,i+t);else{var m=Math.floor(Math.log(e)/Math.LN2);m===1024&&(m=1023),c(4503599627370496*(g=e*Math.pow(2,-m))>>>0,r,i+o),c((d<<31|m+1023<<20|1048576*g&1048575)>>>0,r,i+t)}}}function f(c,o,t,e,r){var i=c(e,r+o),d=c(e,r+t),g=2*(d>>31)+1,m=d>>>20&2047,b=4294967296*(1048575&d)+i;return m===2047?b?NaN:g*(1/0):m===0?5e-324*g*b:g*Math.pow(2,m-1075)*(b+4503599627370496)}s.writeDoubleLE=h.bind(null,a,0,4),s.writeDoubleBE=h.bind(null,u,4,0),s.readDoubleLE=f.bind(null,l,0,4),s.readDoubleBE=f.bind(null,p,4,0)}(),s}function a(s,h,f){h[f]=255&s,h[f+1]=s>>>8&255,h[f+2]=s>>>16&255,h[f+3]=s>>>24}function u(s,h,f){h[f]=s>>>24,h[f+1]=s>>>16&255,h[f+2]=s>>>8&255,h[f+3]=255&s}function l(s,h){return(s[h]|s[h+1]<<8|s[h+2]<<16|s[h+3]<<24)>>>0}function p(s,h){return(s[h]<<24|s[h+1]<<16|s[h+2]<<8|s[h+3])>>>0}_.exports=n(n)},7199:module=>{function inquire(moduleName){try{var mod=eval("quire".replace(/^/,"re"))(moduleName);if(mod&&(mod.length||Object.keys(mod).length))return mod}catch(_){}return null}module.exports=inquire},6662:_=>{_.exports=function(n,a,u){var l=u||8192,p=l>>>1,s=null,h=l;return function(f){if(f<1||f>p)return n(f);h+f>l&&(s=n(l),h=0);var c=a.call(s,h,h+=f);return 7&h&&(h=1+(7|h)),c}}},4997:(_,n)=>{var a=n;a.length=function(u){for(var 
l=0,p=0,s=0;s191&&s<224?f[c++]=(31&s)<<6|63&u[l++]:s>239&&s<365?(s=((7&s)<<18|(63&u[l++])<<12|(63&u[l++])<<6|63&u[l++])-65536,f[c++]=55296+(s>>10),f[c++]=56320+(1023&s)):f[c++]=(15&s)<<12|(63&u[l++])<<6|63&u[l++],c>8191&&((h||(h=[])).push(String.fromCharCode.apply(String,f)),c=0);return h?(c&&h.push(String.fromCharCode.apply(String,f.slice(0,c))),h.join("")):String.fromCharCode.apply(String,f.slice(0,c))},a.write=function(u,l,p){for(var s,h,f=p,c=0;c>6|192,l[p++]=63&s|128):(64512&s)==55296&&(64512&(h=u.charCodeAt(c+1)))==56320?(s=65536+((1023&s)<<10)+(1023&h),++c,l[p++]=s>>18|240,l[p++]=s>>12&63|128,l[p++]=s>>6&63|128,l[p++]=63&s|128):(l[p++]=s>>12|224,l[p++]=s>>6&63|128,l[p++]=63&s|128);return p-f}},3442:(_,n)=>{n.__esModule=!0;var a=function(){function u(l){if(!l)throw new TypeError("Invalid argument; `value` has no value.");this.value=u.EMPTY,l&&u.isGuid(l)&&(this.value=l)}return u.isGuid=function(l){var p=l.toString();return l&&(l instanceof u||u.validator.test(p))},u.create=function(){return new u([u.gen(2),u.gen(1),u.gen(1),u.gen(1),u.gen(3)].join("-"))},u.createEmpty=function(){return new u("emptyguid")},u.parse=function(l){return new u(l)},u.raw=function(){return[u.gen(2),u.gen(1),u.gen(1),u.gen(1),u.gen(3)].join("-")},u.gen=function(l){for(var p="",s=0;s{_.exports=a;var n=null;try{n=new WebAssembly.Instance(new WebAssembly.Module(new Uint8Array([0,97,115,109,1,0,0,0,1,13,2,96,0,1,127,96,4,127,127,127,127,1,127,3,7,6,0,1,1,1,1,1,6,6,1,127,1,65,0,11,7,50,6,3,109,117,108,0,1,5,100,105,118,95,115,0,2,5,100,105,118,95,117,0,3,5,114,101,109,95,115,0,4,5,114,101,109,95,117,0,5,8,103,101,116,95,104,105,103,104,0,0,10,191,1,6,4,0,35,0,11,36,1,1,126,32,0,173,32,1,173,66,32,134,132,32,2,173,32,3,173,66,32,134,132,126,34,4,66,32,135,167,36,0,32,4,167,11,36,1,1,126,32,0,173,32,1,173,66,32,134,132,32,2,173,32,3,173,66,32,134,132,127,34,4,66,32,135,167,36,0,32,4,167,11,36,1,1,126,32,0,173,32,1,173,66,32,134,132,32,2,173,32,3,173,66,32,134,132,128,34,4,66,32,135,167,36,0,32,4,167,11,36,1,1,126,32,0,173,32,1,173,66,32,134,132,32,2,173,32,3,173,66,32,134,132,129,34,4,66,32,135,167,36,0,32,4,167,11,36,1,1,126,32,0,173,32,1,173,66,32,134,132,32,2,173,32,3,173,66,32,134,132,130,34,4,66,32,135,167,36,0,32,4,167,11])),{}).exports}catch{}function a(x,I,N){this.low=0|x,this.high=0|I,this.unsigned=!!N}function u(x){return(x&&x.__isLong__)===!0}a.prototype.__isLong__,Object.defineProperty(a.prototype,"__isLong__",{value:!0}),a.isLong=u;var l={},p={};function s(x,I){var N,R,L;return I?(L=0<=(x>>>=0)&&x<256)&&(R=p[x])?R:(N=f(x,(0|x)<0?-1:0,!0),L&&(p[x]=N),N):(L=-128<=(x|=0)&&x<128)&&(R=l[x])?R:(N=f(x,x<0?-1:0,!1),L&&(l[x]=N),N)}function h(x,I){if(isNaN(x))return I?m:g;if(I){if(x<0)return m;if(x>=r)return S}else{if(x<=-i)return A;if(x+1>=i)return v}return x<0?h(-x,I).neg():f(x%e|0,x/e|0,I)}function f(x,I,N){return new a(x,I,N)}a.fromInt=s,a.fromNumber=h,a.fromBits=f;var c=Math.pow;function o(x,I,N){if(x.length===0)throw Error("empty string");if(x==="NaN"||x==="Infinity"||x==="+Infinity"||x==="-Infinity")return g;if(typeof I=="number"?(N=I,I=!1):I=!!I,(N=N||10)<2||360)throw Error("interior hyphen");if(R===0)return o(x.substring(1),I,N).neg();for(var L=h(c(N,8)),$=g,G=0;G>>0:this.low},O.toNumber=function(){return this.unsigned?(this.high>>>0)*e+(this.low>>>0):this.high*e+(this.low>>>0)},O.toString=function(x){if((x=x||10)<2||36>>0).toString(x);if(($=D).isZero())return j+G;for(;j.length<6;)j="0"+j;G=""+j+G}},O.getHighBits=function(){return this.high},O.getHighBitsUnsigned=function(){return 
this.high>>>0},O.getLowBits=function(){return this.low},O.getLowBitsUnsigned=function(){return this.low>>>0},O.getNumBitsAbs=function(){if(this.isNegative())return this.eq(A)?64:this.neg().getNumBitsAbs();for(var x=this.high!=0?this.high:this.low,I=31;I>0&&!(x&1<=0},O.isOdd=function(){return(1&this.low)==1},O.isEven=function(){return(1&this.low)==0},O.equals=function(x){return u(x)||(x=t(x)),(this.unsigned===x.unsigned||this.high>>>31!=1||x.high>>>31!=1)&&this.high===x.high&&this.low===x.low},O.eq=O.equals,O.notEquals=function(x){return!this.eq(x)},O.neq=O.notEquals,O.ne=O.notEquals,O.lessThan=function(x){return this.comp(x)<0},O.lt=O.lessThan,O.lessThanOrEqual=function(x){return this.comp(x)<=0},O.lte=O.lessThanOrEqual,O.le=O.lessThanOrEqual,O.greaterThan=function(x){return this.comp(x)>0},O.gt=O.greaterThan,O.greaterThanOrEqual=function(x){return this.comp(x)>=0},O.gte=O.greaterThanOrEqual,O.ge=O.greaterThanOrEqual,O.compare=function(x){if(u(x)||(x=t(x)),this.eq(x))return 0;var I=this.isNegative(),N=x.isNegative();return I&&!N?-1:!I&&N?1:this.unsigned?x.high>>>0>this.high>>>0||x.high===this.high&&x.low>>>0>this.low>>>0?-1:1:this.sub(x).isNegative()?-1:1},O.comp=O.compare,O.negate=function(){return!this.unsigned&&this.eq(A)?A:this.not().add(b)},O.neg=O.negate,O.add=function(x){u(x)||(x=t(x));var I=this.high>>>16,N=65535&this.high,R=this.low>>>16,L=65535&this.low,$=x.high>>>16,G=65535&x.high,D=x.low>>>16,j=0,Z=0,X=0,J=0;return X+=(J+=L+(65535&x.low))>>>16,Z+=(X+=R+D)>>>16,j+=(Z+=N+G)>>>16,j+=I+$,f((X&=65535)<<16|(J&=65535),(j&=65535)<<16|(Z&=65535),this.unsigned)},O.subtract=function(x){return u(x)||(x=t(x)),this.add(x.neg())},O.sub=O.subtract,O.multiply=function(x){if(this.isZero())return g;if(u(x)||(x=t(x)),n)return f(n.mul(this.low,this.high,x.low,x.high),n.get_high(),this.unsigned);if(x.isZero())return g;if(this.eq(A))return x.isOdd()?A:g;if(x.eq(A))return this.isOdd()?A:g;if(this.isNegative())return x.isNegative()?this.neg().mul(x.neg()):this.neg().mul(x).neg();if(x.isNegative())return this.mul(x.neg()).neg();if(this.lt(d)&&x.lt(d))return h(this.toNumber()*x.toNumber(),this.unsigned);var I=this.high>>>16,N=65535&this.high,R=this.low>>>16,L=65535&this.low,$=x.high>>>16,G=65535&x.high,D=x.low>>>16,j=65535&x.low,Z=0,X=0,J=0,ee=0;return J+=(ee+=L*j)>>>16,X+=(J+=R*j)>>>16,J&=65535,X+=(J+=L*D)>>>16,Z+=(X+=N*j)>>>16,X&=65535,Z+=(X+=R*D)>>>16,X&=65535,Z+=(X+=L*G)>>>16,Z+=I*j+N*D+R*G+L*$,f((J&=65535)<<16|(ee&=65535),(Z&=65535)<<16|(X&=65535),this.unsigned)},O.mul=O.multiply,O.divide=function(x){if(u(x)||(x=t(x)),x.isZero())throw Error("division by zero");var I,N,R;if(n)return this.unsigned||this.high!==-2147483648||x.low!==-1||x.high!==-1?f((this.unsigned?n.div_u:n.div_s)(this.low,this.high,x.low,x.high),n.get_high(),this.unsigned):this;if(this.isZero())return this.unsigned?m:g;if(this.unsigned){if(x.unsigned||(x=x.toUnsigned()),x.gt(this))return m;if(x.gt(this.shru(1)))return y;R=m}else{if(this.eq(A))return x.eq(b)||x.eq(w)?A:x.eq(A)?b:(I=this.shr(1).div(x).shl(1)).eq(g)?x.isNegative()?b:w:(N=this.sub(x.mul(I)),R=I.add(N.div(x)));if(x.eq(A))return this.unsigned?m:g;if(this.isNegative())return x.isNegative()?this.neg().div(x.neg()):this.neg().div(x).neg();if(x.isNegative())return this.div(x.neg()).neg();R=g}for(N=this;N.gte(x);){I=Math.max(1,Math.floor(N.toNumber()/x.toNumber()));for(var L=Math.ceil(Math.log(I)/Math.LN2),$=L<=48?1:c(2,L-48),G=h(I),D=G.mul(x);D.isNegative()||D.gt(N);)D=(G=h(I-=$,this.unsigned)).mul(x);G.isZero()&&(G=b),R=R.add(G),N=N.sub(D)}return 
R},O.div=O.divide,O.modulo=function(x){return u(x)||(x=t(x)),n?f((this.unsigned?n.rem_u:n.rem_s)(this.low,this.high,x.low,x.high),n.get_high(),this.unsigned):this.sub(this.div(x).mul(x))},O.mod=O.modulo,O.rem=O.modulo,O.not=function(){return f(~this.low,~this.high,this.unsigned)},O.and=function(x){return u(x)||(x=t(x)),f(this.low&x.low,this.high&x.high,this.unsigned)},O.or=function(x){return u(x)||(x=t(x)),f(this.low|x.low,this.high|x.high,this.unsigned)},O.xor=function(x){return u(x)||(x=t(x)),f(this.low^x.low,this.high^x.high,this.unsigned)},O.shiftLeft=function(x){return u(x)&&(x=x.toInt()),(x&=63)==0?this:x<32?f(this.low<>>32-x,this.unsigned):f(0,this.low<>>x|this.high<<32-x,this.high>>x,this.unsigned):f(this.high>>x-32,this.high>=0?0:-1,this.unsigned)},O.shr=O.shiftRight,O.shiftRightUnsigned=function(x){if(u(x)&&(x=x.toInt()),(x&=63)==0)return this;var I=this.high;return x<32?f(this.low>>>x|I<<32-x,I>>>x,this.unsigned):f(x===32?I:I>>>x-32,0,this.unsigned)},O.shru=O.shiftRightUnsigned,O.shr_u=O.shiftRightUnsigned,O.toSigned=function(){return this.unsigned?f(this.low,this.high,!1):this},O.toUnsigned=function(){return this.unsigned?this:f(this.low,this.high,!0)},O.toBytes=function(x){return x?this.toBytesLE():this.toBytesBE()},O.toBytesLE=function(){var x=this.high,I=this.low;return[255&I,I>>>8&255,I>>>16&255,I>>>24,255&x,x>>>8&255,x>>>16&255,x>>>24]},O.toBytesBE=function(){var x=this.high,I=this.low;return[x>>>24,x>>>16&255,x>>>8&255,255&x,I>>>24,I>>>16&255,I>>>8&255,255&I]},a.fromBytes=function(x,I,N){return N?a.fromBytesLE(x,I):a.fromBytesBE(x,I)},a.fromBytesLE=function(x,I){return new a(x[0]|x[1]<<8|x[2]<<16|x[3]<<24,x[4]|x[5]<<8|x[6]<<16|x[7]<<24,I)},a.fromBytesBE=function(x,I){return new a(x[4]<<24|x[5]<<16|x[6]<<8|x[7],x[0]<<24|x[1]<<16|x[2]<<8|x[3],I)}},1446:(_,n,a)=>{var u,l,p,s=a(2100),h=s.Reader,f=s.Writer,c=s.util,o=s.roots.default||(s.roots.default={});o.onnx=((p={}).Version=(u={},(l=Object.create(u))[u[0]="_START_VERSION"]=0,l[u[1]="IR_VERSION_2017_10_10"]=1,l[u[2]="IR_VERSION_2017_10_30"]=2,l[u[3]="IR_VERSION_2017_11_3"]=3,l[u[4]="IR_VERSION_2019_1_22"]=4,l[u[5]="IR_VERSION"]=5,l),p.AttributeProto=function(){function t(e){if(this.floats=[],this.ints=[],this.strings=[],this.tensors=[],this.graphs=[],e)for(var r=Object.keys(e),i=0;i>>3){case 1:d.name=e.string();break;case 21:d.refAttrName=e.string();break;case 13:d.docString=e.string();break;case 20:d.type=e.int32();break;case 2:d.f=e.float();break;case 3:d.i=e.int64();break;case 4:d.s=e.bytes();break;case 5:d.t=o.onnx.TensorProto.decode(e,e.uint32());break;case 6:d.g=o.onnx.GraphProto.decode(e,e.uint32());break;case 7:if(d.floats&&d.floats.length||(d.floats=[]),(7&g)==2)for(var m=e.uint32()+e.pos;e.pos>>0,e.i.high>>>0).toNumber())),e.s!=null&&(typeof e.s=="string"?c.base64.decode(e.s,r.s=c.newBuffer(c.base64.length(e.s)),0):e.s.length&&(r.s=e.s)),e.t!=null){if(typeof e.t!="object")throw TypeError(".onnx.AttributeProto.t: object expected");r.t=o.onnx.TensorProto.fromObject(e.t)}if(e.g!=null){if(typeof e.g!="object")throw TypeError(".onnx.AttributeProto.g: object expected");r.g=o.onnx.GraphProto.fromObject(e.g)}if(e.floats){if(!Array.isArray(e.floats))throw TypeError(".onnx.AttributeProto.floats: array expected");r.floats=[];for(var i=0;i>>0,e.ints[i].high>>>0).toNumber())}if(e.strings){if(!Array.isArray(e.strings))throw TypeError(".onnx.AttributeProto.strings: array 
expected");for(r.strings=[],i=0;i>>0,e.i.high>>>0).toNumber():e.i),e.s!=null&&e.hasOwnProperty("s")&&(i.s=r.bytes===String?c.base64.encode(e.s,0,e.s.length):r.bytes===Array?Array.prototype.slice.call(e.s):e.s),e.t!=null&&e.hasOwnProperty("t")&&(i.t=o.onnx.TensorProto.toObject(e.t,r)),e.g!=null&&e.hasOwnProperty("g")&&(i.g=o.onnx.GraphProto.toObject(e.g,r)),e.floats&&e.floats.length){i.floats=[];for(var g=0;g>>0,e.ints[g].high>>>0).toNumber():e.ints[g];if(e.strings&&e.strings.length)for(i.strings=[],g=0;g>>3){case 1:d.name=e.string();break;case 2:d.type=o.onnx.TypeProto.decode(e,e.uint32());break;case 3:d.docString=e.string();break;default:e.skipType(7&g)}}return d},t.decodeDelimited=function(e){return e instanceof h||(e=new h(e)),this.decode(e,e.uint32())},t.verify=function(e){if(typeof e!="object"||e===null)return"object expected";if(e.name!=null&&e.hasOwnProperty("name")&&!c.isString(e.name))return"name: string expected";if(e.type!=null&&e.hasOwnProperty("type")){var r=o.onnx.TypeProto.verify(e.type);if(r)return"type."+r}return e.docString!=null&&e.hasOwnProperty("docString")&&!c.isString(e.docString)?"docString: string expected":null},t.fromObject=function(e){if(e instanceof o.onnx.ValueInfoProto)return e;var r=new o.onnx.ValueInfoProto;if(e.name!=null&&(r.name=String(e.name)),e.type!=null){if(typeof e.type!="object")throw TypeError(".onnx.ValueInfoProto.type: object expected");r.type=o.onnx.TypeProto.fromObject(e.type)}return e.docString!=null&&(r.docString=String(e.docString)),r},t.toObject=function(e,r){r||(r={});var i={};return r.defaults&&(i.name="",i.type=null,i.docString=""),e.name!=null&&e.hasOwnProperty("name")&&(i.name=e.name),e.type!=null&&e.hasOwnProperty("type")&&(i.type=o.onnx.TypeProto.toObject(e.type,r)),e.docString!=null&&e.hasOwnProperty("docString")&&(i.docString=e.docString),i},t.prototype.toJSON=function(){return this.constructor.toObject(this,s.util.toJSONOptions)},t}(),p.NodeProto=function(){function t(e){if(this.input=[],this.output=[],this.attribute=[],e)for(var r=Object.keys(e),i=0;i>>3){case 1:d.input&&d.input.length||(d.input=[]),d.input.push(e.string());break;case 2:d.output&&d.output.length||(d.output=[]),d.output.push(e.string());break;case 3:d.name=e.string();break;case 4:d.opType=e.string();break;case 7:d.domain=e.string();break;case 5:d.attribute&&d.attribute.length||(d.attribute=[]),d.attribute.push(o.onnx.AttributeProto.decode(e,e.uint32()));break;case 6:d.docString=e.string();break;default:e.skipType(7&g)}}return d},t.decodeDelimited=function(e){return e instanceof h||(e=new h(e)),this.decode(e,e.uint32())},t.verify=function(e){if(typeof e!="object"||e===null)return"object expected";if(e.input!=null&&e.hasOwnProperty("input")){if(!Array.isArray(e.input))return"input: array expected";for(var r=0;r>>3){case 1:d.irVersion=e.int64();break;case 8:d.opsetImport&&d.opsetImport.length||(d.opsetImport=[]),d.opsetImport.push(o.onnx.OperatorSetIdProto.decode(e,e.uint32()));break;case 2:d.producerName=e.string();break;case 3:d.producerVersion=e.string();break;case 4:d.domain=e.string();break;case 5:d.modelVersion=e.int64();break;case 6:d.docString=e.string();break;case 7:d.graph=o.onnx.GraphProto.decode(e,e.uint32());break;case 14:d.metadataProps&&d.metadataProps.length||(d.metadataProps=[]),d.metadataProps.push(o.onnx.StringStringEntryProto.decode(e,e.uint32()));break;default:e.skipType(7&g)}}return d},t.decodeDelimited=function(e){return e instanceof h||(e=new h(e)),this.decode(e,e.uint32())},t.verify=function(e){if(typeof e!="object"||e===null)return"object 
expected";if(e.irVersion!=null&&e.hasOwnProperty("irVersion")&&!(c.isInteger(e.irVersion)||e.irVersion&&c.isInteger(e.irVersion.low)&&c.isInteger(e.irVersion.high)))return"irVersion: integer|Long expected";if(e.opsetImport!=null&&e.hasOwnProperty("opsetImport")){if(!Array.isArray(e.opsetImport))return"opsetImport: array expected";for(var r=0;r>>0,e.irVersion.high>>>0).toNumber())),e.opsetImport){if(!Array.isArray(e.opsetImport))throw TypeError(".onnx.ModelProto.opsetImport: array expected");r.opsetImport=[];for(var i=0;i>>0,e.modelVersion.high>>>0).toNumber())),e.docString!=null&&(r.docString=String(e.docString)),e.graph!=null){if(typeof e.graph!="object")throw TypeError(".onnx.ModelProto.graph: object expected");r.graph=o.onnx.GraphProto.fromObject(e.graph)}if(e.metadataProps){if(!Array.isArray(e.metadataProps))throw TypeError(".onnx.ModelProto.metadataProps: array expected");for(r.metadataProps=[],i=0;i>>0,e.irVersion.high>>>0).toNumber():e.irVersion),e.producerName!=null&&e.hasOwnProperty("producerName")&&(i.producerName=e.producerName),e.producerVersion!=null&&e.hasOwnProperty("producerVersion")&&(i.producerVersion=e.producerVersion),e.domain!=null&&e.hasOwnProperty("domain")&&(i.domain=e.domain),e.modelVersion!=null&&e.hasOwnProperty("modelVersion")&&(typeof e.modelVersion=="number"?i.modelVersion=r.longs===String?String(e.modelVersion):e.modelVersion:i.modelVersion=r.longs===String?c.Long.prototype.toString.call(e.modelVersion):r.longs===Number?new c.LongBits(e.modelVersion.low>>>0,e.modelVersion.high>>>0).toNumber():e.modelVersion),e.docString!=null&&e.hasOwnProperty("docString")&&(i.docString=e.docString),e.graph!=null&&e.hasOwnProperty("graph")&&(i.graph=o.onnx.GraphProto.toObject(e.graph,r)),e.opsetImport&&e.opsetImport.length){i.opsetImport=[];for(var g=0;g>>3){case 1:d.key=e.string();break;case 2:d.value=e.string();break;default:e.skipType(7&g)}}return d},t.decodeDelimited=function(e){return e instanceof h||(e=new h(e)),this.decode(e,e.uint32())},t.verify=function(e){return typeof e!="object"||e===null?"object expected":e.key!=null&&e.hasOwnProperty("key")&&!c.isString(e.key)?"key: string expected":e.value!=null&&e.hasOwnProperty("value")&&!c.isString(e.value)?"value: string expected":null},t.fromObject=function(e){if(e instanceof o.onnx.StringStringEntryProto)return e;var r=new o.onnx.StringStringEntryProto;return e.key!=null&&(r.key=String(e.key)),e.value!=null&&(r.value=String(e.value)),r},t.toObject=function(e,r){r||(r={});var i={};return r.defaults&&(i.key="",i.value=""),e.key!=null&&e.hasOwnProperty("key")&&(i.key=e.key),e.value!=null&&e.hasOwnProperty("value")&&(i.value=e.value),i},t.prototype.toJSON=function(){return this.constructor.toObject(this,s.util.toJSONOptions)},t}(),p.TensorAnnotation=function(){function t(e){if(this.quantParameterTensorNames=[],e)for(var r=Object.keys(e),i=0;i>>3){case 1:d.tensorName=e.string();break;case 2:d.quantParameterTensorNames&&d.quantParameterTensorNames.length||(d.quantParameterTensorNames=[]),d.quantParameterTensorNames.push(o.onnx.StringStringEntryProto.decode(e,e.uint32()));break;default:e.skipType(7&g)}}return d},t.decodeDelimited=function(e){return e instanceof h||(e=new h(e)),this.decode(e,e.uint32())},t.verify=function(e){if(typeof e!="object"||e===null)return"object expected";if(e.tensorName!=null&&e.hasOwnProperty("tensorName")&&!c.isString(e.tensorName))return"tensorName: string 
expected";if(e.quantParameterTensorNames!=null&&e.hasOwnProperty("quantParameterTensorNames")){if(!Array.isArray(e.quantParameterTensorNames))return"quantParameterTensorNames: array expected";for(var r=0;r>>3){case 1:d.node&&d.node.length||(d.node=[]),d.node.push(o.onnx.NodeProto.decode(e,e.uint32()));break;case 2:d.name=e.string();break;case 5:d.initializer&&d.initializer.length||(d.initializer=[]),d.initializer.push(o.onnx.TensorProto.decode(e,e.uint32()));break;case 10:d.docString=e.string();break;case 11:d.input&&d.input.length||(d.input=[]),d.input.push(o.onnx.ValueInfoProto.decode(e,e.uint32()));break;case 12:d.output&&d.output.length||(d.output=[]),d.output.push(o.onnx.ValueInfoProto.decode(e,e.uint32()));break;case 13:d.valueInfo&&d.valueInfo.length||(d.valueInfo=[]),d.valueInfo.push(o.onnx.ValueInfoProto.decode(e,e.uint32()));break;case 14:d.quantizationAnnotation&&d.quantizationAnnotation.length||(d.quantizationAnnotation=[]),d.quantizationAnnotation.push(o.onnx.TensorAnnotation.decode(e,e.uint32()));break;default:e.skipType(7&g)}}return d},t.decodeDelimited=function(e){return e instanceof h||(e=new h(e)),this.decode(e,e.uint32())},t.verify=function(e){if(typeof e!="object"||e===null)return"object expected";if(e.node!=null&&e.hasOwnProperty("node")){if(!Array.isArray(e.node))return"node: array expected";for(var r=0;r>>3){case 1:if(d.dims&&d.dims.length||(d.dims=[]),(7&g)==2)for(var m=e.uint32()+e.pos;e.pos>>0,e.dims[i].high>>>0).toNumber())}if(e.dataType!=null&&(r.dataType=0|e.dataType),e.segment!=null){if(typeof e.segment!="object")throw TypeError(".onnx.TensorProto.segment: object expected");r.segment=o.onnx.TensorProto.Segment.fromObject(e.segment)}if(e.floatData){if(!Array.isArray(e.floatData))throw TypeError(".onnx.TensorProto.floatData: array expected");for(r.floatData=[],i=0;i>>0,e.int64Data[i].high>>>0).toNumber())}if(e.name!=null&&(r.name=String(e.name)),e.docString!=null&&(r.docString=String(e.docString)),e.rawData!=null&&(typeof e.rawData=="string"?c.base64.decode(e.rawData,r.rawData=c.newBuffer(c.base64.length(e.rawData)),0):e.rawData.length&&(r.rawData=e.rawData)),e.externalData){if(!Array.isArray(e.externalData))throw TypeError(".onnx.TensorProto.externalData: array expected");for(r.externalData=[],i=0;i>>0,e.uint64Data[i].high>>>0).toNumber(!0))}return r},t.toObject=function(e,r){r||(r={});var i={};if((r.arrays||r.defaults)&&(i.dims=[],i.floatData=[],i.int32Data=[],i.stringData=[],i.int64Data=[],i.doubleData=[],i.uint64Data=[],i.externalData=[]),r.defaults&&(i.dataType=0,i.segment=null,i.name="",r.bytes===String?i.rawData="":(i.rawData=[],r.bytes!==Array&&(i.rawData=c.newBuffer(i.rawData))),i.docString="",i.dataLocation=r.enums===String?"DEFAULT":0),e.dims&&e.dims.length){i.dims=[];for(var 
d=0;d>>0,e.dims[d].high>>>0).toNumber():e.dims[d]}if(e.dataType!=null&&e.hasOwnProperty("dataType")&&(i.dataType=e.dataType),e.segment!=null&&e.hasOwnProperty("segment")&&(i.segment=o.onnx.TensorProto.Segment.toObject(e.segment,r)),e.floatData&&e.floatData.length)for(i.floatData=[],d=0;d>>0,e.int64Data[d].high>>>0).toNumber():e.int64Data[d];if(e.name!=null&&e.hasOwnProperty("name")&&(i.name=e.name),e.rawData!=null&&e.hasOwnProperty("rawData")&&(i.rawData=r.bytes===String?c.base64.encode(e.rawData,0,e.rawData.length):r.bytes===Array?Array.prototype.slice.call(e.rawData):e.rawData),e.doubleData&&e.doubleData.length)for(i.doubleData=[],d=0;d>>0,e.uint64Data[d].high>>>0).toNumber(!0):e.uint64Data[d];if(e.docString!=null&&e.hasOwnProperty("docString")&&(i.docString=e.docString),e.externalData&&e.externalData.length)for(i.externalData=[],d=0;d>>3){case 1:g.begin=r.int64();break;case 2:g.end=r.int64();break;default:r.skipType(7&m)}}return g},e.decodeDelimited=function(r){return r instanceof h||(r=new h(r)),this.decode(r,r.uint32())},e.verify=function(r){return typeof r!="object"||r===null?"object expected":r.begin!=null&&r.hasOwnProperty("begin")&&!(c.isInteger(r.begin)||r.begin&&c.isInteger(r.begin.low)&&c.isInteger(r.begin.high))?"begin: integer|Long expected":r.end!=null&&r.hasOwnProperty("end")&&!(c.isInteger(r.end)||r.end&&c.isInteger(r.end.low)&&c.isInteger(r.end.high))?"end: integer|Long expected":null},e.fromObject=function(r){if(r instanceof o.onnx.TensorProto.Segment)return r;var i=new o.onnx.TensorProto.Segment;return r.begin!=null&&(c.Long?(i.begin=c.Long.fromValue(r.begin)).unsigned=!1:typeof r.begin=="string"?i.begin=parseInt(r.begin,10):typeof r.begin=="number"?i.begin=r.begin:typeof r.begin=="object"&&(i.begin=new c.LongBits(r.begin.low>>>0,r.begin.high>>>0).toNumber())),r.end!=null&&(c.Long?(i.end=c.Long.fromValue(r.end)).unsigned=!1:typeof r.end=="string"?i.end=parseInt(r.end,10):typeof r.end=="number"?i.end=r.end:typeof r.end=="object"&&(i.end=new c.LongBits(r.end.low>>>0,r.end.high>>>0).toNumber())),i},e.toObject=function(r,i){i||(i={});var d={};if(i.defaults){if(c.Long){var g=new c.Long(0,0,!1);d.begin=i.longs===String?g.toString():i.longs===Number?g.toNumber():g}else d.begin=i.longs===String?"0":0;c.Long?(g=new c.Long(0,0,!1),d.end=i.longs===String?g.toString():i.longs===Number?g.toNumber():g):d.end=i.longs===String?"0":0}return r.begin!=null&&r.hasOwnProperty("begin")&&(typeof r.begin=="number"?d.begin=i.longs===String?String(r.begin):r.begin:d.begin=i.longs===String?c.Long.prototype.toString.call(r.begin):i.longs===Number?new c.LongBits(r.begin.low>>>0,r.begin.high>>>0).toNumber():r.begin),r.end!=null&&r.hasOwnProperty("end")&&(typeof r.end=="number"?d.end=i.longs===String?String(r.end):r.end:d.end=i.longs===String?c.Long.prototype.toString.call(r.end):i.longs===Number?new c.LongBits(r.end.low>>>0,r.end.high>>>0).toNumber():r.end),d},e.prototype.toJSON=function(){return this.constructor.toObject(this,s.util.toJSONOptions)},e}(),t.DataLocation=function(){var e={},r=Object.create(e);return r[e[0]="DEFAULT"]=0,r[e[1]="EXTERNAL"]=1,r}(),t}(),p.TensorShapeProto=function(){function t(e){if(this.dim=[],e)for(var r=Object.keys(e),i=0;i>>3==1?(d.dim&&d.dim.length||(d.dim=[]),d.dim.push(o.onnx.TensorShapeProto.Dimension.decode(e,e.uint32()))):e.skipType(7&g)}return d},t.decodeDelimited=function(e){return e instanceof h||(e=new h(e)),this.decode(e,e.uint32())},t.verify=function(e){if(typeof e!="object"||e===null)return"object 
expected";if(e.dim!=null&&e.hasOwnProperty("dim")){if(!Array.isArray(e.dim))return"dim: array expected";for(var r=0;r>>3){case 1:m.dimValue=i.int64();break;case 2:m.dimParam=i.string();break;case 3:m.denotation=i.string();break;default:i.skipType(7&b)}}return m},e.decodeDelimited=function(i){return i instanceof h||(i=new h(i)),this.decode(i,i.uint32())},e.verify=function(i){if(typeof i!="object"||i===null)return"object expected";var d={};if(i.dimValue!=null&&i.hasOwnProperty("dimValue")&&(d.value=1,!(c.isInteger(i.dimValue)||i.dimValue&&c.isInteger(i.dimValue.low)&&c.isInteger(i.dimValue.high))))return"dimValue: integer|Long expected";if(i.dimParam!=null&&i.hasOwnProperty("dimParam")){if(d.value===1)return"value: multiple values";if(d.value=1,!c.isString(i.dimParam))return"dimParam: string expected"}return i.denotation!=null&&i.hasOwnProperty("denotation")&&!c.isString(i.denotation)?"denotation: string expected":null},e.fromObject=function(i){if(i instanceof o.onnx.TensorShapeProto.Dimension)return i;var d=new o.onnx.TensorShapeProto.Dimension;return i.dimValue!=null&&(c.Long?(d.dimValue=c.Long.fromValue(i.dimValue)).unsigned=!1:typeof i.dimValue=="string"?d.dimValue=parseInt(i.dimValue,10):typeof i.dimValue=="number"?d.dimValue=i.dimValue:typeof i.dimValue=="object"&&(d.dimValue=new c.LongBits(i.dimValue.low>>>0,i.dimValue.high>>>0).toNumber())),i.dimParam!=null&&(d.dimParam=String(i.dimParam)),i.denotation!=null&&(d.denotation=String(i.denotation)),d},e.toObject=function(i,d){d||(d={});var g={};return d.defaults&&(g.denotation=""),i.dimValue!=null&&i.hasOwnProperty("dimValue")&&(typeof i.dimValue=="number"?g.dimValue=d.longs===String?String(i.dimValue):i.dimValue:g.dimValue=d.longs===String?c.Long.prototype.toString.call(i.dimValue):d.longs===Number?new c.LongBits(i.dimValue.low>>>0,i.dimValue.high>>>0).toNumber():i.dimValue,d.oneofs&&(g.value="dimValue")),i.dimParam!=null&&i.hasOwnProperty("dimParam")&&(g.dimParam=i.dimParam,d.oneofs&&(g.value="dimParam")),i.denotation!=null&&i.hasOwnProperty("denotation")&&(g.denotation=i.denotation),g},e.prototype.toJSON=function(){return this.constructor.toObject(this,s.util.toJSONOptions)},e}(),t}(),p.TypeProto=function(){function t(r){if(r)for(var i=Object.keys(r),d=0;d>>3){case 1:g.tensorType=o.onnx.TypeProto.Tensor.decode(r,r.uint32());break;case 6:g.denotation=r.string();break;default:r.skipType(7&m)}}return g},t.decodeDelimited=function(r){return r instanceof h||(r=new h(r)),this.decode(r,r.uint32())},t.verify=function(r){if(typeof r!="object"||r===null)return"object expected";if(r.tensorType!=null&&r.hasOwnProperty("tensorType")){var i=o.onnx.TypeProto.Tensor.verify(r.tensorType);if(i)return"tensorType."+i}return r.denotation!=null&&r.hasOwnProperty("denotation")&&!c.isString(r.denotation)?"denotation: string expected":null},t.fromObject=function(r){if(r instanceof o.onnx.TypeProto)return r;var i=new o.onnx.TypeProto;if(r.tensorType!=null){if(typeof r.tensorType!="object")throw TypeError(".onnx.TypeProto.tensorType: object expected");i.tensorType=o.onnx.TypeProto.Tensor.fromObject(r.tensorType)}return r.denotation!=null&&(i.denotation=String(r.denotation)),i},t.toObject=function(r,i){i||(i={});var d={};return i.defaults&&(d.denotation=""),r.tensorType!=null&&r.hasOwnProperty("tensorType")&&(d.tensorType=o.onnx.TypeProto.Tensor.toObject(r.tensorType,i),i.oneofs&&(d.value="tensorType")),r.denotation!=null&&r.hasOwnProperty("denotation")&&(d.denotation=r.denotation),d},t.prototype.toJSON=function(){return 
this.constructor.toObject(this,s.util.toJSONOptions)},t.Tensor=function(){function r(i){if(i)for(var d=Object.keys(i),g=0;g>>3){case 1:m.elemType=i.int32();break;case 2:m.shape=o.onnx.TensorShapeProto.decode(i,i.uint32());break;default:i.skipType(7&b)}}return m},r.decodeDelimited=function(i){return i instanceof h||(i=new h(i)),this.decode(i,i.uint32())},r.verify=function(i){if(typeof i!="object"||i===null)return"object expected";if(i.elemType!=null&&i.hasOwnProperty("elemType")&&!c.isInteger(i.elemType))return"elemType: integer expected";if(i.shape!=null&&i.hasOwnProperty("shape")){var d=o.onnx.TensorShapeProto.verify(i.shape);if(d)return"shape."+d}return null},r.fromObject=function(i){if(i instanceof o.onnx.TypeProto.Tensor)return i;var d=new o.onnx.TypeProto.Tensor;if(i.elemType!=null&&(d.elemType=0|i.elemType),i.shape!=null){if(typeof i.shape!="object")throw TypeError(".onnx.TypeProto.Tensor.shape: object expected");d.shape=o.onnx.TensorShapeProto.fromObject(i.shape)}return d},r.toObject=function(i,d){d||(d={});var g={};return d.defaults&&(g.elemType=0,g.shape=null),i.elemType!=null&&i.hasOwnProperty("elemType")&&(g.elemType=i.elemType),i.shape!=null&&i.hasOwnProperty("shape")&&(g.shape=o.onnx.TensorShapeProto.toObject(i.shape,d)),g},r.prototype.toJSON=function(){return this.constructor.toObject(this,s.util.toJSONOptions)},r}(),t}(),p.OperatorSetIdProto=function(){function t(e){if(e)for(var r=Object.keys(e),i=0;i>>3){case 1:d.domain=e.string();break;case 2:d.version=e.int64();break;default:e.skipType(7&g)}}return d},t.decodeDelimited=function(e){return e instanceof h||(e=new h(e)),this.decode(e,e.uint32())},t.verify=function(e){return typeof e!="object"||e===null?"object expected":e.domain!=null&&e.hasOwnProperty("domain")&&!c.isString(e.domain)?"domain: string expected":e.version!=null&&e.hasOwnProperty("version")&&!(c.isInteger(e.version)||e.version&&c.isInteger(e.version.low)&&c.isInteger(e.version.high))?"version: integer|Long expected":null},t.fromObject=function(e){if(e instanceof o.onnx.OperatorSetIdProto)return e;var r=new o.onnx.OperatorSetIdProto;return e.domain!=null&&(r.domain=String(e.domain)),e.version!=null&&(c.Long?(r.version=c.Long.fromValue(e.version)).unsigned=!1:typeof e.version=="string"?r.version=parseInt(e.version,10):typeof e.version=="number"?r.version=e.version:typeof e.version=="object"&&(r.version=new c.LongBits(e.version.low>>>0,e.version.high>>>0).toNumber())),r},t.toObject=function(e,r){r||(r={});var i={};if(r.defaults)if(i.domain="",c.Long){var d=new c.Long(0,0,!1);i.version=r.longs===String?d.toString():r.longs===Number?d.toNumber():d}else i.version=r.longs===String?"0":0;return e.domain!=null&&e.hasOwnProperty("domain")&&(i.domain=e.domain),e.version!=null&&e.hasOwnProperty("version")&&(typeof e.version=="number"?i.version=r.longs===String?String(e.version):e.version:i.version=r.longs===String?c.Long.prototype.toString.call(e.version):r.longs===Number?new c.LongBits(e.version.low>>>0,e.version.high>>>0).toNumber():e.version),i},t.prototype.toJSON=function(){return this.constructor.toObject(this,s.util.toJSONOptions)},t}(),p),_.exports=o},2100:(_,n,a)=>{_.exports=a(9482)},9482:(_,n,a)=>{var u=n;function l(){u.util._configure(),u.Writer._configure(u.BufferWriter),u.Reader._configure(u.BufferReader)}u.build="minimal",u.Writer=a(1173),u.BufferWriter=a(3155),u.Reader=a(1408),u.BufferReader=a(593),u.util=a(9693),u.rpc=a(5994),u.roots=a(5054),u.configure=l,l()},1408:(_,n,a)=>{_.exports=f;var u,l=a(9693),p=l.LongBits,s=l.utf8;function h(d,g){return 
RangeError("index out of range: "+d.pos+" + "+(g||1)+" > "+d.len)}function f(d){this.buf=d,this.pos=0,this.len=d.length}var c,o=typeof Uint8Array<"u"?function(d){if(d instanceof Uint8Array||Array.isArray(d))return new f(d);throw Error("illegal buffer")}:function(d){if(Array.isArray(d))return new f(d);throw Error("illegal buffer")},t=function(){return l.Buffer?function(d){return(f.create=function(g){return l.Buffer.isBuffer(g)?new u(g):o(g)})(d)}:o};function e(){var d=new p(0,0),g=0;if(!(this.len-this.pos>4)){for(;g<3;++g){if(this.pos>=this.len)throw h(this);if(d.lo=(d.lo|(127&this.buf[this.pos])<<7*g)>>>0,this.buf[this.pos++]<128)return d}return d.lo=(d.lo|(127&this.buf[this.pos++])<<7*g)>>>0,d}for(;g<4;++g)if(d.lo=(d.lo|(127&this.buf[this.pos])<<7*g)>>>0,this.buf[this.pos++]<128)return d;if(d.lo=(d.lo|(127&this.buf[this.pos])<<28)>>>0,d.hi=(d.hi|(127&this.buf[this.pos])>>4)>>>0,this.buf[this.pos++]<128)return d;if(g=0,this.len-this.pos>4){for(;g<5;++g)if(d.hi=(d.hi|(127&this.buf[this.pos])<<7*g+3)>>>0,this.buf[this.pos++]<128)return d}else for(;g<5;++g){if(this.pos>=this.len)throw h(this);if(d.hi=(d.hi|(127&this.buf[this.pos])<<7*g+3)>>>0,this.buf[this.pos++]<128)return d}throw Error("invalid varint encoding")}function r(d,g){return(d[g-4]|d[g-3]<<8|d[g-2]<<16|d[g-1]<<24)>>>0}function i(){if(this.pos+8>this.len)throw h(this,8);return new p(r(this.buf,this.pos+=4),r(this.buf,this.pos+=4))}f.create=t(),f.prototype._slice=l.Array.prototype.subarray||l.Array.prototype.slice,f.prototype.uint32=(c=4294967295,function(){if(c=(127&this.buf[this.pos])>>>0,this.buf[this.pos++]<128||(c=(c|(127&this.buf[this.pos])<<7)>>>0,this.buf[this.pos++]<128)||(c=(c|(127&this.buf[this.pos])<<14)>>>0,this.buf[this.pos++]<128)||(c=(c|(127&this.buf[this.pos])<<21)>>>0,this.buf[this.pos++]<128)||(c=(c|(15&this.buf[this.pos])<<28)>>>0,this.buf[this.pos++]<128))return c;if((this.pos+=5)>this.len)throw this.pos=this.len,h(this,10);return c}),f.prototype.int32=function(){return 0|this.uint32()},f.prototype.sint32=function(){var d=this.uint32();return d>>>1^-(1&d)|0},f.prototype.bool=function(){return this.uint32()!==0},f.prototype.fixed32=function(){if(this.pos+4>this.len)throw h(this,4);return r(this.buf,this.pos+=4)},f.prototype.sfixed32=function(){if(this.pos+4>this.len)throw h(this,4);return 0|r(this.buf,this.pos+=4)},f.prototype.float=function(){if(this.pos+4>this.len)throw h(this,4);var d=l.float.readFloatLE(this.buf,this.pos);return this.pos+=4,d},f.prototype.double=function(){if(this.pos+8>this.len)throw h(this,4);var d=l.float.readDoubleLE(this.buf,this.pos);return this.pos+=8,d},f.prototype.bytes=function(){var d=this.uint32(),g=this.pos,m=this.pos+d;if(m>this.len)throw h(this,d);return this.pos+=d,Array.isArray(this.buf)?this.buf.slice(g,m):g===m?new this.buf.constructor(0):this._slice.call(this.buf,g,m)},f.prototype.string=function(){var d=this.bytes();return s.read(d,0,d.length)},f.prototype.skip=function(d){if(typeof d=="number"){if(this.pos+d>this.len)throw h(this,d);this.pos+=d}else do if(this.pos>=this.len)throw h(this);while(128&this.buf[this.pos++]);return this},f.prototype.skipType=function(d){switch(d){case 0:this.skip();break;case 1:this.skip(8);break;case 2:this.skip(this.uint32());break;case 3:for(;(d=7&this.uint32())!=4;)this.skipType(d);break;case 5:this.skip(4);break;default:throw Error("invalid wire type "+d+" at offset "+this.pos)}return this},f._configure=function(d){u=d,f.create=t(),u._configure();var g=l.Long?"toLong":"toNumber";l.merge(f.prototype,{int64:function(){return 
e.call(this)[g](!1)},uint64:function(){return e.call(this)[g](!0)},sint64:function(){return e.call(this).zzDecode()[g](!1)},fixed64:function(){return i.call(this)[g](!0)},sfixed64:function(){return i.call(this)[g](!1)}})}},593:(_,n,a)=>{_.exports=p;var u=a(1408);(p.prototype=Object.create(u.prototype)).constructor=p;var l=a(9693);function p(s){u.call(this,s)}p._configure=function(){l.Buffer&&(p.prototype._slice=l.Buffer.prototype.slice)},p.prototype.string=function(){var s=this.uint32();return this.buf.utf8Slice?this.buf.utf8Slice(this.pos,this.pos=Math.min(this.pos+s,this.len)):this.buf.toString("utf-8",this.pos,this.pos=Math.min(this.pos+s,this.len))},p._configure()},5054:_=>{_.exports={}},5994:(_,n,a)=>{n.Service=a(7948)},7948:(_,n,a)=>{_.exports=l;var u=a(9693);function l(p,s,h){if(typeof p!="function")throw TypeError("rpcImpl must be a function");u.EventEmitter.call(this),this.rpcImpl=p,this.requestDelimited=!!s,this.responseDelimited=!!h}(l.prototype=Object.create(u.EventEmitter.prototype)).constructor=l,l.prototype.rpcCall=function p(s,h,f,c,o){if(!c)throw TypeError("request must be specified");var t=this;if(!o)return u.asPromise(p,t,s,h,f,c);if(t.rpcImpl)try{return t.rpcImpl(s,h[t.requestDelimited?"encodeDelimited":"encode"](c).finish(),function(e,r){if(e)return t.emit("error",e,s),o(e);if(r!==null){if(!(r instanceof f))try{r=f[t.responseDelimited?"decodeDelimited":"decode"](r)}catch(i){return t.emit("error",i,s),o(i)}return t.emit("data",r,s),o(null,r)}t.end(!0)})}catch(e){return t.emit("error",e,s),void setTimeout(function(){o(e)},0)}else setTimeout(function(){o(Error("already ended"))},0)},l.prototype.end=function(p){return this.rpcImpl&&(p||this.rpcImpl(null,null,null),this.rpcImpl=null,this.emit("end").off()),this}},1945:(_,n,a)=>{_.exports=l;var u=a(9693);function l(f,c){this.lo=f>>>0,this.hi=c>>>0}var p=l.zero=new l(0,0);p.toNumber=function(){return 0},p.zzEncode=p.zzDecode=function(){return this},p.length=function(){return 1};var s=l.zeroHash="\0\0\0\0\0\0\0\0";l.fromNumber=function(f){if(f===0)return p;var c=f<0;c&&(f=-f);var o=f>>>0,t=(f-o)/4294967296>>>0;return c&&(t=~t>>>0,o=~o>>>0,++o>4294967295&&(o=0,++t>4294967295&&(t=0))),new l(o,t)},l.from=function(f){if(typeof f=="number")return l.fromNumber(f);if(u.isString(f)){if(!u.Long)return l.fromNumber(parseInt(f,10));f=u.Long.fromString(f)}return f.low||f.high?new l(f.low>>>0,f.high>>>0):p},l.prototype.toNumber=function(f){if(!f&&this.hi>>>31){var c=1+~this.lo>>>0,o=~this.hi>>>0;return c||(o=o+1>>>0),-(c+4294967296*o)}return this.lo+4294967296*this.hi},l.prototype.toLong=function(f){return u.Long?new u.Long(0|this.lo,0|this.hi,!!f):{low:0|this.lo,high:0|this.hi,unsigned:!!f}};var h=String.prototype.charCodeAt;l.fromHash=function(f){return f===s?p:new l((h.call(f,0)|h.call(f,1)<<8|h.call(f,2)<<16|h.call(f,3)<<24)>>>0,(h.call(f,4)|h.call(f,5)<<8|h.call(f,6)<<16|h.call(f,7)<<24)>>>0)},l.prototype.toHash=function(){return String.fromCharCode(255&this.lo,this.lo>>>8&255,this.lo>>>16&255,this.lo>>>24,255&this.hi,this.hi>>>8&255,this.hi>>>16&255,this.hi>>>24)},l.prototype.zzEncode=function(){var f=this.hi>>31;return this.hi=((this.hi<<1|this.lo>>>31)^f)>>>0,this.lo=(this.lo<<1^f)>>>0,this},l.prototype.zzDecode=function(){var f=-(1&this.lo);return this.lo=((this.lo>>>1|this.hi<<31)^f)>>>0,this.hi=(this.hi>>>1^f)>>>0,this},l.prototype.length=function(){var f=this.lo,c=(this.lo>>>28|this.hi<<4)>>>0,o=this.hi>>>24;return o===0?c===0?f<16384?f<128?1:2:f<2097152?3:4:c<16384?c<128?5:6:c<2097152?7:8:o<128?9:10}},9693:function(_,n,a){var 
u=n;function l(s,h,f){for(var c=Object.keys(h),o=0;o0)},u.Buffer=function(){try{var s=u.inquire("buffer").Buffer;return s.prototype.utf8Write?s:null}catch{return null}}(),u._Buffer_from=null,u._Buffer_allocUnsafe=null,u.newBuffer=function(s){return typeof s=="number"?u.Buffer?u._Buffer_allocUnsafe(s):new u.Array(s):u.Buffer?u._Buffer_from(s):typeof Uint8Array>"u"?s:new Uint8Array(s)},u.Array=typeof Uint8Array<"u"?Uint8Array:Array,u.Long=u.global.dcodeIO&&u.global.dcodeIO.Long||u.global.Long||u.inquire("long"),u.key2Re=/^true|false|0|1$/,u.key32Re=/^-?(?:0|[1-9][0-9]*)$/,u.key64Re=/^(?:[\\x00-\\xff]{8}|-?(?:0|[1-9][0-9]*))$/,u.longToHash=function(s){return s?u.LongBits.from(s).toHash():u.LongBits.zeroHash},u.longFromHash=function(s,h){var f=u.LongBits.fromHash(s);return u.Long?u.Long.fromBits(f.lo,f.hi,h):f.toNumber(!!h)},u.merge=l,u.lcFirst=function(s){return s.charAt(0).toLowerCase()+s.substring(1)},u.newError=p,u.ProtocolError=p("ProtocolError"),u.oneOfGetter=function(s){for(var h={},f=0;f-1;--o)if(h[c[o]]===1&&this[c[o]]!==void 0&&this[c[o]]!==null)return c[o]}},u.oneOfSetter=function(s){return function(h){for(var f=0;f{_.exports=t;var u,l=a(9693),p=l.LongBits,s=l.base64,h=l.utf8;function f(b,y,w){this.fn=b,this.len=y,this.next=void 0,this.val=w}function c(){}function o(b){this.head=b.head,this.tail=b.tail,this.len=b.len,this.next=b.states}function t(){this.len=0,this.head=new f(c,0,0),this.tail=this.head,this.states=null}var e=function(){return l.Buffer?function(){return(t.create=function(){return new u})()}:function(){return new t}};function r(b,y,w){y[w]=255&b}function i(b,y){this.len=b,this.next=void 0,this.val=y}function d(b,y,w){for(;b.hi;)y[w++]=127&b.lo|128,b.lo=(b.lo>>>7|b.hi<<25)>>>0,b.hi>>>=7;for(;b.lo>127;)y[w++]=127&b.lo|128,b.lo=b.lo>>>7;y[w++]=b.lo}function g(b,y,w){y[w]=255&b,y[w+1]=b>>>8&255,y[w+2]=b>>>16&255,y[w+3]=b>>>24}t.create=e(),t.alloc=function(b){return new l.Array(b)},l.Array!==Array&&(t.alloc=l.pool(t.alloc,l.Array.prototype.subarray)),t.prototype._push=function(b,y,w){return this.tail=this.tail.next=new f(b,y,w),this.len+=y,this},i.prototype=Object.create(f.prototype),i.prototype.fn=function(b,y,w){for(;b>127;)y[w++]=127&b|128,b>>>=7;y[w]=b},t.prototype.uint32=function(b){return this.len+=(this.tail=this.tail.next=new i((b>>>=0)<128?1:b<16384?2:b<2097152?3:b<268435456?4:5,b)).len,this},t.prototype.int32=function(b){return b<0?this._push(d,10,p.fromNumber(b)):this.uint32(b)},t.prototype.sint32=function(b){return this.uint32((b<<1^b>>31)>>>0)},t.prototype.uint64=function(b){var y=p.from(b);return this._push(d,y.length(),y)},t.prototype.int64=t.prototype.uint64,t.prototype.sint64=function(b){var y=p.from(b).zzEncode();return this._push(d,y.length(),y)},t.prototype.bool=function(b){return this._push(r,1,b?1:0)},t.prototype.fixed32=function(b){return this._push(g,4,b>>>0)},t.prototype.sfixed32=t.prototype.fixed32,t.prototype.fixed64=function(b){var y=p.from(b);return this._push(g,4,y.lo)._push(g,4,y.hi)},t.prototype.sfixed64=t.prototype.fixed64,t.prototype.float=function(b){return this._push(l.float.writeFloatLE,4,b)},t.prototype.double=function(b){return this._push(l.float.writeDoubleLE,8,b)};var m=l.Array.prototype.set?function(b,y,w){y.set(b,w)}:function(b,y,w){for(var v=0;v>>0;if(!y)return this._push(r,1,0);if(l.isString(b)){var w=t.alloc(y=s.length(b));s.decode(b,w,0),b=w}return this.uint32(y)._push(m,y,b)},t.prototype.string=function(b){var y=h.length(b);return y?this.uint32(y)._push(h.write,y,b):this._push(r,1,0)},t.prototype.fork=function(){return 
this.states=new o(this),this.head=this.tail=new f(c,0,0),this.len=0,this},t.prototype.reset=function(){return this.states?(this.head=this.states.head,this.tail=this.states.tail,this.len=this.states.len,this.states=this.states.next):(this.head=this.tail=new f(c,0,0),this.len=0),this},t.prototype.ldelim=function(){var b=this.head,y=this.tail,w=this.len;return this.reset().uint32(w),w&&(this.tail.next=b.next,this.tail=y,this.len+=w),this},t.prototype.finish=function(){for(var b=this.head.next,y=this.constructor.alloc(this.len),w=0;b;)b.fn(b.val,y,w),w+=b.len,b=b.next;return y},t._configure=function(b){u=b,t.create=e(),u._configure()}},3155:(_,n,a)=>{_.exports=p;var u=a(1173);(p.prototype=Object.create(u.prototype)).constructor=p;var l=a(9693);function p(){u.call(this)}function s(h,f,c){h.length<40?l.utf8.write(h,f,c):f.utf8Write?f.utf8Write(h,c):f.write(h,c)}p._configure=function(){p.alloc=l._Buffer_allocUnsafe,p.writeBytesBuffer=l.Buffer&&l.Buffer.prototype instanceof Uint8Array&&l.Buffer.prototype.set.name==="set"?function(h,f,c){f.set(h,c)}:function(h,f,c){if(h.copy)h.copy(f,c,0,h.length);else for(var o=0;o>>0;return this.uint32(f),f&&this._push(p.writeBytesBuffer,f,h),this},p.prototype.string=function(h){var f=l.Buffer.byteLength(h);return this.uint32(f),f&&this._push(s,f,h),this},p._configure()},7714:(_,n,a)=>{n.R=void 0;const u=a(6919),l=a(7448);n.R=new class{async init(){}async createSessionHandler(p,s){const h=new u.Session(s);return await h.loadModel(p),new l.OnnxjsSessionHandler(h)}}},4200:(_,n,a)=>{n.c8=n.rX=void 0;const u=a(1670),l=a(5381),p=a(2157),s=a(2306);n.rX=()=>{if((typeof u.env.wasm.initTimeout!="number"||u.env.wasm.initTimeout<0)&&(u.env.wasm.initTimeout=0),typeof u.env.wasm.simd!="boolean"&&(u.env.wasm.simd=!0),typeof u.env.wasm.proxy!="boolean"&&(u.env.wasm.proxy=!1),typeof u.env.wasm.numThreads!="number"||!Number.isInteger(u.env.wasm.numThreads)||u.env.wasm.numThreads<=0){const h=typeof navigator>"u"?(0,l.cpus)().length:navigator.hardwareConcurrency;u.env.wasm.numThreads=Math.min(4,Math.ceil((h||1)/2))}},n.c8=new class{async init(){(0,n.rX)(),await(0,p.initWasm)()}async createSessionHandler(h,f){const c=new s.OnnxruntimeWebAssemblySessionHandler;return await c.loadModel(h,f),Promise.resolve(c)}}},6018:function(_,n,a){var u=this&&this.__createBinding||(Object.create?function(s,h,f,c){c===void 0&&(c=f);var o=Object.getOwnPropertyDescriptor(h,f);o&&!("get"in o?!h.__esModule:o.writable||o.configurable)||(o={enumerable:!0,get:function(){return h[f]}}),Object.defineProperty(s,c,o)}:function(s,h,f,c){c===void 0&&(c=f),s[c]=h[f]}),l=this&&this.__exportStar||function(s,h){for(var f in s)f==="default"||Object.prototype.hasOwnProperty.call(h,f)||u(h,s,f)};Object.defineProperty(n,"__esModule",{value:!0}),l(a(1670),n);const p=a(1670);{const s=a(7714).R;(0,p.registerBackend)("webgl",s,-10)}{const s=a(4200).c8;(0,p.registerBackend)("cpu",s,10),(0,p.registerBackend)("wasm",s,10),(0,p.registerBackend)("xnnpack",s,9)}},246:(_,n)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createAttributeWithCacheKey=void 0;class a{constructor(l){Object.assign(this,l)}get cacheKey(){return this._cacheKey||(this._cacheKey=Object.getOwnPropertyNames(this).sort().map(l=>`${this[l]}`).join(";")),this._cacheKey}}n.createAttributeWithCacheKey=u=>new a(u)},7778:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.Attribute=void 0;const u=a(1446),l=a(9395),p=a(9162),s=a(2517);var h=l.onnxruntime.experimental.fbs;class f{constructor(o){if(this._attributes=new Map,o!=null){for(const t of o)t 
instanceof u.onnx.AttributeProto?this._attributes.set(t.name,[f.getValue(t),f.getType(t)]):t instanceof h.Attribute&&this._attributes.set(t.name(),[f.getValue(t),f.getType(t)]);if(this._attributes.sizep.Tensor.fromProto(r));if(o instanceof h.Attribute)return e.map(r=>p.Tensor.fromOrtTensor(r))}if(t===u.onnx.AttributeProto.AttributeType.STRING&&o instanceof u.onnx.AttributeProto){const r=e;return(0,s.decodeUtf8String)(r)}return t===u.onnx.AttributeProto.AttributeType.STRINGS&&o instanceof u.onnx.AttributeProto?e.map(s.decodeUtf8String):e}static getValueNoCheck(o){return o instanceof u.onnx.AttributeProto?this.getValueNoCheckFromOnnxFormat(o):this.getValueNoCheckFromOrtFormat(o)}static getValueNoCheckFromOnnxFormat(o){switch(o.type){case u.onnx.AttributeProto.AttributeType.FLOAT:return o.f;case u.onnx.AttributeProto.AttributeType.INT:return o.i;case u.onnx.AttributeProto.AttributeType.STRING:return o.s;case u.onnx.AttributeProto.AttributeType.TENSOR:return o.t;case u.onnx.AttributeProto.AttributeType.GRAPH:return o.g;case u.onnx.AttributeProto.AttributeType.FLOATS:return o.floats;case u.onnx.AttributeProto.AttributeType.INTS:return o.ints;case u.onnx.AttributeProto.AttributeType.STRINGS:return o.strings;case u.onnx.AttributeProto.AttributeType.TENSORS:return o.tensors;case u.onnx.AttributeProto.AttributeType.GRAPHS:return o.graphs;default:throw new Error(`unsupported attribute type: ${u.onnx.AttributeProto.AttributeType[o.type]}`)}}static getValueNoCheckFromOrtFormat(o){switch(o.type()){case h.AttributeType.FLOAT:return o.f();case h.AttributeType.INT:return o.i();case h.AttributeType.STRING:return o.s();case h.AttributeType.TENSOR:return o.t();case h.AttributeType.GRAPH:return o.g();case h.AttributeType.FLOATS:return o.floatsArray();case h.AttributeType.INTS:{const t=[];for(let e=0;e{Object.defineProperty(n,"__esModule",{value:!0}),n.resolveBackend=n.backend=void 0;const u=a(5038),l=new Map;async function p(s){const h=n.backend;if(h[s]!==void 0&&function(f){const c=f;return"initialize"in c&&typeof c.initialize=="function"&&"createSessionHandler"in c&&typeof c.createSessionHandler=="function"&&"dispose"in c&&typeof c.dispose=="function"}(h[s])){const f=h[s];let c=f.initialize();if(typeof c=="object"&&"then"in c&&(c=await c),c)return l.set(s,f),f}}n.backend={webgl:new u.WebGLBackend},n.resolveBackend=async function s(h){if(!h)return s(["webgl"]);{const f=typeof h=="string"?[h]:h;for(const c of f){const o=l.get(c);if(o)return o;const t=await p(c);if(t)return t}}throw new Error("no available backend to use")}},5038:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.WebGLBackend=void 0;const u=a(1670),l=a(6231),p=a(6416),s=a(7305);n.WebGLBackend=class{get contextId(){return u.env.webgl.contextId}set contextId(h){u.env.webgl.contextId=h}get matmulMaxBatchSize(){return u.env.webgl.matmulMaxBatchSize}set matmulMaxBatchSize(h){u.env.webgl.matmulMaxBatchSize=h}get textureCacheMode(){return u.env.webgl.textureCacheMode}set textureCacheMode(h){u.env.webgl.textureCacheMode=h}get pack(){return u.env.webgl.pack}set pack(h){u.env.webgl.pack=h}get async(){return u.env.webgl.async}set async(h){u.env.webgl.async=h}initialize(){try{return this.glContext=(0,s.createWebGLContext)(this.contextId),typeof this.matmulMaxBatchSize!="number"&&(this.matmulMaxBatchSize=16),typeof this.textureCacheMode!="string"&&(this.textureCacheMode="full"),typeof this.pack!="boolean"&&(this.pack=!1),typeof this.async!="boolean"&&(this.async=!1),l.Logger.setWithEnv(u.env),l.Logger.verbose("WebGLBackend",`Created WebGLContext: 
${typeof this.glContext} with matmulMaxBatchSize: ${this.matmulMaxBatchSize}; textureCacheMode: ${this.textureCacheMode}; pack: ${this.pack}; async: ${this.async}.`),!0}catch(h){return l.Logger.warning("WebGLBackend",`Unable to initialize WebGLBackend. ${h}`),!1}}createSessionHandler(h){return new p.WebGLSessionHandler(this,h)}dispose(){this.glContext.dispose()}}},5107:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.CoordsGlslLib=void 0;const u=a(2517),l=a(8520),p=a(5060),s=a(7859),h=a(9390);class f extends l.GlslLib{constructor(o){super(o)}getFunctions(){return Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({},this.offsetToCoords()),this.coordsToOffset()),this.toVec()),this.valueFrom()),this.getCommonUtilFuncs()),this.getInputsSamplingSnippets()),this.getOutputSamplingSnippet())}getCustomTypes(){return{}}offsetToCoords(){return{offsetToCoords:new l.GlslLibRoutine(`
- vec2 offsetToCoords(int offset, int width, int height) {
- int t = offset / width;
- int s = offset - t*width;
- vec2 coords = (vec2(s,t) + vec2(0.5,0.5)) / vec2(width, height);
- return coords;
- }
- `)}}coordsToOffset(){return{coordsToOffset:new l.GlslLibRoutine(`
- int coordsToOffset(vec2 coords, int width, int height) {
- float s = coords.s * float(width);
- float t = coords.t * float(height);
- int offset = int(t) * width + int(s);
- return offset;
- }
- `)}}getOutputSamplingSnippet(){const o=this.context.outputTextureLayout;return o.isPacked?this.getPackedOutputSamplingSnippet(o):this.getUnpackedOutputSamplingSnippet(o)}getPackedOutputSamplingSnippet(o){const t=o.unpackedShape,e=[o.width,o.height],r={},i="getOutputCoords";switch(t.length){case 0:r[i]=this.getOutputScalarCoords();break;case 1:r[i]=this.getOutputPacked1DCoords(t,e);break;case 2:r[i]=this.getOutputPacked2DCoords(t,e);break;case 3:r[i]=this.getOutputPacked3DCoords(t,e);break;default:r[i]=this.getOutputPackedNDCoords(t,e)}const d=`
- void setOutput(vec4 val) {
- ${(0,p.getGlsl)(this.context.glContext.version).output} = val;
- }
- `;return r.floatTextureSetRGBA=new l.GlslLibRoutine(d),r}getUnpackedOutputSamplingSnippet(o){const t=o.unpackedShape,e=[o.width,o.height],r={},i="getOutputCoords";switch(t.length){case 0:r[i]=this.getOutputScalarCoords();break;case 1:r[i]=this.getOutputUnpacked1DCoords(t,e);break;case 2:r[i]=this.getOutputUnpacked2DCoords(t,e);break;case 3:r[i]=this.getOutputUnpacked3DCoords(t,e);break;case 4:r[i]=this.getOutputUnpacked4DCoords(t,e);break;case 5:r[i]=this.getOutputUnpacked5DCoords(t,e);break;case 6:r[i]=this.getOutputUnpacked6DCoords(t,e);break;default:throw new Error(`Unsupported output dimensionality: ${t.length}`)}const d=`
- void setOutput(float val) {
- ${(0,p.getGlsl)(this.context.glContext.version).output} = vec4(val, 0, 0, 0);
- }
- `;return r.floatTextureSetR=new l.GlslLibRoutine(d),r}getOutputScalarCoords(){return new l.GlslLibRoutine(`
- int getOutputCoords() {
- return 0;
- }
- `)}getOutputPacked1DCoords(o,t){const e=t;let r="";return e[0]===1?(r=`
- int getOutputCoords() {
- return 2 * int(TexCoords.y * ${e[1]}.0);
- }
- `,new l.GlslLibRoutine(r)):e[1]===1?(r=`
- int getOutputCoords() {
- return 2 * int(TexCoords.x * ${e[0]}.0);
- }
- `,new l.GlslLibRoutine(r)):(r=`
- int getOutputCoords() {
- ivec2 resTexRC = ivec2(TexCoords.xy *
- vec2(${e[0]}, ${e[1]}));
- return 2 * (resTexRC.y * ${e[0]} + resTexRC.x);
- }
- `,new l.GlslLibRoutine(r))}getOutputPacked2DCoords(o,t){let e="";if(u.ArrayUtil.arraysEqual(o,t))return e=`
- ivec2 getOutputCoords() {
- return 2 * ivec2(TexCoords.xy * vec2(${t[0]}, ${t[1]}));
- }
- `,new l.GlslLibRoutine(e);const r=t,i=Math.ceil(o[1]/2);return e=`
- ivec2 getOutputCoords() {
- ivec2 resTexRC = ivec2(TexCoords.xy *
- vec2(${r[0]}, ${r[1]}));
-
- int index = resTexRC.y * ${r[0]} + resTexRC.x;
-
- // reverse r and c order for packed texture
- int r = imod(index, ${i}) * 2;
- int c = 2 * (index / ${i});
-
- return ivec2(r, c);
- }
- `,new l.GlslLibRoutine(e)}getOutputPacked3DCoords(o,t){const e=[t[0],t[1]],r=Math.ceil(o[2]/2),i=r*Math.ceil(o[1]/2),d=`
- ivec3 getOutputCoords() {
- ivec2 resTexRC = ivec2(TexCoords.xy *
- vec2(${e[0]}, ${e[1]}));
- int index = resTexRC.y * ${e[0]} + resTexRC.x;
-
- int b = index / ${i};
- index -= b * ${i};
-
- // reverse r and c order for packed texture
- int r = imod(index, ${r}) * 2;
- int c = 2 * (index / ${r});
-
- return ivec3(b, r, c);
- }
- `;return new l.GlslLibRoutine(d)}getOutputPackedNDCoords(o,t){const e=[t[0],t[1]],r=Math.ceil(o[o.length-1]/2),i=r*Math.ceil(o[o.length-2]/2);let d=i,g="",m="b, r, c";for(let y=2;y=0;--m)i[m]=i[m+1]*o[m+1];const d=["r","c","d"],g=i.map((m,b)=>`int ${d[b]} = index / ${m}; ${b===i.length-1?`int ${d[b+1]} = index - ${d[b]} * ${m}`:`index -= ${d[b]} * ${m}`};`).join("");return e=`
- ivec3 getOutputCoords() {
- ivec2 resTexRC = ivec2(TexCoords.xy *
- vec2(${t[0]}, ${t[1]}));
- int index = resTexRC.y * ${t[0]} + resTexRC.x;
- ${g}
- return ivec3(r, c, d);
- }
- `,new l.GlslLibRoutine(e)}getOutputUnpacked4DCoords(o,t){let e="";const r=o.length;let i=null;r<2&&(i=[]),i=new Array(r-1),i[r-2]=o[r-1];for(let m=r-3;m>=0;--m)i[m]=i[m+1]*o[m+1];const d=["r","c","d","d2"],g=i.map((m,b)=>`int ${d[b]} = index / ${m}; ${b===i.length-1?`int ${d[b+1]} = index - ${d[b]} * ${m}`:`index -= ${d[b]} * ${m}`};`).join("");return e=`
- ivec4 getOutputCoords() {
- ivec2 resTexRC = ivec2(TexCoords.xy *
- vec2(${t[0]}, ${t[1]}));
- int index = resTexRC.y * ${t[0]} + resTexRC.x;
- ${g}
- return ivec4(r, c, d, d2);
- }
- `,new l.GlslLibRoutine(e)}getOutputUnpacked5DCoords(o,t){let e="";const r=o.length;let i=null;r<2&&(i=[]),i=new Array(r-1),i[r-2]=o[r-1];for(let m=r-3;m>=0;--m)i[m]=i[m+1]*o[m+1];const d=["r","c","d","d2","d3"],g=i.map((m,b)=>`int ${d[b]} = index / ${m}; ${b===i.length-1?`int ${d[b+1]} = index - ${d[b]} * ${m}`:`index -= ${d[b]} * ${m}`};`).join("");return e=`
- ivec5 getOutputCoords() {
- ivec2 resTexRC = ivec2(TexCoords.xy *
- vec2(${t[0]}, ${t[1]}));
- int index = resTexRC.y * ${t[0]} + resTexRC.x;
- ${g}
- return ivec5(r, c, d, d2, d3);
- }
- `,new l.GlslLibRoutine(e)}getOutputUnpacked6DCoords(o,t){let e="";const r=o.length;let i=null;r<2&&(i=[]),i=new Array(r-1),i[r-2]=o[r-1];for(let m=r-3;m>=0;--m)i[m]=i[m+1]*o[m+1];const d=["r","c","d","d2","d3","d4"],g=i.map((m,b)=>`int ${d[b]} = index / ${m}; ${b===i.length-1?`int ${d[b+1]} = index - ${d[b]} * ${m}`:`index -= ${d[b]} * ${m}`};`).join("");return e=`
- ivec6 getOutputCoords() {
- ivec2 resTexRC = ivec2(TexCoords.xy *
- vec2(${t[0]}, ${t[1]}));
- int index = resTexRC.y * ${t[0]} + resTexRC.x;
- ${g}
- return ivec6(r, c, d, d2, d3, d4);
- }
- `,new l.GlslLibRoutine(e)}getCommonUtilFuncs(){const o={};let t="uvFromFlat";o[t]=new l.GlslLibRoutine(`
- vec2 uvFromFlat(int texNumR, int texNumC, int index) {
- int texC = index / texNumR;
- int texR = index - texC * texNumR;
- // TODO: swap the texR, texC order in the following function so that row corresponds to u and column corresponds to v.
- return (vec2(texR, texC) + halfCR) / vec2(texNumR, texNumC);
- }
- `),t="packedUVfrom1D",o[t]=new l.GlslLibRoutine(`
- vec2 packedUVfrom1D(int texNumR, int texNumC, int index) {
- int texelIndex = index / 2;
- int texR = texelIndex / texNumC;
- int texC = texelIndex - texR * texNumC;
- return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR);
- }
- `),t="packedUVfrom2D",o[t]=new l.GlslLibRoutine(`
- vec2 packedUVfrom2D(int texNumR, int texNumC, int texelsInLogicalRow, int row, int col) {
- int texelIndex = (row / 2) * texelsInLogicalRow + (col / 2);
- int texR = texelIndex / texNumC;
- int texC = texelIndex - texR * texNumC;
- return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR);
- }
- `),t="packedUVfrom3D",o[t]=new l.GlslLibRoutine(`
- vec2 packedUVfrom3D(int texNumR, int texNumC,
- int texelsInBatch, int texelsInLogicalRow, int b,
- int row, int col) {
- int index = b * texelsInBatch + (row / 2) * texelsInLogicalRow + (col / 2);
- int texR = index / texNumC;
- int texC = index - texR * texNumC;
- return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR);
- }
- `),t="sampleTexture";const e=(0,p.getGlsl)(this.context.glContext.version);return o[t]=new l.GlslLibRoutine(`
- float sampleTexture(sampler2D textureSampler, vec2 uv) {
- return ${e.texture2D}(textureSampler, uv).r;
- }`),o}getInputsSamplingSnippets(){const o={},t=this.context.outputTextureLayout;return this.context.programInfo.inputNames.forEach((e,r)=>{const i=this.context.inputTextureLayouts[r],d=(0,h.generateShaderFuncNameFromInputSamplerName)(e);i.isPacked?o[d]=this.getPackedSamplerFromInput(d,e,i):o[d]=this.getUnpackedSamplerFromInput(d,e,i);const g=(0,h.generateShaderFuncNameFromInputSamplerNameAtOutCoords)(e);i.unpackedShape.length<=t.unpackedShape.length&&(i.isPacked?o[g]=this.getPackedSamplerAtOutputCoords(g,i,t,e):o[g]=this.getUnpackedSamplerAtOutputCoords(g,i,t,e))}),o}getPackedSamplerAtOutputCoords(o,t,e,r){const i=t.unpackedShape,d=e.unpackedShape,g=r,m=(0,h.generateShaderFuncNameFromInputSamplerName)(g),b=i.length,y=d.length,w=u.BroadcastUtil.getBroadcastDims(i,d),v=(0,h.getCoordsDataType)(y),S=y-b;let A;const O=(0,h.getGlChannels)();A=b===0?"":y<2&&w.length>=1?"coords = 0;":w.map($=>`coords.${O[$+S]} = 0;`).join(`
-`);let x="";x=y<2&&b>0?"coords":i.map(($,G)=>`coords.${O[G+S]}`).join(", ");let I="return outputValue;";const N=u.ShapeUtil.size(i)===1,R=u.ShapeUtil.size(d)===1;if(b!==1||N||R){if(N&&!R)I=y===1?`
- return vec4(outputValue.x, outputValue.x, 0., 0.);
- `:`
- return vec4(outputValue.x);
- `;else if(w.length){const $=b-2,G=b-1;w.indexOf($)>-1&&w.indexOf(G)>-1?I="return vec4(outputValue.x);":w.indexOf($)>-1?I="return vec4(outputValue.x, outputValue.y, outputValue.x, outputValue.y);":w.indexOf(G)>-1&&(I="return vec4(outputValue.xx, outputValue.zz);")}}else I=`
- return vec4(outputValue.xy, outputValue.xy);
- `;const L=`
- vec4 ${o}() {
- ${v} coords = getOutputCoords();
-
- int lastDim = coords.${O[y-1]};
- coords.${O[y-1]} = coords.${O[y-2]};
- coords.${O[y-2]} = lastDim;
-
- ${A}
- vec4 outputValue = ${m}(${x});
- ${I}
- }
- `;return new l.GlslLibRoutine(L,["coordinates.getOutputCoords"])}getUnpackedSamplerAtOutputCoords(o,t,e,r){const i=[e.width,e.height],d=[t.width,t.height],g=t.unpackedShape.length,m=e.unpackedShape.length,b=t.unpackedShape,y=e.unpackedShape,w=(0,h.generateShaderFuncNameFromInputSamplerName)(r);if(g===m&&u.ArrayUtil.arraysEqual(d,i)){const R=`
- float ${o}() {
- return sampleTexture(${r}, TexCoords);
- }
- `;return new l.GlslLibRoutine(R,["coordinates.sampleTexture"])}const v=(0,h.getCoordsDataType)(m),S=u.BroadcastUtil.getBroadcastDims(b,y),A=m-g;let O;const x=(0,h.getGlChannels)();O=g===0?"":m<2&&S.length>=1?"coords = 0;":S.map(R=>`coords.${x[R+A]} = 0;`).join(`
-`);let I="";I=m<2&&g>0?"coords":t.unpackedShape.map((R,L)=>`coords.${x[L+A]}`).join(", ");const N=`
- float ${o}() {
- ${v} coords = getOutputCoords();
- ${O}
- return ${w}(${I});
- }
- `;return new l.GlslLibRoutine(N,["coordinates.getOutputCoords"])}getPackedSamplerFromInput(o,t,e){switch(e.unpackedShape.length){case 0:return this.getPackedSamplerScalar(o,t);case 1:return this.getPackedSampler1D(o,t,e);case 2:return this.getPackedSampler2D(o,t,e);case 3:return this.getPackedSampler3D(o,t,e);default:return this.getPackedSamplerND(o,t,e)}}getUnpackedSamplerFromInput(o,t,e){const r=e.unpackedShape;switch(r.length){case 0:return this.getUnpackedSamplerScalar(o,t,e);case 1:return this.getUnpackedSampler1D(o,t,e);case 2:return this.getUnpackedSampler2D(o,t,e);case 3:return this.getUnpackedSampler3D(o,t,e);case 4:return this.getUnpackedSampler4D(o,t,e);case 5:return this.getUnpackedSampler5D(o,t,e);case 6:return this.getUnpackedSampler6D(o,t,e);default:throw new Error(`Unsupported dimension ${r.length}-D`)}}getPackedSamplerScalar(o,t){const e=`
- vec4 ${o}() {
- return ${(0,p.getGlsl)(this.context.glContext.version).texture2D}(${t}, halfCR);
- }
- `;return new l.GlslLibRoutine(e)}getPackedSampler1D(o,t,e){const r=[e.width,e.height],i=[r[1],r[0]],d=(0,p.getGlsl)(this.context.glContext.version),g=`vec4 ${o}(int index) {
- vec2 uv = packedUVfrom1D(
- ${i[0]}, ${i[1]}, index);
- return ${d.texture2D}(${t}, uv);
- }`;return new l.GlslLibRoutine(g,["coordinates.packedUVfrom1D"])}getPackedSampler2D(o,t,e){const r=e.unpackedShape,i=[e.width,e.height],d=(0,p.getGlsl)(this.context.glContext.version),g=i[0],m=i[1];if(i!=null&&u.ArrayUtil.arraysEqual(r,i)){const v=`vec4 ${o}(int row, int col) {
- vec2 uv = (vec2(col, row) + halfCR) / vec2(${m}.0, ${g}.0);
- return ${d.texture2D}(${t}, uv);
- }`;return new l.GlslLibRoutine(v)}const b=i,y=Math.ceil(r[1]/2),w=`vec4 ${o}(int row, int col) {
- vec2 uv = packedUVfrom2D(${b[1]}, ${b[0]}, ${y}, row, col);
- return ${d.texture2D}(${t}, uv);
- }`;return new l.GlslLibRoutine(w,["coordinates.packedUVfrom2D"])}getPackedSampler3D(o,t,e){const r=e.unpackedShape,i=[e.width,e.height],d=[i[0],i[1]],g=(0,p.getGlsl)(this.context.glContext.version);if(r[0]===1){const v=r.slice(1),S=[1,2],A=(0,h.squeezeInputShape)(r,v),O=["b","row","col"],x=JSON.parse(JSON.stringify(e));x.unpackedShape=A;const I=this.getPackedSamplerFromInput(o,t,x),N=`${I.routineBody}
- vec4 ${o}(int b, int row, int col) {
- return ${o}(${(0,h.getSqueezedParams)(O,S)});
- } `;return new l.GlslLibRoutine(N,I.dependencies)}const m=d[0],b=d[1],y=Math.ceil(r[2]/2),w=`vec4 ${o}(int b, int row, int col) {
- vec2 uv = packedUVfrom3D(
- ${b}, ${m}, ${y*Math.ceil(r[1]/2)}, ${y}, b, row, col);
- return ${g.texture2D}(${t}, uv);}`;return new l.GlslLibRoutine(w,["coordinates.packedUVfrom3D"])}getPackedSamplerND(o,t,e){const r=e.unpackedShape,i=r.length,d=[e.width,e.height],g=(0,p.getGlsl)(this.context.glContext.version),m=[d[0],d[1]],b=m[1],y=m[0],w=Math.ceil(r[i-1]/2);let v=w*Math.ceil(r[i-2]/2),S="int b, int row, int col",A=`b * ${v} + (row / 2) * ${w} + (col / 2)`;for(let x=2;x{const r=this.context.inputTextureLayouts[e],i=(r.unpackedShape.length>0?r.unpackedShape:r.shape).length;let d=`_${t}`;o[d]=new l.GlslLibRoutine(this.getValueFromSingle(t,i,r.width,r.height,!1),[`shapeUtils.indicesToOffset${d}`,"coordinates.offsetToCoords","fragcolor.getColorAsFloat"]),d+="_T",o[d]=new l.GlslLibRoutine(this.getValueFromSingle(t,i,r.width,r.height,!0),[`shapeUtils.indicesToOffset${d}`,"coordinates.offsetToCoords","fragcolor.getColorAsFloat"])}),o}getValueFromSingle(o,t,e,r,i){let d=`_${o}`;return i&&(d+="_T"),`
- float ${d}(int m[${t}]) {
- int offset = indicesToOffset${d}(m);
- vec2 coords = offsetToCoords(offset, ${e}, ${r});
- float value = getColorAsFloat(${(0,p.getGlsl)(this.context.glContext.version).texture2D}(${o}, coords));
- return value;
- }
- `}getPackedValueFrom(o,t,e,r,i){let d=`_${o}_Pack`;return i&&(d+="_T"),`
- vec4 ${d}(int m[${t}]) {
- int offset = indicesToOffset_${o}(m);
- vec2 coords = offsetToCoords(offset, ${e}, ${r});
- return ${(0,p.getGlsl)(this.context.glContext.version).texture2D}(${o}, coords);
- }
- `}}n.CoordsGlslLib=f},8520:(_,n)=>{var a;Object.defineProperty(n,"__esModule",{value:!0}),n.TopologicalSortGlslRoutines=n.GlslLibRoutineNode=n.GlslLibRoutine=n.GlslLib=n.GlslContext=n.FunctionType=void 0,(a=n.FunctionType||(n.FunctionType={}))[a.ValueBased=0]="ValueBased",a[a.Positional=1]="Positional",n.GlslContext=class{constructor(u,l,p,s){this.glContext=u,this.programInfo=l,this.inputTextureLayouts=p,this.outputTextureLayout=s}},n.GlslLib=class{constructor(u){this.context=u}},n.GlslLibRoutine=class{constructor(u,l){this.routineBody=u,this.dependencies=l}},n.GlslLibRoutineNode=class{constructor(u,l,p){this.name=u,this.dependencies=p||[],l&&(this.routineBody=l)}addDependency(u){u&&this.dependencies.push(u)}},n.TopologicalSortGlslRoutines=class{static returnOrderedNodes(u){if(!u||u.length===0)return[];if(u.length===1)return u;const l=new Set,p=new Set,s=new Array;return this.createOrderedNodes(u,l,p,s),s}static createOrderedNodes(u,l,p,s){for(let h=0;h0)for(let f=0;f{Object.defineProperty(n,"__esModule",{value:!0}),n.EncodingGlslLib=void 0;const u=a(8520);class l extends u.GlslLib{constructor(s){super(s)}getFunctions(){return Object.assign(Object.assign({},this.encodeFloat32()),this.decodeFloat32())}getCustomTypes(){return{}}encodeFloat32(){return{encode:new u.GlslLibRoutine(`highp vec4 encode(highp float f) {
- return vec4(f, 0.0, 0.0, 0.0);
- }
- `)}}decodeFloat32(){return{decode:new u.GlslLibRoutine(`highp float decode(highp vec4 rgba) {
- return rgba.r;
- }
- `)}}encodeUint8(){const s=l.isLittleEndian()?"rgba.rgba=rgba.abgr;":"";return{encode:new u.GlslLibRoutine(`
- highp vec4 encode(highp float f) {
- highp float F = abs(f);
- highp float Sign = step(0.0,-f);
- highp float Exponent = floor(log2(F));
- highp float Mantissa = (exp2(- Exponent) * F);
- Exponent = floor(log2(F) + 127.0) + floor(log2(Mantissa));
- highp vec4 rgba;
- rgba[0] = 128.0 * Sign + floor(Exponent*exp2(-1.0));
- rgba[1] = 128.0 * mod(Exponent,2.0) + mod(floor(Mantissa*128.0),128.0);
- rgba[2] = floor(mod(floor(Mantissa*exp2(23.0 -8.0)),exp2(8.0)));
- rgba[3] = floor(exp2(23.0)*mod(Mantissa,exp2(-15.0)));
- ${s}
- rgba = rgba / 255.0; // values need to be normalized to [0,1]
- return rgba;
- }
- `)}}decodeUint8(){const s=l.isLittleEndian()?"rgba.rgba=rgba.abgr;":"";return{decode:new u.GlslLibRoutine(`
- highp float decode(highp vec4 rgba) {
- rgba = rgba * 255.0; // values need to be de-normalized from [0,1] to [0,255]
- ${s}
- highp float Sign = 1.0 - step(128.0,rgba[0])*2.0;
- highp float Exponent = 2.0 * mod(rgba[0],128.0) + step(128.0,rgba[1]) - 127.0;
- highp float Mantissa = mod(rgba[1],128.0)*65536.0 + rgba[2]*256.0 +rgba[3] + float(0x800000);
- highp float Result = Sign * exp2(Exponent) * (Mantissa * exp2(-23.0 ));
- return Result;
- }
- `)}}static isLittleEndian(){const s=new ArrayBuffer(4),h=new Uint32Array(s),f=new Uint8Array(s);if(h[0]=3735928559,f[0]===239)return!0;if(f[0]===222)return!1;throw new Error("unknown endianness")}}n.EncodingGlslLib=l},9894:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.FragColorGlslLib=void 0;const u=a(8520),l=a(5060);class p extends u.GlslLib{constructor(h){super(h)}getFunctions(){return Object.assign(Object.assign({},this.setFragColor()),this.getColorAsFloat())}getCustomTypes(){return{}}setFragColor(){const h=(0,l.getGlsl)(this.context.glContext.version);return{setFragColor:new u.GlslLibRoutine(`
- void setFragColor(float value) {
- ${h.output} = encode(value);
- }
- `,["encoding.encode"])}}getColorAsFloat(){return{getColorAsFloat:new u.GlslLibRoutine(`
- float getColorAsFloat(vec4 color) {
- return decode(color);
- }
- `,["encoding.decode"])}}}n.FragColorGlslLib=p},2848:(_,n)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.replaceInlines=void 0;const a=/@inline[\s\n\r]+(\w+)[\s\n\r]+([0-9a-zA-Z_]+)\s*\(([^)]*)\)\s*{(([^}]|[\n\r])*)}/gm;n.replaceInlines=function(u){const l={};let p;for(;(p=a.exec(u))!==null;){const s=p[3].split(",").map(h=>{const f=h.trim().split(" ");return f&&f.length===2?{type:f[0],name:f[1]}:null}).filter(h=>h!==null);l[p[2]]={params:s,body:p[4]}}for(const s in l){const h="(\\w+)?\\s+([_0-9a-zA-Z]+)\\s+=\\s+__FUNC__\\((.*)\\)\\s*;".replace("__FUNC__",s),f=new RegExp(h,"gm");for(;(p=f.exec(u))!==null;){const c=p[1],o=p[2],t=p[3].split(","),e=c?`${c} ${o};`:"";let r=l[s].body,i="";l[s].params.forEach((g,m)=>{g&&(i+=`${g.type} ${g.name} = ${t[m]};
-`)}),r=`${i}
- ${r}`,r=r.replace("return",`${o} = `);const d=`
- ${e}
- {
- ${r}
- }
- `;u=u.replace(p[0],d)}}return u.replace(a,"")}},8879:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.GlslPreprocessor=void 0;const u=a(8520),l=a(2848),p=a(5483),s=a(5060);n.GlslPreprocessor=class{constructor(h,f,c,o){this.libs={},this.glslLibRoutineDependencyGraph={},this.context=new u.GlslContext(h,f,c,o),Object.keys(p.glslRegistry).forEach(e=>{const r=new p.glslRegistry[e](this.context);this.libs[e]=r});const t=this.glslLibRoutineDependencyGraph;for(const e in this.libs){const r=this.libs[e].getFunctions();for(const i in r){const d=e+"."+i;let g;t[d]?(g=t[d],g.routineBody=r[i].routineBody):(g=new u.GlslLibRoutineNode(d,r[i].routineBody),t[d]=g);const m=r[i].dependencies;if(m)for(let b=0;b{const o=c.split(".")[1];h.indexOf(o)!==-1&&f.push(this.glslLibRoutineDependencyGraph[c])}),u.TopologicalSortGlslRoutines.returnOrderedNodes(f)}getUniforms(h,f){const c=[];if(h)for(const o of h)c.push(`uniform sampler2D ${o};`);if(f)for(const o of f)c.push(`uniform ${o.type} ${o.name}${o.arrayLength?`[${o.arrayLength}]`:""};`);return c.join(`
-`)}}},5483:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.glslRegistry=void 0;const u=a(5107),l=a(7341),p=a(9894),s=a(2655),h=a(3891);n.glslRegistry={encoding:l.EncodingGlslLib,fragcolor:p.FragColorGlslLib,vec:h.VecGlslLib,shapeUtils:s.ShapeUtilsGlslLib,coordinates:u.CoordsGlslLib}},2655:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.ShapeUtilsGlslLib=void 0;const u=a(8520);class l extends u.GlslLib{constructor(s){super(s)}getFunctions(){return Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({},this.bcastIndex()),this.bcastMatmulIndex()),this.offsetToIndices()),this.indicesToOffset()),this.incrementIndices())}getCustomTypes(){return{}}bcastIndex(){const s=this.context.outputTextureLayout.shape.length,h={};return this.context.programInfo.inputNames.forEach((f,c)=>{const o=this.context.inputTextureLayouts[c].unpackedShape;if(o.length<=s){const t=o.length,e=s-t,r=`bcastIndices_${f}`;let i="";for(let g=0;g{const o=this.context.inputTextureLayouts[c].shape;if(!(o.length<2||o.length>s)){const t=o.length,e=s-t,r=`bcastMatmulIndices_${f}`;let i="";for(let g=0;g{const c=this.context.inputTextureLayouts[f].shape,o=this.context.inputTextureLayouts[f].strides,t=c.length;let e=`indicesToOffset_${h}`;s[e]=new u.GlslLibRoutine(l.indexToOffsetSingle(e,t,o)),e=`indicesToOffset_${h}_T`,s[e]=new u.GlslLibRoutine(l.indexToOffsetSingle(e,t,o.slice().reverse()))}),s}static indexToOffsetSingle(s,h,f){let c="";for(let o=h-1;o>=0;--o)c+=`
- offset += indices[${o}] * ${f[o]};
- `;return`
- int ${s}(int indices[${h}]) {
- int offset = 0;
- ${c}
- return offset;
- }
- `}offsetToIndices(){const s={};return this.context.programInfo.inputNames.forEach((h,f)=>{const c=this.context.inputTextureLayouts[f].shape,o=this.context.inputTextureLayouts[f].strides,t=c.length;let e=`offsetToIndices_${h}`;s[e]=new u.GlslLibRoutine(l.offsetToIndicesSingle(e,t,o)),e=`offsetToIndices_${h}_T`,s[e]=new u.GlslLibRoutine(l.offsetToIndicesSingle(e,t,o.slice().reverse()))}),s}static offsetToIndicesSingle(s,h,f){const c=[];for(let o=0;o{const c=this.context.inputTextureLayouts[f].shape,o=c.length,t=`incrementIndices_${h}`;let e="";for(let i=0;i= 0; --i) {
- if(i > axis) continue;
- indices[i] += 1;
- if(indices[i] < shape[i]) {
- break;
- }
- indices[i] = 0;
- }
- }
- `;s[t]=new u.GlslLibRoutine(r)}),s}}n.ShapeUtilsGlslLib=l},5060:(_,n)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.getDefaultFragShaderMain=n.getFragShaderPreamble=n.getVertexShaderSource=n.getGlsl=void 0;const a={version:"",attribute:"attribute",varyingVertex:"varying",varyingFrag:"varying",texture2D:"texture2D",output:"gl_FragColor",outputDeclaration:""},u={version:"#version 300 es",attribute:"in",varyingVertex:"out",varyingFrag:"in",texture2D:"texture",output:"outputColor",outputDeclaration:"out vec4 outputColor;"};function l(p){return p===1?a:u}n.getGlsl=l,n.getVertexShaderSource=function(p){const s=l(p);return`${s.version}
- precision highp float;
- ${s.attribute} vec3 position;
- ${s.attribute} vec2 textureCoord;
-
- ${s.varyingVertex} vec2 TexCoords;
-
- void main()
- {
- gl_Position = vec4(position, 1.0);
- TexCoords = textureCoord;
- }`},n.getFragShaderPreamble=function(p){const s=l(p);return`${s.version}
- precision highp float;
- precision highp int;
- precision highp sampler2D;
- ${s.varyingFrag} vec2 TexCoords;
- ${s.outputDeclaration}
- const vec2 halfCR = vec2(0.5, 0.5);
-
- // Custom vector types to handle higher dimensionalities.
- struct ivec5
- {
- int x;
- int y;
- int z;
- int w;
- int u;
- };
-
- struct ivec6
- {
- int x;
- int y;
- int z;
- int w;
- int u;
- int v;
- };
-
- int imod(int x, int y) {
- return x - y * (x / y);
- }
-
- `},n.getDefaultFragShaderMain=function(p,s){return`
- void main() {
- int indices[${s}];
- toVec(TexCoords, indices);
- vec4 result = vec4(process(indices));
- ${l(p).output} = result;
- }
- `}},3891:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.VecGlslLib=void 0;const u=a(8520);class l extends u.GlslLib{constructor(s){super(s)}getCustomTypes(){return{}}getFunctions(){return Object.assign(Object.assign(Object.assign(Object.assign({},this.binaryVecFunctions()),this.copyVec()),this.setVecItem()),this.getVecItem())}binaryVecFunctions(){const s=this.context.outputTextureLayout.shape.length,h={add:"+=",sub:"-=",mul:"*=",div:"/="},f={};for(const c in h){const o=`${c}Vec`;let t="";for(let r=0;r{Object.defineProperty(n,"__esModule",{value:!0}),n.WebGLInferenceHandler=void 0;const u=a(6231),l=a(9162),p=a(2517),s=a(2403),h=a(7019),f=a(8710),c=a(5611),o=a(4057),t=a(2039);n.WebGLInferenceHandler=class{constructor(e){this.session=e,this.packedTextureDataCache=new Map,this.unpackedTextureDataCache=new Map}calculateTextureWidthAndHeight(e,r){return(0,o.calculateTextureWidthAndHeight)(this.session.layoutStrategy,e,r)}executeProgram(e,r){if(r.length{const S=v.map(O=>`${O.unpackedShape.join(",")};${O.width}x${O.height}`).join("_");let A=w.name;return w.cacheHint&&(A+="["+w.cacheHint+"]"),A+=":"+S,A})(e,i);let g=this.session.programManager.getArtifact(d);const m=g?g.programInfo:typeof e.get=="function"?e.get():e,b=(0,o.createTextureLayoutFromTextureType)(this.session.layoutStrategy,m.output.dims,m.output.textureType),y=this.createTextureData(b,m.output.type);return g||(g=this.session.programManager.build(m,i,y),this.session.programManager.setArtifact(d,g)),this.runProgram(g,i,y),y}run(e,r){return this.executeProgram(e,r).tensor}runProgram(e,r,i){for(let d=0;dthis.readTexture(m),async b=>this.readTextureAsync(m),void 0,g),texture:i});return this.setTextureData(m.tensor.dataId,m,e.isPacked),m}getTextureData(e,r=!1){return this.session.isInitializer(e)?this.session.getTextureData(e,r):r?this.packedTextureDataCache.get(e):this.unpackedTextureDataCache.get(e)}setTextureData(e,r,i=!1){this.session.isInitializer(e)?this.session.setTextureData(e,r,i):(i?this.packedTextureDataCache:this.unpackedTextureDataCache).set(e,r)}isTextureLayoutCached(e,r=!1){return!!this.getTextureData(e.dataId,r)}dispose(){this.session.textureManager.clearActiveTextures(),this.packedTextureDataCache.forEach(e=>this.session.textureManager.releaseTexture(e)),this.packedTextureDataCache=new Map,this.unpackedTextureDataCache.forEach(e=>this.session.textureManager.releaseTexture(e)),this.unpackedTextureDataCache=new Map}readTexture(e){return e.isPacked?this.readTexture(this.unpack(e)):this.session.backend.glContext.isFloat32DownloadSupported?this.session.textureManager.readTexture(e,e.tensor.type,e.channels):this.session.textureManager.readUint8TextureAsFloat((0,f.encodeAsUint8)(this,e))}async readTextureAsync(e){return e.isPacked?this.readTextureAsync(this.unpack(e)):this.session.backend.glContext.isFloat32DownloadSupported?this.session.textureManager.readTextureAsync(e,e.tensor.type,e.channels):this.session.textureManager.readUint8TextureAsFloat((0,f.encodeAsUint8)(this,e))}pack(e){return this.executeProgram((0,s.createPackProgramInfoLoader)(this,e.tensor),[e.tensor])}unpack(e){return this.executeProgram((0,c.createUnpackProgramInfoLoader)(this,e.tensor),[e.tensor])}}},1640:function(_,n,a){var u=this&&this.__createBinding||(Object.create?function(X,J,ee,ue){ue===void 0&&(ue=ee);var Se=Object.getOwnPropertyDescriptor(J,ee);Se&&!("get"in Se?!J.__esModule:Se.writable||Se.configurable)||(Se={enumerable:!0,get:function(){return J[ee]}}),Object.defineProperty(X,ue,Se)}:function(X,J,ee,ue){ue===void 
0&&(ue=ee),X[ue]=J[ee]}),l=this&&this.__setModuleDefault||(Object.create?function(X,J){Object.defineProperty(X,"default",{enumerable:!0,value:J})}:function(X,J){X.default=J}),p=this&&this.__importStar||function(X){if(X&&X.__esModule)return X;var J={};if(X!=null)for(var ee in X)ee!=="default"&&Object.prototype.hasOwnProperty.call(X,ee)&&u(J,X,ee);return l(J,X),J};Object.defineProperty(n,"__esModule",{value:!0}),n.WEBGL_OP_RESOLVE_RULES=void 0;const s=a(2898),h=p(a(7839)),f=a(4196),c=a(2069),o=a(8138),t=a(9663),e=a(5193),r=a(7992),i=a(1253),d=a(4776),g=a(6572),m=a(3346),b=a(5623),y=a(2870),w=a(2143),v=a(4939),S=a(718),A=a(2268),O=a(8117),x=a(2278),I=a(5524),N=a(5975),R=a(3933),L=a(6558),$=a(5723),G=a(3738),D=p(a(4909)),j=a(8428),Z=a(9793);n.WEBGL_OP_RESOLVE_RULES=[["Abs","","6+",D.abs],["Acos","","7+",D.acos],["Add","","7+",h.add],["And","","7+",h.and],["Asin","","7+",D.asin],["Atan","","7+",D.atan],["AveragePool","","7+",w.averagePool,w.parseAveragePoolAttributes],["BatchNormalization","","7+",s.batchNormalization,s.parseBatchNormalizationAttributes],["Cast","","6+",f.cast,f.parseCastAttributes],["Ceil","","6+",D.ceil],["Clip","","6-10",D.clip,D.parseClipAttributes],["Clip","","11+",D.clipV11],["Concat","","4+",c.concat,c.parseConcatAttributes],["Conv","","1+",o.conv,o.parseConvAttributes],["ConvTranspose","","1+",t.convTranspose,t.parseConvTransposeAttributes],["Cos","","7+",D.cos],["Div","","7+",h.div],["Dropout","","7+",D.identity],["DepthToSpace","","1+",e.depthToSpace,e.parseDepthToSpaceAttributes],["Equal","","7+",h.equal],["Elu","","6+",D.elu,D.parseEluAttributes],["Exp","","6+",D.exp],["Flatten","","1+",r.flatten,r.parseFlattenAttributes],["Floor","","6+",D.floor],["FusedConv","com.microsoft","1+",o.conv,o.parseConvAttributes],["Gather","","1+",i.gather,i.parseGatherAttributes],["Gemm","","7-10",d.gemm,d.parseGemmAttributesV7],["Gemm","","11+",d.gemm,d.parseGemmAttributesV11],["GlobalAveragePool","","1+",w.globalAveragePool,w.parseGlobalAveragePoolAttributes],["GlobalMaxPool","","1+",w.globalMaxPool],["Greater","","7+",h.greater],["Identity","","1+",D.identity],["ImageScaler","","1+",g.imageScaler,g.parseImageScalerAttributes],["InstanceNormalization","","6+",m.instanceNormalization,m.parseInstanceNormalizationAttributes],["LeakyRelu","","6+",D.leakyRelu,D.parseLeakyReluAttributes],["Less","","7+",h.less],["Log","","6+",D.log],["MatMul","","1+",b.matMul,b.parseMatMulAttributes],["MaxPool","","1+",w.maxPool,w.parseMaxPoolAttributes],["Mul","","7+",h.mul],["Neg","","6+",D.neg],["Not","","1+",D.not],["Or","","7+",h.or],["Pad","","2-10",y.padV2,y.parsePadAttributesV2],["Pad","","11+",y.padV11,y.parsePadAttributesV11],["Pow","","7+",h.pow],["PRelu","","7+",h.pRelu],["ReduceLogSum","","1+",v.reduceLogSum,v.parseReduceAttributes],["ReduceMax","","1+",v.reduceMax,v.parseReduceAttributes],["ReduceMean","","1+",v.reduceMean,v.parseReduceAttributes],["ReduceMin","","1+",v.reduceMin,v.parseReduceAttributes],["ReduceProd","","1+",v.reduceProd,v.parseReduceAttributes],["ReduceSum","","1-12",v.reduceSum,v.parseReduceAttributes],["ReduceSumSquare","","1+",v.reduceLogSumSquare,v.parseReduceAttributes],["Relu","","6+",D.relu],["Reshape","","5+",S.reshape],["Resize","","10",A.resize,A.parseResizeAttributesV10],["Resize","","11+",A.resize,A.parseResizeAttributesV11],["Shape","","1+",O.shape],["Sigmoid","","6+",D.sigmoid],["Sin","","7+",D.sin],["Slice","","10+",x.sliceV10],["Slice","","1-9",x.slice,x.parseSliceAttributes],["Softmax","","1-12",I.softmax,I.parseSoftmaxAttributes],["Softmax","","13+",I.soft
maxV13,I.parseSoftmaxAttributesV13],["Split","","2-12",N.split,N.parseSplitAttributes],["Sqrt","","6+",D.sqrt],["Squeeze","","1-12",R.squeeze,R.parseSqueezeAttributes],["Squeeze","","13+",R.squeezeV13],["Sub","","7+",h.sub],["Sum","","6+",L.sum],["Tan","","7+",D.tan],["Tanh","","6+",D.tanh],["Tile","","6+",$.tile],["Transpose","","1+",G.transpose,G.parseTransposeAttributes],["Upsample","","7-8",Z.upsample,Z.parseUpsampleAttributesV7],["Upsample","","9",Z.upsample,Z.parseUpsampleAttributesV9],["Unsqueeze","","1-12",j.unsqueeze,j.parseUnsqueezeAttributes],["Unsqueeze","","13+",j.unsqueezeV13],["Xor","","7+",h.xor]]},2898:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseBatchNormalizationAttributes=n.batchNormalization=void 0;const u=a(246),l=a(5060),p=a(2039),s={name:"BatchNormalization",inputNames:["A","Scale","B","Mean","Variance"],inputTypes:[p.TextureType.unpacked,p.TextureType.unpacked,p.TextureType.unpacked,p.TextureType.unpacked,p.TextureType.unpacked]};n.batchNormalization=(c,o,t)=>(f(o),[c.run(Object.assign(Object.assign({},s),{cacheHint:t.cacheKey,get:()=>h(c,o,t)}),o)]),n.parseBatchNormalizationAttributes=c=>{const o=c.attributes.getFloat("epsilon",1e-5),t=c.attributes.getFloat("momentum",.9),e=c.attributes.getInt("spatial",1);return(0,u.createAttributeWithCacheKey)({epsilon:o,momentum:t,spatial:e})};const h=(c,o,t)=>{const e=(0,l.getGlsl)(c.session.backend.glContext.version),r=o[0].dims.length,[i,d]=c.calculateTextureWidthAndHeight(o[1].dims,p.TextureType.unpacked),g=`
- float process(int[${r}] indices) {
- vec2 position = offsetToCoords(indices[1], ${i}, ${d});
- float scale = getColorAsFloat(${e.texture2D}(Scale, position));
- float mean = getColorAsFloat(${e.texture2D}(Mean, position));
- float variance = getColorAsFloat(${e.texture2D}(Variance, position));
- float b = getColorAsFloat(${e.texture2D}(B, position));
-
- return scale * ( (_A(indices) - mean) / sqrt(variance + float(${t.epsilon})) ) + b;
- }`;return Object.assign(Object.assign({},s),{output:{dims:o[0].dims,type:o[0].type,textureType:p.TextureType.unpacked},shaderSource:g})},f=c=>{if(!c||c.length!==5)throw new Error("BatchNormalization requires 5 inputs.");const o=c[0],t=c[1],e=c[2],r=c[3],i=c[4];if(o.dims.length<3||t.dims.length!==1||e.dims.length!==1||r.dims.length!==1||i.dims.length!==1)throw new Error("invalid input shape.");if(t.dims[0]!==o.dims[1]||e.dims[0]!==o.dims[1]||r.dims[0]!==o.dims[1]||i.dims[0]!==o.dims[1])throw new Error("invalid input shape.");if(o.type!=="float32"&&o.type!=="float64"||t.type!=="float32"&&t.type!=="float64"||e.type!=="float32"&&e.type!=="float64"||r.type!=="float32"&&r.type!=="float64"||i.type!=="float32"&&i.type!=="float64")throw new Error("invalid input tensor types.")}},7839:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.xor=n.sub=n.pRelu=n.pow=n.or=n.mul=n.less=n.greater=n.equal=n.div=n.and=n.add=n.glslPRelu=n.glslPow=n.glslXor=n.glslOr=n.glslAnd=n.glslLess=n.glslGreater=n.glslEqual=n.glslSub=n.glslMul=n.glslDiv=n.glslAdd=void 0;const u=a(2517),l=a(8520),p=a(5060),s=a(2039);function h(){const v="add_";return{body:`
- float ${v}(float a, float b) {
- return a + b;
- }
- vec4 ${v}(vec4 v1, vec4 v2) {
- return v1 + v2;
- }
- `,name:v,type:l.FunctionType.ValueBased}}function f(){const v="div_";return{body:`
- float ${v}(float a, float b) {
- return a / b;
- }
- vec4 ${v}(vec4 v1, vec4 v2) {
- return v1 / v2;
- }
- `,name:v,type:l.FunctionType.ValueBased}}function c(){const v="mul_";return{body:`
- float ${v}(float a, float b) {
- return a * b;
- }
- vec4 ${v}(vec4 v1, vec4 v2) {
- return v1 * v2;
- }
- `,name:v,type:l.FunctionType.ValueBased}}function o(){const v="sub_";return{body:`
- float ${v}(float a, float b) {
- return a - b;
- }
- vec4 ${v}(vec4 v1, vec4 v2) {
- return v1 - v2;
- }
- `,name:v,type:l.FunctionType.ValueBased}}function t(){const v="equal_";return{body:`
- float ${v}(float a, float b) {
- return float(a == b);
- }
- vec4 ${v}(vec4 v1, vec4 v2) {
- return vec4(equal(v1, v2));
- }
- `,name:v,type:l.FunctionType.ValueBased}}function e(){const v="greater_";return{body:`
- float ${v}(float a, float b) {
- return float(a > b);
- }
- vec4 ${v}(vec4 v1, vec4 v2) {
- return vec4( v1.r > v2.r ,
- v1.g > v2.g,
- v1.b > v2.b,
- v1.a > v2.a );
- }
- `,name:v,type:l.FunctionType.ValueBased}}function r(){const v="less_";return{body:`
- float ${v}(float a, float b) {
- return float(a < b);
- }
- vec4 ${v}(vec4 v1, vec4 v2) {
- return vec4( v1.r < v2.r ,
- v1.g < v2.g,
- v1.b < v2.b,
- v1.a < v2.a );
- }
- `,name:v,type:l.FunctionType.ValueBased}}function i(){const v="and_";return{body:`
- float ${v}(float a, float b) {
- return float( bool(a) && bool(b) );
- }
- vec4 ${v}(vec4 v1, vec4 v2) {
- bvec4 b1 = bvec4(v1);
- bvec4 b2 = bvec4(v2);
- return vec4( b1.r && b2.r ,
- b1.g && b2.g,
- b1.b && b2.b,
- b1.a && b2.a );
- }
- `,name:v,type:l.FunctionType.ValueBased}}function d(){const v="or_";return{body:`
- float ${v}(float a, float b) {
- return float( bool(a) || bool(b) );
- }
- vec4 ${v}(vec4 v1, vec4 v2) {
- bvec4 b1 = bvec4(v1);
- bvec4 b2 = bvec4(v2);
- return vec4( b1.r || b2.r ,
- b1.g || b2.g,
- b1.b || b2.b,
- b1.a || b2.a );
- }
- `,name:v,type:l.FunctionType.ValueBased}}function g(){const v="xor_";return{body:`
- float ${v}(float a, float b) {
- return float( bool(a) ^^ bool(b) );
- }
- vec4 ${v}(vec4 v1, vec4 v2) {
- bvec4 b1 = bvec4(v1);
- bvec4 b2 = bvec4(v2);
- return vec4( b1.r ^^ b2.r ,
- b1.g ^^ b2.g,
- b1.b ^^ b2.b,
- b1.a ^^ b2.a );
- }
- `,name:v,type:l.FunctionType.ValueBased}}function m(){return function(v){const S=`${v}_`;return{body:`
- float ${S}(float a, float b) {
- return ${v}(a, b);
- }
- vec4 ${S}(vec4 v1, vec4 v2) {
- return ${v}(v1, v2);
- }
- `,name:S,type:l.FunctionType.ValueBased}}("pow")}function b(){const v="prelu_";return{body:`
- float ${v}(float a, float b) {
- return a < 0.0 ? a * b: a;
- }
- vec4 ${v}(vec4 v1, vec4 v2) {
- return vec4(
- v1.r < 0.0 ? v1.r * v2.r: v1.r,
- v1.g < 0.0 ? v1.g * v2.g: v1.g,
- v1.b < 0.0 ? v1.b * v2.b: v1.b,
- v1.a < 0.0 ? v1.a * v2.a: v1.a
- );
- }
- `,name:v,type:l.FunctionType.ValueBased}}n.glslAdd=h,n.glslDiv=f,n.glslMul=c,n.glslSub=o,n.glslEqual=t,n.glslGreater=e,n.glslLess=r,n.glslAnd=i,n.glslOr=d,n.glslXor=g,n.glslPow=m,n.glslPRelu=b;const y=(v,S,A,O=S[0].type,x)=>{const I=v.session.pack?s.TextureType.packed:s.TextureType.unpacked;return{name:A.name,inputNames:["A","B"],inputTypes:[I,I],cacheHint:x,get:()=>w(v,S,A,O)}},w=(v,S,A,O=S[0].type)=>{const x=v.session.pack?s.TextureType.packed:s.TextureType.unpacked,I=!u.ShapeUtil.areEqual(S[0].dims,S[1].dims);let N=S[0].dims;const R=v.session.pack;if(I){const G=u.BroadcastUtil.calcShape(S[0].dims,S[1].dims,!1);if(!G)throw new Error("Can't perform binary op on the given tensors");N=G;const D=N.length,j=S[0].dims.length!==0?S[0].dims.length:1,Z=S[1].dims.length!==0?S[1].dims.length:1,X=S[0].dims.length!==0?"bcastIndices_A(indices, aindices);":"aindices[0] = 0;",J=S[1].dims.length!==0?"bcastIndices_B(indices, bindices);":"bindices[0] = 0;",ee=(0,p.getGlsl)(v.session.backend.glContext.version),ue=R?`
- ${A.body}
- void main() {
- vec4 a = getAAtOutCoords();
- vec4 b = getBAtOutCoords();
- vec4 result = ${A.name}(a, b);
- ${ee.output} = result;
- }`:`
- ${A.body}
- float process(int indices[${D}]) {
- int aindices[${j}];
- int bindices[${Z}];
- ${X}
- ${J}
- return ${A.name}(_A(aindices), _B(bindices));
- }`;return{name:A.name,inputNames:["A","B"],inputTypes:[x,x],output:{dims:N,type:O,textureType:x},shaderSource:ue,hasMain:R}}const L=(0,p.getGlsl)(v.session.backend.glContext.version),$=`
- ${A.body}
- void main() {
- vec4 v1 = ${L.texture2D}(A, TexCoords);
- vec4 v2 = ${L.texture2D}(B, TexCoords);
- vec4 result = ${A.name}(v1, v2);
- ${L.output} = result;
- }
- `;return{name:A.name,inputNames:["A","B"],inputTypes:[x,x],output:{dims:S[0].dims,type:O,textureType:x},shaderSource:$,hasMain:!0}};n.add=(v,S)=>[v.run(y(v,S,h()),S)],n.and=(v,S)=>[v.run(y(v,S,i(),"bool"),S)],n.div=(v,S)=>[v.run(y(v,S,f()),S)],n.equal=(v,S)=>[v.run(y(v,S,t(),"bool"),S)],n.greater=(v,S)=>[v.run(y(v,S,e(),"bool"),S)],n.less=(v,S)=>[v.run(y(v,S,r(),"bool"),S)],n.mul=(v,S)=>[v.run(y(v,S,c()),S)],n.or=(v,S)=>[v.run(y(v,S,d(),"bool"),S)],n.pow=(v,S)=>[v.run(y(v,S,m()),S)],n.pRelu=(v,S)=>[v.run(y(v,S,b()),S)],n.sub=(v,S)=>[v.run(y(v,S,o()),S)],n.xor=(v,S)=>[v.run(y(v,S,g(),"bool"),S)]},4196:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseCastAttributes=n.cast=void 0;const u=a(2517);n.cast=(p,s,h)=>(l(s),[p.cast(s[0],h)]),n.parseCastAttributes=p=>u.ProtoUtil.tensorDataTypeFromProto(p.attributes.getInt("to"));const l=p=>{if(!p||p.length!==1)throw new Error("Cast requires 1 input.");if(p[0].type==="string")throw new Error("Invalid input type.")}},1163:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createPackedConcatProgramInfoLoader=void 0;const u=a(5060),l=a(2039),p=a(9390),s=a(2827);n.createPackedConcatProgramInfoLoader=(f,c,o)=>{const t=(e=c.length,r=o.cacheKey,{name:"Concat (packed)",inputNames:Array.from({length:e},(i,d)=>`X${d}`),inputTypes:Array(e).fill(l.TextureType.packed),cacheHint:r});var e,r;return Object.assign(Object.assign({},t),{get:()=>((i,d,g,m)=>{const b=g[0].dims.slice();if(m>=b.length||m<-1*b.length)throw new Error("axis specified for concat doesn't match input dimensionality");m<0&&(m=b.length+m);const y=b.slice(0);for(let X=1;XX.dims),x=(0,p.getGlChannels)(w),I=new Array(O.length-1);I[0]=O[0][m];for(let X=1;X= ${I[X-1]}) {
- return getChannel(
- getX${X}(${h(x,N,J)}),
- vec2(${h(R,N,J)}));
- }`}const G=I.length,D=I[I.length-1];$+=`
- return getChannel(
- getX${G}(${h(x,N,D)}),
- vec2(${h(R,N,D)}));`;const j=(0,u.getGlsl)(i.session.backend.glContext.version),Z=`
- ${A}
- float getValue(${x.map(X=>"int "+X)}) {
- ${$}
- }
-
- void main() {
- ${S} coords = getOutputCoords();
- int lastDim = coords.${x[w-1]};
- coords.${x[w-1]} = coords.${x[w-2]};
- coords.${x[w-2]} = lastDim;
-
- vec4 result = vec4(getValue(${v}), 0., 0., 0.);
-
- ${v[w-1]} = ${v[w-1]} + 1;
- if (${v[w-1]} < ${y[w-1]}) {
- result.g = getValue(${v});
- }
-
- ${v[w-2]} = ${v[w-2]} + 1;
- if (${v[w-2]} < ${y[w-2]}) {
- result.a = getValue(${v});
- }
-
- ${v[w-1]} = ${v[w-1]} - 1;
- if (${v[w-2]} < ${y[w-2]} &&
- ${v[w-1]} < ${y[w-1]}) {
- result.b = getValue(${v});
- }
- ${j.output} = result;
- }
- `;return Object.assign(Object.assign({},d),{output:{dims:y,type:g[0].type,textureType:l.TextureType.packed},shaderSource:Z,hasMain:!0})})(f,t,c,o.axis)})};const h=(f,c,o)=>{const t=f.indexOf(c);return f.map((e,r)=>r===t?`${e} - ${o}`:e).join()}},2069:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseConcatAttributes=n.concat=void 0;const u=a(246),l=a(2039),p=a(1163);n.concat=(e,r,i)=>(t(r),e.session.pack&&r[0].dims.length>1?[e.run((0,p.createPackedConcatProgramInfoLoader)(e,r,i),r)]:[e.run(s(e,r,i),r)]);const s=(e,r,i)=>{const d=(g=r.length,m=i.cacheKey,{name:"Concat",inputNames:Array.from({length:g},(b,y)=>`X${y}`),inputTypes:Array(g).fill(l.TextureType.unpacked),cacheHint:m});var g,m;return Object.assign(Object.assign({},d),{get:()=>((b,y,w,v)=>{const S=w[0].dims.slice();if(v>=S.length||v<-1*S.length)throw new Error("axis specified for concat doesn't match input dimensionality");v<0&&(v=S.length+v);const A=S.slice(0);for(let L=1;L`int getTextureWhereDataResides(int index) {
- ${e.map((r,i)=>`if(index<${r}) {return ${i};}
-`).join("")}
- }`,f=e=>h(e),c=(e,r)=>{const i=[`float fetchDataFromCorrectTexture(int textureIndex, int indices[${r}]) {`];for(let d=0;d{const r=["int getSizeInConcatAxisValueFromIndex(int index) {"];for(let i=0;i(0,u.createAttributeWithCacheKey)({axis:e.attributes.getInt("axis")});const t=e=>{if(!e||e.length<1)throw new Error("too few inputs");const r=e[0].type,i=e[0].dims.length;if(r==="string")throw new Error("string tensor is not supported yet");for(const d of e){if(d.type!==r)throw new Error("input tensors should be one type");if(d.dims.length!==i)throw new Error("input tensors should have the same shape")}}},4770:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createUnpackedGroupedConvProgramInfoLoader=void 0;const u=a(6231),l=a(5060),p=a(2039),s=a(8138),h=a(2823);n.createUnpackedGroupedConvProgramInfoLoader=(f,c,o)=>{const t=(e=c.length>2,r=o.cacheKey,{name:"GroupedConv",inputNames:e?["X","W","Bias"]:["X","W"],inputTypes:e?[p.TextureType.unpacked,p.TextureType.unpacked,p.TextureType.unpacked]:[p.TextureType.unpacked,p.TextureType.unpacked],cacheHint:r});var e,r;return Object.assign(Object.assign({},t),{get:()=>((i,d,g,m)=>{const b=d.length>2?"value += getBias(output_channel);":"",y=d[0].dims.slice(),w=d[1].dims.slice(),v=w[0]/m.group;u.Logger.verbose("GroupedConv",`autpPad:${m.autoPad}, dilations:${m.dilations}, group:${m.group}, kernelShape:${m.kernelShape}, pads:${m.pads}, strides:${m.strides}`);const S=(0,s.calculateOutputShape)(y,w,m.dilations,m.pads,m.strides),A=(0,l.getGlsl)(i.session.backend.glContext.version),{activationFunction:O,applyActivation:x}=(0,h.getActivationSnippet)(m),I=`
- const ivec2 strides = ivec2(${m.strides[0]}, ${m.strides[1]});
- const ivec2 pads = ivec2(${m.pads[0]}, ${m.pads[1]});
- ${O}
- void main() {
- ivec4 coords = getOutputCoords();
- int batch = coords.x;
- int output_channel = coords.y;
- ivec2 xRCCorner = coords.zw * strides - pads;
- int group_id = output_channel / ${v};
-
- float value = 0.0;
- for (int wInChannel = 0; wInChannel < ${w[1]}; wInChannel++) {
- int input_channel = group_id * ${w[1]} + wInChannel;
- for (int wHeight = 0; wHeight < ${w[2]}; wHeight++) {
- int xHeight = xRCCorner.x + wHeight * ${m.dilations[0]};
-
- if (xHeight < 0 || xHeight >= ${y[2]}) {
- continue;
- }
-
- for (int wWidth = 0; wWidth < ${w[3]}; wWidth++) {
- int xWidth = xRCCorner.y + wWidth * ${m.dilations[1]};
- if (xWidth < 0 || xWidth >= ${y[3]}) {
- continue;
- }
-
- float xVal = getX(batch, input_channel, xWidth, xHeight);
- float wVal = getW(output_channel, wInChannel, wWidth, wHeight);
- value += xVal*wVal;
- }
- }
- }
- ${b}
- ${x}
- ${A.output} = vec4(value, .0, .0, .0);
- }
-`;return Object.assign(Object.assign({},g),{output:{dims:S,type:d[0].type,textureType:p.TextureType.unpacked},shaderSource:I,hasMain:!0})})(f,c,t,o)})}},1386:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.conv2DPacked=n.conv2DPackedPointwise=void 0;const u=a(8138),l=a(8555),p=a(708);n.conv2DPackedPointwise=(s,h,f)=>{const c=h[0].dims,o=h[1].dims,t=(0,u.calculateOutputShape)(c,o,f.dilations,f.pads,f.strides),e=s.reshapePacked(h[0],[c[1],c[2]*c[3]]),r=s.reshapePacked(h[1],[o[0],o[1]]),i=h.length>2?[r,e,h[2]]:[r,e],d=s.run((0,p.createPackedMatmulProgramInfoLoader)(s,i,f),i);return s.reshapePacked(d,t)},n.conv2DPacked=(s,h,f)=>{const c=h[0].dims,o=h[1].dims,t=(0,u.calculateOutputShape)(c,o,f.dilations,f.pads,f.strides),e=s.run((0,l.createPackedIm2ColProgramInfoLoader)(s,h[0],h[1],t,f),[h[0]]),r=s.reshapePacked(h[1],[o[0],o[1]*o[2]*o[3]]),i=h.length===3?[r,e,h[2]]:[r,e],d=s.run((0,p.createPackedMatmulProgramInfoLoader)(s,i,f),i);return s.reshapePacked(d,t)}},9663:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseConvTransposeAttributes=n.convTranspose=void 0;const u=a(246),l=a(5060),p=a(2039),s=a(2823),h=(r,i,d,g,m,b)=>(r-1)*i+d+(g-1)*m+1-b,f=(r,i,d,g,m)=>{const b=Math.floor(r/2);i==="SAME_UPPER"?(d[g]=b,d[m]=r-b):i==="SAME_LOWER"&&(d[g]=r-b,d[m]=b)};n.convTranspose=(r,i,d)=>(e(i,d),c(r,i,d));const c=(r,i,d)=>{const g=t(d,i);return[o(r,i,g)]},o=(r,i,d)=>r.run(((g,m,b)=>{const y=(w=m.length>2,v=b.cacheKey,{name:"ConvTranspose",inputNames:w?["X","W","B"]:["X","W"],inputTypes:w?[p.TextureType.unpacked,p.TextureType.unpacked,p.TextureType.unpacked]:[p.TextureType.unpacked,p.TextureType.unpacked],cacheHint:v});var w,v;return Object.assign(Object.assign({},y),{get:()=>((S,A,O,x)=>{const I=A.length>2?"getB(output_channel)":"0.0",N=A[0].dims,R=A[1].dims,L=R[1],$=R[0]/x.group,G=[A[0].dims[0],A[1].dims[1]*x.group,...x.outputShape],D=(0,l.getGlsl)(S.session.backend.glContext.version),{activationFunction:j,applyActivation:Z}=(0,s.getActivationSnippet)(x),X=`
- const ivec2 strides = ivec2(${x.strides[0]}, ${x.strides[1]});
- const ivec2 pads = ivec2(${x.pads[0]}, ${x.pads[1]});
- ${j}
- void main() {
- ivec4 coords = getOutputCoords();
- int batch = coords.x;
- int output_channel = coords.y;
-
- ivec2 loc = coords.zw + pads;
-
- int group_id = output_channel / ${L};
- int wOutChannel = output_channel - group_id * ${L};
-
- float value = ${I};
- for (int inChannelOffset = 0; inChannelOffset < ${$}; inChannelOffset++) {
- int input_channel = group_id * ${$} + inChannelOffset;
- for (int wWOff = 0; wWOff < ${R[2]}; wWOff++) {
- for (int wHOff = 0; wHOff < ${R[3]}; wHOff++) {
- ivec2 wOff = ivec2(wWOff * ${x.dilations[0]}, wHOff * ${x.dilations[1]});
- ivec2 wLoc = loc - wOff;
- ivec2 wLocIn = wLoc / strides;
- if (
- wLocIn * strides == wLoc &&
- wLocIn.x >= 0 && wLocIn.x < ${N[2]} &&
- wLocIn.y >= 0 && wLocIn.y < ${N[3]}
- ) {
- float xVal = getX(batch, input_channel, wLocIn.y, wLocIn.x);
- float wVal = getW(input_channel, wOutChannel, wHOff, wWOff);
- value += xVal * wVal;
- }
- }
- }
- }
- ${Z}
- ${D.output} = vec4(value, .0, .0, .0);
- }
-`;return Object.assign(Object.assign({},O),{output:{dims:G,type:A[0].type,textureType:p.TextureType.unpacked},shaderSource:X,hasMain:!0})})(g,m,y,b)})})(r,i,d),i),t=(r,i)=>{const d=r.kernelShape.slice();if(r.kernelShape.length===0)for(let y=2;y{const N=y.length-2,R=I.length===0;for(let L=0;L{const i=r.attributes,d=(0,s.parseInternalActivationAttributes)(i),g=i.getString("auto_pad","NOTSET"),m=i.getInts("dilations",[1,1]),b=i.getInt("group",1),y=i.getInts("kernel_shape",[]),w=i.getInts("output_padding",[0,0]),v=i.getInts("output_shape",[]),S=i.getInts("pads",[0,0,0,0]),A=i.getInts("strides",[1,1]);return(0,u.createAttributeWithCacheKey)(Object.assign({autoPad:g,dilations:m,group:b,kernelShape:y,outputPadding:w,outputShape:v,pads:S,strides:A},d))};const e=(r,i)=>{if(!r||r.length!==2&&r.length!==3)throw new Error("Conv requires 2 or 3 inputs");if(r[0].dims.length!==4||r[1].dims.length!==4)throw new Error("currently only support 2-dimensional conv");if(r[0].dims[1]!==r[1].dims[0])throw new Error("FILTER_IN_CHANNEL should be equal to DATA_CHANNEL");const d=r[1].dims[1]*i.group;if(r.length===3&&(r[2].dims.length!==1||r[2].dims[0]!==d))throw new Error("invalid bias");const g=r[0].dims.length-2;if(i.dilations.length!==g)throw new Error(`dilations should be ${g}D`);if(i.strides.length!==g)throw new Error(`strides should be ${g}D`);if(i.pads.length!==2*g)throw new Error(`pads should be ${2*g}D`);if(i.outputPadding.length!==g)throw new Error(`output_padding should be ${g}D`);if(i.kernelShape.length!==0&&i.kernelShape.length!==r[1].dims.length-2)throw new Error("invalid kernel shape");if(i.outputShape.length!==0&&i.outputShape.length!==r[0].dims.length-2)throw new Error("invalid output shape");if(r[0].type!=="float32"||r[1].type!=="float32")throw new Error("ConvTranspose input(X,W) should be float tensor");if(r.length===3&&r[2].type!=="float32")throw new Error("ConvTranspose input(bias) should be float tensor")}},8138:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseConvAttributes=n.conv=n.calculateOutputShape=void 0;const u=a(246),l=a(2517),p=a(4770),s=a(1386),h=a(9828),f=a(2823),c=a(3248),o=a(5623);n.calculateOutputShape=(g,m,b,y,w)=>{const v=g[0],S=g.slice(2),A=S.length,O=m[0],x=m.slice(2).map((N,R)=>N+(N-1)*(b[R]-1)),I=S.map((N,R)=>N+y[R]+y[R+A]).map((N,R)=>Math.floor((N-x[R]+w[R])/w[R]));return[v,O].concat(...I)},n.conv=(g,m,b)=>(d(m,b),t(g,m,b));const t=(g,m,b)=>{const y=i(b,m),w=g.session.pack,v=y.kernelShape[0]===1&&y.kernelShape[1]===1;return y.group>1?[g.run((0,p.createUnpackedGroupedConvProgramInfoLoader)(g,m,y),m)]:v&&w?[e(g,m,y)]:w&&m[0].dims.length===4&&m[0].dims[0]===1&&!v?[(0,s.conv2DPacked)(g,m,y)]:[r(g,m,y)]},e=(g,m,b)=>{const y=m[0].dims,w=m[1].dims,v=(0,n.calculateOutputShape)(y,w,b.dilations,b.pads,b.strides),S=g.reshapeUnpacked(m[0],[y[1],y[2]*y[3]]),A=g.reshapeUnpacked(m[1],[w[0],w[1]]),O=m.length>2?[A,S,m[2]]:[A,S],x=g.run((0,o.createMatmulProgramInfoLoader)(O,b),O);return g.reshapeUnpacked(x,v)},r=(g,m,b)=>{const y=m[0].dims,w=m[1].dims,v=(0,n.calculateOutputShape)(y,w,b.dilations,b.pads,b.strides),S=g.run((0,c.createIm2ColProgramInfoLoader)(g,m[0],m[1],v,b),[m[0]]),A=m.length===3?[S,m[1],m[2]]:[S,m[1]];return g.run((0,h.createDotProductProgramInfoLoader)(g,m,v,b),A)},i=(g,m)=>{const b=g.kernelShape.slice();if(g.kernelShape.length===0)for(let v=2;v{const 
m=g.attributes,b=(0,f.parseInternalActivationAttributes)(m),y=m.getString("auto_pad","NOTSET"),w=m.getInts("dilations",[1,1]),v=m.getInt("group",1),S=m.getInts("kernel_shape",[]),A=m.getInts("pads",[0,0,0,0]),O=m.getInts("strides",[1,1]);return(0,u.createAttributeWithCacheKey)(Object.assign({autoPad:y,dilations:w,group:v,kernelShape:S,pads:A,strides:O},b))};const d=(g,m)=>{if(!g||g.length!==2&&g.length!==3)throw new Error("Conv requires 2 or 3 inputs");if(g[0].dims.length!==4||g[1].dims.length!==4)throw new Error("currently only support 2-dimensional conv");if(g[0].dims[1]!==g[1].dims[1]*m.group)throw new Error("FILTER_IN_CHANNEL should be equal to DATA_CHANNEL");if(g.length===3&&(g[2].dims.length!==1||g[1].dims[0]!==g[2].dims[0]))throw new Error("invalid bias");const b=g[0].dims.length-2;if(m.dilations.length!==b)throw new Error(`dilations should be ${b}D`);if(m.strides.length!==b)throw new Error(`strides should be ${b}D`);if(m.pads.length!==2*b)throw new Error(`pads should be ${2*b}D`);if(m.kernelShape.length!==0&&m.kernelShape.length!==g[1].dims.length-2)throw new Error("invalid kernel shape");if(g[0].type!=="float32"||g[1].type!=="float32")throw new Error("Conv input(X,W) should be float tensor");if(g.length===3&&g[2].type!=="float32")throw new Error("Conv input(bias) should be float tensor")}},5193:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseDepthToSpaceAttributes=n.depthToSpace=void 0;const u=a(3738);n.depthToSpace=(p,s,h)=>{l(s);const f=h.blocksize,c=f*f,o=h.mode==="DCR"?[0,3,4,1,5,2]:[0,1,4,2,5,3],t=h.mode==="DCR"?[s[0].dims[0],f,f,s[0].dims[1]/c,s[0].dims[2],s[0].dims[3]]:[s[0].dims[0],s[0].dims[1]/c,f,f,s[0].dims[2],s[0].dims[3]],e=p.reshapeUnpacked(s[0],t),r={perm:o,cacheKey:`${o}`},[i]=(0,u.transpose)(p,[e],r),d=[s[0].dims[0],s[0].dims[1]/c,s[0].dims[2]*f,s[0].dims[3]*f];return[p.reshapeUnpacked(i,d)]},n.parseDepthToSpaceAttributes=p=>{const s=p.attributes.getInt("blocksize");if(s<1)throw new Error(`blocksize must be >= 1, but got : ${s} for DepthToSpace`);const h=p.attributes.getString("mode","DCR");if(h!=="DCR"&&h!=="CRD")throw new Error(`unrecognized mode: ${h} for DepthToSpace`);return{mode:h,blocksize:s}};const l=p=>{if(p.length!==1)throw new Error(`DepthToSpace expect 1 inputs, but got ${p.length}`);if(p[0].type==="string"||p[0].dims.length!==4)throw new TypeError("DepthToSpace input should be a 4-D numeric tensor")}},9828:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createDotProductProgramInfoLoader=void 0;const u=a(2517),l=a(5060),p=a(2039),s=a(2823),h=a(3248);n.createDotProductProgramInfoLoader=(f,c,o,t)=>{const e=((r,i)=>({name:"ConvDotProduct",inputNames:r?["Im2Col","K","B"]:["Im2Col","K"],inputTypes:r?[p.TextureType.unpacked,p.TextureType.packedLastDimension,p.TextureType.unpacked]:[p.TextureType.unpacked,p.TextureType.packedLastDimension],cacheKey:i.activationCacheKey}))(c.length>2,t);return Object.assign(Object.assign({},e),{get:()=>((r,i,d,g,m)=>{const b=d[0].dims,y=d[1].dims,w=[y[0],Math.ceil(b[1]*y[2]*y[3]/4)],v=(0,h.calculateIm2ColDims)(b,y,g),[S,A]=r.calculateTextureWidthAndHeight(w,p.TextureType.packedLastDimension),O=u.ShapeUtil.computeStrides(v),[x,I]=r.calculateTextureWidthAndHeight(v,p.TextureType.packedLastDimension),N=g.length,R=d.length<3?"0.0":"_B(b)",L=Math.ceil(b[1]*y[2]*y[3]/4),{activationFunction:$,applyActivation:G}=(0,s.getActivationSnippet)(m),D=(0,l.getGlsl)(r.session.backend.glContext.version),j=`
-${$}
-float process(int indices[${N}]) {
- int b[1];
- b[0] = indices[1];
- int im2col[4];
- im2col[0] = indices[0];
- im2col[1] = indices[2];
- im2col[2] = indices[3];
- int im2colOffset = im2col[0] * ${O[0]} + im2col[1] * ${O[1]} + im2col[2] * ${O[2]};
- int kernelOffset = indices[1] * ${w[1]};
- float value = ${R};
- for (int i = 0; i < ${L}; ++i) {
- vec2 im2colCoords = offsetToCoords(im2colOffset, ${x}, ${I});
- vec2 kernelCoords = offsetToCoords(kernelOffset, ${S}, ${A});
- value += dot(${D.texture2D}(Im2Col, im2colCoords), ${D.texture2D}(K, kernelCoords));
- ++im2colOffset;
- ++kernelOffset;
- }
- ${G}
- return value;
-}`;return Object.assign(Object.assign({},i),{output:{dims:g,type:d[0].type,textureType:p.TextureType.unpacked},shaderSource:j})})(f,e,c,o,t)})}},7992:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseFlattenAttributes=n.flatten=void 0;const u=a(2517);n.flatten=(p,s,h)=>{l(s,h);const f=u.ShapeUtil.flattenShape(s[0].dims,h);return[p.reshapeUnpacked(s[0],f)]},n.parseFlattenAttributes=p=>p.attributes.getInt("axis",1);const l=(p,s)=>{if(!p||p.length!==1)throw new Error("Flatten requires 1 input.");const h=p[0].dims.length;if(h===0)throw new Error("scalar tensor is not supported.");if(s<-h||s>h)throw new Error("Invalid axis");if(p[0].type==="string")throw new Error("string tensor is not supported.")}},2823:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseInternalActivationAttributes=n.getActivationSnippet=void 0;const u=a(2517),l=a(4909);n.getActivationSnippet=function(p){let s;switch(p.activation){case"Relu":s=(0,l.glslRelu)();break;case"Sigmoid":s=(0,l.glslSigmoid)();break;case"Clip":s=(0,l.glslClip)(p.clipMin,p.clipMax);break;default:return{activationFunction:"",applyActivation:""}}const h=s.name;return{activationFunction:s.body,applyActivation:`value = ${h}_(value);`}},n.parseInternalActivationAttributes=p=>{const s=p.getString("activation","");if(s==="Clip"){const[h,f]=p.getFloats("activation_params",[u.MIN_CLIP,u.MAX_CLIP]);return{activation:s,clipMax:f,clipMin:h,activationCacheKey:`${s}:${h},${f}`}}return{activation:s,activationCacheKey:s}}},1253:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseGatherAttributes=n.gather=void 0;const u=a(246),l=a(782),p=a(2517),s=a(2039);n.gather=(o,t,e)=>(c(t,e.axis),[o.run(f(o,t,e),t)]),n.parseGatherAttributes=o=>(0,u.createAttributeWithCacheKey)({axis:o.attributes.getInt("axis",0)});const h={name:"Gather",inputNames:["A","B"],inputTypes:[s.TextureType.unpacked,s.TextureType.unpacked]},f=(o,t,e)=>{const r=Object.assign(Object.assign({},h),{cacheHint:e.cacheKey});return Object.assign(Object.assign({},r),{get:()=>((i,d,g,m)=>{const b=g[0].dims.slice(),y=g[1].dims.slice(),w=new Array(b.length+y.length-1);m=p.ShapeUtil.normalizeAxis(m,b.length);const v=[];for(let A=0;A{if(!o||o.length!==2)throw new Error("Gather requires 2 inputs.");const e=o[0].dims.length;if(e<1)throw new Error("Invalid input shape.");if(t<-e||t>e-1)throw new Error("Invalid axis.");if(l.NUMBER_TYPES.indexOf(o[0].type)===-1)throw new Error("Invaid input type.");if(o[1].type!=="int32"&&o[1].type!=="int16")throw new Error("Invaid input type.")}},4776:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseGemmAttributesV11=n.parseGemmAttributesV7=n.gemm=void 0;const u=a(246),l=a(2517),p=a(2039);n.gemm=(o,t,e)=>(c(t,e),[o.run(h(t,e),t)]);const s=(o,t)=>{const e=o.attributes.getInt("transA",0)!==0,r=o.attributes.getInt("transB",0)!==0,i=o.attributes.getFloat("alpha",1),d=o.attributes.getFloat("beta",1);return(0,u.createAttributeWithCacheKey)({transA:e,transB:r,alpha:i,beta:d,isOptionalC:t})};n.parseGemmAttributesV7=o=>s(o,!1),n.parseGemmAttributesV11=o=>s(o,!0);const h=(o,t)=>{const e={name:"Gemm",inputNames:o.length===3?["A","B","C"]:["A","B"],inputTypes:o.length===3?[p.TextureType.unpacked,p.TextureType.unpacked,p.TextureType.unpacked]:[p.TextureType.unpacked,p.TextureType.unpacked],key:t.cacheKey};return Object.assign(Object.assign({},e),{get:()=>f(e,o,t)})},f=(o,t,e)=>{const r=t[0].dims.slice(),i=t[1].dims.slice(),[d,g]=l.GemmUtil.getShapeOfGemmResult(r,e.transA,i,e.transB,t.length===3?t[2].dims:void 0),m=[d,g];if(!m)throw new 
Error("Can't use gemm on the given tensors");let b=r[r.length-1],y="";e.transA&&(b=r[0]),e.transA&&e.transB?y="value += _A_T(a) * _B_T(b);":e.transA&&!e.transB?y="value += _A_T(a) * _B(b);":!e.transA&&e.transB?y="value += _A(a) * _B_T(b);":e.transA||e.transB||(y="value += _A(a) * _B(b);");const w=m.length,v=`
- float process(int indices[${w}]) {
- int a[${w}];
- int b[${w}];
- ${t.length===3?`int c[${t[2].dims.length}];`:""}
-
- copyVec(indices, a);
- copyVec(indices, b);
- ${t.length===3?"bcastIndices_C(indices, c);":""}
-
- float value = 0.0;
- for (int k=0; k<${b}; ++k) {
- a[${w-1}] = k;
- b[${w-2}] = k;
- ${y}
- }
-
- value = value * alpha;
- ${t.length===3?"value += beta * _C(c);":""}
- return value;
- }`;return Object.assign(Object.assign({},o),{output:{dims:m,type:t[0].type,textureType:p.TextureType.unpacked},variables:[{name:"alpha",type:"float",data:e.alpha},{name:"beta",type:"float",data:e.beta}],shaderSource:v})},c=(o,t)=>{if(!o)throw new Error("Input is missing");if(t.isOptionalC&&(o.length<2||o.length>3))throw new Error("Invaid input shape.");if(!t.isOptionalC&&o.length!==3)throw new Error("Gemm requires 3 inputs");if(o.length===3&&o[2].dims.length!==1&&o[2].dims.length!==2)throw new Error("Invalid input shape of C");if(o[0].type!=="float32"&&o[0].type!=="float64"||o[1].type!=="float32"&&o[1].type!=="float64"||o.length===3&&o[2].type!=="float32"&&o[2].type!=="float64")throw new Error("Invalid input type.");if(o[0].type!==o[1].type||o.length===3&&o[0].type!==o[2].type)throw new Error("Input types are mismatched")}},8555:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createPackedIm2ColProgramInfoLoader=void 0;const u=a(5060),l=a(2039),p=a(2827);n.createPackedIm2ColProgramInfoLoader=(s,h,f,c,o)=>{const t=(e=o.cacheKey,{name:"Im2Col (packed)",inputNames:["A"],inputTypes:[l.TextureType.packed],cacheHint:e});var e;return Object.assign(Object.assign({},t),{get:()=>((r,i,d,g,m,b)=>{const y=d.dims,w=g.dims,v=m.length,S=[w[1]*w[2]*w[3],m[2]*m[3]],A=w[2]*w[3],O=(0,p.unpackFromChannel)(),x=(0,u.getGlsl)(r.session.backend.glContext.version);let I="";for(let R=0;R<=1;R++)for(let L=0;L<=1;L++)I+=`
- blockIndex = rc.x + ${L};
- pos = rc.y + ${R};
-
- if(blockIndex < ${S[1]} && pos < ${S[0]}) {
- offsetY = int(blockIndex / (${m[v-1]})) * ${b.strides[0]} -
- ${b.pads[0]};
- d0 = offsetY + ${b.dilations[0]} * (imod(pos, ${A}) / ${w[2]});
-
- if(d0 < ${y[2]} && d0 >= 0) {
- offsetX = imod(blockIndex, ${m[v-1]}) * ${b.strides[1]} -
- ${b.pads[1]};
- d1 = offsetX + ${b.dilations[1]} * imod(imod(pos, ${A}), ${w[2]});
-
- if(d1 < ${y[3]} && d1 >= 0) {
-
- ch = int(float(pos)/ ${A}.);
- innerDims = vec2(d0, d1);
- result[${2*R+L}] = getChannel(
- getA(0, ch, int(innerDims.x),
- int(innerDims.y)), innerDims);
- }
- }
- }
-
- `;const N=`
- ${O}
-
- void main() {
- ivec2 rc = getOutputCoords();
- vec4 result = vec4(0.0);
- int blockIndex, pos, offsetY, d0, offsetX, d1, ch;
- vec2 innerDims;
- ${I}
- ${x.output} = result;
- }
- `;return Object.assign(Object.assign({},i),{output:{dims:S,type:d.type,textureType:l.TextureType.packed},shaderSource:N,hasMain:!0})})(s,t,h,f,c,o)})}},3248:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.calculateIm2ColDims=n.createIm2ColProgramInfoLoader=void 0;const u=a(2039);n.createIm2ColProgramInfoLoader=(l,p,s,h,f)=>{const c=(o=f.cacheKey,{name:"Im2Col",inputNames:["X"],inputTypes:[u.TextureType.unpacked],cacheHint:o});var o;return Object.assign(Object.assign({},c),{get:()=>((t,e,r,i,d,g)=>{const m=r.dims,b=i.dims,y=d.length,w=(0,n.calculateIm2ColDims)(m,b,d,4),v=`
- const int XC = ${m[1]};
- const int XH = ${m[2]};
- const int XW = ${m[3]};
- const int KH = ${g.kernelShape[0]};
- const int KW = ${g.kernelShape[1]};
- const int dilationH = ${g.dilations[0]};
- const int dilationW = ${g.dilations[1]};
- const int strideH = ${g.strides[0]};
- const int strideW = ${g.strides[1]};
- const int padH = ${g.pads[0]};
- const int padW = ${g.pads[1]};
- const int KHKW = KH*KW;
- const int XCKHKW = XC * KHKW;
- const int outputChannels = 4;
- vec4 process(int indices[${y}]) {
- int b = indices[0]; // batch size
- int oh = indices[1] * strideH - padH; //output height
- int ow = indices[2] * strideW - padW; //output width
- int p = indices[3] * outputChannels; //patch
- vec4 value = vec4(0.0);
- for(int i=0; i < outputChannels; ++i) {
- if(p < XCKHKW) {
- int patchC = p / KHKW;
- int patchH = (p - patchC*KHKW) / KW;
- int patchW = (p - patchC*KHKW) - patchH * KW;
- int xh2 = oh + patchH * dilationH;
- int xw2 = ow + patchW * dilationW;
- int x[${m.length}];
- x[0] = b;
- x[1] = patchC;
- x[2] = xh2;
- x[3] = xw2;
- if(xh2 >= 0 &&
- xh2 < XH &&
- xw2 >= 0 &&
- xw2 < XW) {
- value[i] = _X(x);
- }
- }
- ++p;
- }
- return value;
- }
- `;return Object.assign(Object.assign({},e),{output:{dims:w,type:r.type,textureType:u.TextureType.packedLastDimension},shaderSource:v})})(0,c,p,s,h,f)})},n.calculateIm2ColDims=(l,p,s,h=4)=>[s[0],s[2],s[3],Math.ceil(l[1]*p[2]*p[3]/h)]},6572:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseImageScalerAttributes=n.imageScaler=void 0;const u=a(246),l=a(2039);n.imageScaler=(c,o,t)=>(f(o),[c.run(s(c,o,t),o)]),n.parseImageScalerAttributes=c=>{const o=c.attributes.getFloat("scale"),t=c.attributes.getFloats("bias");return(0,u.createAttributeWithCacheKey)({scale:o,bias:t})};const p={name:"ImageScaler",inputNames:["X"],inputTypes:[l.TextureType.unpacked]},s=(c,o,t)=>{const e=Object.assign(Object.assign({},p),{cacheHint:t.cacheKey});return Object.assign(Object.assign({},e),{get:()=>((r,i,d,g)=>{const m=d[0].dims.slice(),b=m.length,y=`
- ${h(g.bias.length)}
- float process(int indices[${b}]) {
- return _X(indices) * scale + getBias(bias, indices[1]);
- }`;return Object.assign(Object.assign({},i),{output:{dims:m,type:d[0].type,textureType:l.TextureType.unpacked},variables:[{name:"bias",type:"float",arrayLength:g.bias.length,data:g.bias},{name:"scale",type:"float",data:g.scale}],shaderSource:y})})(0,e,o,t)})},h=c=>{const o=[`float getBias(float bias[${c}], int channel) {`];for(let t=0;t{if(!c||c.length!==1)throw new Error("ImageScaler requires 1 input.");if(c[0].dims.length!==4)throw new Error("Invalid input shape.");if(c[0].type!=="float32"&&c[0].type!=="float64")throw new Error("Invalid input type.")}},3346:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseInstanceNormalizationAttributes=n.instanceNormalization=void 0;const u=a(5060),l=a(2039);n.instanceNormalization=(o,t,e)=>{c(t);const r=o.run(s(t[0]),t);return[o.run(f(o,t[0],e,r.dims),[t[0],r,t[1],t[2]])]},n.parseInstanceNormalizationAttributes=o=>o.attributes.getFloat("epsilon",1e-5);const p={name:"InstanceNormalization_MeanAndVariance",inputNames:["X"],inputTypes:[l.TextureType.unpacked]},s=o=>Object.assign(Object.assign({},p),{get:()=>((t,e)=>{const r=e.dims.slice(),i=r[1],d=r[2]*r[3],g=[r[0],i],m=`
- vec4 process(int[2] indices) {
- vec4 v = vec4(0.0);
- int a[4];
- a[0] = indices[0];
- a[1] = indices[1];
- float temp = 0.0;
- for(int a2=0; a2<${r[2]}; a2++) {
- a[2] = a2;
- for(int a3=0; a3<${r[3]}; a3++) {
- a[3] = a3;
- float x = _X(a);
- temp += x;
- }
- }
- float mean = temp / float(${d});
- temp = 0.0;
- for(int a2=0; a2<${r[2]}; a2++) {
- a[2] = a2;
- for(int a3=0; a3<${r[3]}; a3++) {
- a[3] = a3;
- float x = _X(a);
- temp += (x - mean) * (x - mean);
- }
- }
- v.r = mean;
- v.g = temp / float(${d});
-
- return v;
- }`;return Object.assign(Object.assign({},t),{output:{dims:g,type:e.type,textureType:l.TextureType.packedLastDimension},shaderSource:m})})(p,o)}),h={name:"InstanceNormalization_ComputeOutput",inputNames:["X","MeanAndVariance","Scale","B"],inputTypes:[l.TextureType.unpacked,l.TextureType.packedLastDimension,l.TextureType.unpacked,l.TextureType.unpacked]},f=(o,t,e,r)=>{const i=Object.assign(Object.assign({},h),{cacheHint:`${e}`});return Object.assign(Object.assign({},i),{get:()=>((d,g,m,b,y)=>{const w=(0,u.getGlsl)(d.session.backend.glContext.version),[v,S]=d.calculateTextureWidthAndHeight(y,l.TextureType.packedLastDimension),[A,O]=[v/4,S],x=`
- vec4 get_MeanAndVariance(int[2] mv) {
- int offset = indicesToOffset_MeanAndVariance(mv);
- vec2 coords = offsetToCoords(offset, ${A}, ${O});
- return ${w.texture2D}(MeanAndVariance, coords);
- }
-
- float process(int[4] indices) {
- int mv[2];
- mv[0] = indices[0];
- mv[1] = indices[1];
- vec4 mean_and_variance = get_MeanAndVariance(mv);
- float mean = mean_and_variance.r;
- float variance = mean_and_variance.g;
-
- int sb[1];
- sb[0] = indices[1];
- float scale = _Scale(sb);
- float b = _B(sb);
-
- return scale * (_X(indices) - mean) / sqrt(variance + epsilon) + b;
- }`;return Object.assign(Object.assign({},g),{output:{dims:m.dims,type:m.type,textureType:l.TextureType.unpacked},variables:[{name:"epsilon",type:"float",data:b}],shaderSource:x})})(o,i,t,e,r)})},c=o=>{if(!o||o.length!==3)throw new Error("InstanceNormalization requires 3 inputs.");const t=o[0],e=o[1],r=o[2];if(t.dims.length<3||e.dims.length!==1||r.dims.length!==1)throw new Error("Invalid input shape.");if(e.dims[0]!==t.dims[1]||r.dims[0]!==t.dims[1])throw new Error("Input shapes are mismatched.");if(t.type!=="float32"&&t.type!=="float64"||e.type!=="float32"&&e.type!=="float64"||r.type!=="float32"&&r.type!=="float64")throw new Error("Invalid input type.");if(o[0].dims.length!==4)throw new Error("Only support 4-D input shape.")}},708:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createPackedMatmulProgramInfoLoader=void 0;const u=a(2517),l=a(5060),p=a(2039),s=a(9390),h=a(2823),f=a(5623);n.createPackedMatmulProgramInfoLoader=(c,o,t)=>{const e=(r=o.length>2,i=t.activationCacheKey,{name:"MatMul (packed)",inputNames:r?["A","B","Bias"]:["A","B"],inputTypes:r?[p.TextureType.packed,p.TextureType.packed,p.TextureType.packed]:[p.TextureType.packed,p.TextureType.packed],cacheHint:i});var r,i;return Object.assign(Object.assign({},e),{get:()=>((d,g,m,b)=>{const y=m.length>2,w=y?"value += getBiasForMatmul();":"",v=m[0].dims,S=m[1].dims,A=u.BroadcastUtil.calcShape(v,S,!0),O=!u.ShapeUtil.areEqual(m[0].dims,m[1].dims);if(!A)throw new Error("Can't use matmul on the given tensors");const x=v[v.length-1],I=Math.ceil(x/2),N=v.length,R=S.length,L=(0,l.getGlsl)(d.session.backend.glContext.version),$=(0,s.getCoordsDataType)(A.length),G=A.length,D=(0,s.getGlChannels)(),{activationFunction:j,applyActivation:Z}=(0,h.getActivationSnippet)(b),X=y?`${(0,f.getBiasForMatmul)($,D,m[2].dims,A,!0)}`:"",J=O?`${function(ve,oe,ye,be){let ke=[],Fe=[];const xe=ye[0].dims,Ne=ye[1].dims,Ce=xe.length,Oe=Ne.length,Ae=be.length,Be=Ae-Ce,Ge=Ae-Oe;ke=xe.map((Pe,je)=>`coords.${oe[je+Be]}`),ke[Ce-1]="i*2",ke.join(", "),Fe=Ne.map((Pe,je)=>`coords.${oe[je+Ge]}`),Fe[Oe-2]="i*2",Fe.join(", ");const Ve=u.BroadcastUtil.getBroadcastDims(xe,be),Xe=u.BroadcastUtil.getBroadcastDims(Ne,be),Ze=Ve.map(Pe=>`coords.${oe[Pe+Be]} = 0;`).join(`
-`),qe=Xe.map(Pe=>`coords.${oe[Pe+Ge]} = 0;`).join(`
-`),Ue=`int lastDim = coords.${oe[Ae-1]};
- coords.${oe[Ae-1]} = coords.${oe[Ae-2]};
- coords.${oe[Ae-2]} = lastDim;`;return`
-vec4 getAAtOutCoordsMatmul(int i) {
- ${ve} coords = getOutputCoords();
- ${Ue}
- ${Ze}
- vec4 outputValue = getA(${ke});
- return outputValue;
-}
-
-vec4 getBAtOutCoordsMatmul(int i) {
- ${ve} coords = getOutputCoords();
- ${Ue}
- ${qe}
- vec4 outputValue = getB(${Fe});
- return outputValue;
-}`}($,D,m,A)}`:"",ee=O?"getAAtOutCoordsMatmul(i)":`getA(${function(ve,oe){let ye="";for(let be=0;be{Object.defineProperty(n,"__esModule",{value:!0}),n.getBiasForMatmul=n.createMatmulProgramInfoLoader=n.parseMatMulAttributes=n.matMul=void 0;const u=a(2517),l=a(2039),p=a(9390),s=a(2823),h=a(708);function f(t,e){const r=(i=t.length>2,d=e.activationCacheKey,{name:"MatMul",inputNames:i?["A","B","Bias"]:["A","B"],inputTypes:i?[l.TextureType.unpacked,l.TextureType.unpacked,l.TextureType.unpacked]:[l.TextureType.unpacked,l.TextureType.unpacked],cacheHint:d});var i,d;return Object.assign(Object.assign({},r),{get:()=>function(g,m,b){const y=m[0].dims,w=m[1].dims,v=u.BroadcastUtil.calcShape(y,w,!0);if(!v)throw new Error("Can't use matmul on the given tensors");const S=(0,p.getCoordsDataType)(v.length),A=(0,p.getGlChannels)(),{activationFunction:O,applyActivation:x}=(0,s.getActivationSnippet)(b),I=m.length>2,N=I?"value += getBiasForMatmul();":"",R=I?`${o(S,A,m[2].dims,v,!1)}`:"",L=v.length,$=y.length,G=w.length,D=`
- ${O}
- ${R}
- float process(int indices[${L}]) {
- int a[${$}];
- int b[${G}];
- bcastMatmulIndices_A(indices, a);
- bcastMatmulIndices_B(indices, b);
-
- float value;
- for (int k=0; k<${y[y.length-1]}; ++k) {
- a[${$-1}] = k;
- b[${G-2}] = k;
- value += _A(a) * _B(b);
- }
- ${N}
- ${x}
- return value;
- }`;return Object.assign(Object.assign({},g),{output:{dims:v,type:m[0].type,textureType:l.TextureType.unpacked},shaderSource:D})}(r,t,e)})}n.matMul=(t,e,r)=>(c(e),t.session.pack?[t.run((0,h.createPackedMatmulProgramInfoLoader)(t,e,r),e)]:[t.run(f(e,r),e)]),n.parseMatMulAttributes=t=>(0,s.parseInternalActivationAttributes)(t.attributes),n.createMatmulProgramInfoLoader=f;const c=t=>{if(!t||t.length!==2)throw new Error("MatMul requires 2 inputs.");if(t[0].dims[t[0].dims.length-1]!==t[1].dims[t[1].dims.length-2])throw new Error("shared dimension does not match.");if(t[0].type!=="float32"&&t[0].type!=="float64"||t[1].type!=="float32"&&t[1].type!=="float64")throw new Error("inputs should be float type");if(t[0].type!==t[1].type)throw new Error("inputs types should match")};function o(t,e,r,i,d){let g="";const m=r.length,b=i.length,y=b-m;g=b<2&&m>0?"coords":r.map((S,A)=>`coords.${e[A+y]}`).join(", ");const w=u.BroadcastUtil.getBroadcastDims(r,i).map(S=>`coords.${e[S+y]} = 0;`).join(`
-`);let v="vec4(outputValue.xx, outputValue.yy)";return u.ShapeUtil.size(r)===1&&(v="vec4(outputValue.x)"),d?`
-vec4 getBiasForMatmul() {
- ${t} coords = getOutputCoords();
- ${w}
- vec4 outputValue = getBias(${g});
- return ${v};
-}`:`
-float getBiasForMatmul() {
- ${t} coords = getOutputCoords();
- ${w}
- return getBias(coords.x);
-}`}n.getBiasForMatmul=o},2403:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createPackProgramInfoLoader=void 0;const u=a(5060),l=a(2039),p=a(9390),s=a(2827),h={name:"pack",inputNames:["A"],inputTypes:[l.TextureType.unpackedReversed]};n.createPackProgramInfoLoader=(f,c)=>Object.assign(Object.assign({},h),{get:()=>((o,t)=>{const e=(0,u.getGlsl)(o.session.backend.glContext.version),r=t.dims,i=r.length,d=t.dims.length,g=(0,p.getCoordsDataType)(d),m=(0,s.getChannels)("rc",d),b=(y=d,w=m,v=r[r.length-2],S=r[r.length-1],y===0||y===1?"":`
- int r = ${w[y-2]};
- int c = ${w[y-1]};
- int rp1 = ${w[y-2]} + 1;
- int cp1 = ${w[y-1]} + 1;
- bool rEdge = rp1 >= ${S};
- bool cEdge = cp1 >= ${v};
- `);var y,w,v,S;let A;A=i===0?[1,1]:i===1?[r[0],1]:[r[d-1],r[d-2]];const O=function(N,R,L){if(N===0)return"false";if(N===1)return`rc > ${R[0]}`;let $="";for(let G=N-2;G= ${R[G-N+2]}`,G= ${N[0]} ? 0. : getA(rc + 1),
- 0, 0`;let $="";if(L>2)for(let G=0;G{Object.defineProperty(n,"__esModule",{value:!0}),n.unpackFromChannel=n.getChannels=n.getVecChannels=void 0;const u=a(9390);function l(p,s){return(0,u.getGlChannels)(s).map(h=>`${p}.${h}`)}n.getVecChannels=l,n.getChannels=function(p,s){return s===1?[p]:l(p,s)},n.unpackFromChannel=function(){return`
- float getChannel(vec4 frag, int dim) {
- int modCoord = imod(dim, 2);
- return modCoord == 0 ? frag.r : frag.g;
- }
-
- float getChannel(vec4 frag, vec2 innerDims) {
- vec2 modCoord = mod(innerDims, 2.);
- return modCoord.x == 0. ?
- (modCoord.y == 0. ? frag.r : frag.g) :
- (modCoord.y == 0. ? frag.b : frag.a);
- }
- `}},2870:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parsePadAttributesV11=n.padV11=n.parsePadAttributesV2=n.padV2=void 0;const u=a(246),l=a(2517),p=a(5060),s=a(2039),h={name:"Pad",inputNames:["A"],inputTypes:[s.TextureType.unpacked]};n.padV2=(g,m,b)=>(o(m),[g.run(Object.assign(Object.assign({},h),{cacheHint:b.cacheKey,get:()=>c(g,m[0],b)}),m)]),n.parsePadAttributesV2=g=>{const m=g.attributes.getString("mode","constant"),b=g.attributes.getFloat("value",0),y=g.attributes.getInts("pads");return(0,u.createAttributeWithCacheKey)({mode:m,value:b,pads:y})},n.padV11=(g,m,b)=>{t(m);const y=f(g,m,b);return(0,n.padV2)(g,[m[0]],y)},n.parsePadAttributesV11=g=>g.attributes.getString("mode","constant");const f=(g,m,b)=>{if(!g.session.isInitializer(m[1].dataId)||m.length>=3&&!g.session.isInitializer(m[2].dataId))throw new Error("dynamic pad attributes are not allowed");const y=Array.from(m[1].integerData),w=m.length>=3?m[2].floatData[0]:0;return(0,u.createAttributeWithCacheKey)({mode:b,pads:y,value:w})},c=(g,m,b)=>{const y=l.ShapeUtil.padShape(m.dims.slice(),b.pads),w=y.length,v=`
- ${e(g,m,b)}
- float process(int[${w}] indices) {
- return padA(indices);
- }`;return{name:"Pad",inputNames:["A"],inputTypes:[s.TextureType.unpacked],output:{dims:y,type:m.type,textureType:s.TextureType.unpacked},shaderSource:v}},o=g=>{if(!g||g.length!==1)throw new Error("Pad requires 1 input");if(g[0].type!=="float32"&&g[0].type!=="float64")throw new Error("Invalid input type.")},t=g=>{if(!g||g.length!==2&&g.length!==3)throw new Error("Pad requires 2 or 3 inputs");if(g[1].type!=="int32")throw new Error("Invalid input type.");if(g.length>=3&&g[2].type==="string")throw new Error("Invalid input type.")},e=(g,m,b)=>{const y=(0,p.getGlsl)(g.session.backend.glContext.version),[w,v]=g.calculateTextureWidthAndHeight(m.dims,s.TextureType.unpacked),S=l.ShapeUtil.computeStrides(m.dims);switch(b.mode){case"constant":return r(y,m.dims,S,w,v,b.pads,b.value);case"reflect":return i(y,m.dims,S,w,v,b.pads);case"edge":return d(y,m.dims,S,w,v,b.pads);default:throw new Error("Invalid mode")}},r=(g,m,b,y,w,v,S)=>{const A=m.length;let O="";for(let x=A-1;x>=0;--x)O+=`
- k = m[${x}] - ${v[x]};
- if (k < 0) return constant;
- if (k >= ${m[x]}) return constant;
- offset += k * ${b[x]};
- `;return`
- float padA(int m[${A}]) {
- const float constant = float(${S});
- int offset = 0;
- int k = 0;
- ${O}
- vec2 coords = offsetToCoords(offset, ${y}, ${w});
- float value = getColorAsFloat(${g.texture2D}(A, coords));
- return value;
- }
- `},i=(g,m,b,y,w,v)=>{const S=m.length;let A="";for(let O=S-1;O>=0;--O)A+=`
- k = m[${O}] - ${v[O]};
- if (k < 0) { k = -k; }
- {
- const int _2n_1 = ${2*(m[O]-1)};
- k = int( mod( float(k), float(_2n_1) ) ) ;
- if(k >= ${m[O]}) { k = _2n_1 - k; }
- }
- offset += k * ${b[O]};
- `;return`
- float padA(int m[${S}]) {
- int offset = 0;
- int k = 0;
- ${A}
- vec2 coords = offsetToCoords(offset, ${y}, ${w});
- float value = getColorAsFloat(${g.texture2D}(A, coords));
- return value;
- }
- `},d=(g,m,b,y,w,v)=>{const S=m.length;let A="";for(let O=S-1;O>=0;--O)A+=`
- k = m[${O}] - ${v[O]};
- if (k < 0) k = 0;
- if (k >= ${m[O]}) k = ${m[O]-1};
- offset += k * ${b[O]};
- `;return`
- float padA(int m[${S}]) {
- int offset = 0;
- int k = 0;
- ${A}
- vec2 coords = offsetToCoords(offset, ${y}, ${w});
- float value = getColorAsFloat(${g.texture2D}(A, coords));
- return value;
- }
- `}},2143:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.globalMaxPool=n.parseMaxPoolAttributes=n.maxPool=n.parseGlobalAveragePoolAttributes=n.globalAveragePool=n.parseAveragePoolAttributes=n.averagePool=void 0;const u=a(246),l=a(2517),p=a(2039);n.averagePool=(d,g,m)=>{t(g);const b={name:"AveragePool",inputNames:["X"],inputTypes:[p.TextureType.unpacked],cacheHint:m.cacheKey};return[d.run(Object.assign(Object.assign({},b),{get:()=>s(g,b,!1,m)}),g)]},n.parseAveragePoolAttributes=d=>{const g=d.attributes.getString("auto_pad","NOTSET"),m=d.attributes.getInt("ceil_mode",0),b=d.attributes.getInt("count_include_pad",0)!==0,y=d.attributes.getInts("kernel_shape"),w=d.attributes.getInts("strides",[]),v=d.attributes.getInts("pads",[]);if(m!==0)throw new Error("using ceil() in shape computation is not yet supported for AveragePool");return(0,u.createAttributeWithCacheKey)({autoPad:g,ceilMode:m,countIncludePad:b,kernelShape:y,strides:w,pads:v})};const s=(d,g,m,b)=>{const[y,w]=f(d,b,m),v=l.ShapeUtil.size(y.kernelShape);let S="";y.countIncludePad?S+=`value /= float(${v});`:S+=`value /= float(${v} - pad);`;const A=`
- ${e(d[0].dims,y,"value += _X(x);",S,"0.0")}
- `;return Object.assign(Object.assign({},g),{output:{dims:w,type:d[0].type,textureType:p.TextureType.unpacked},shaderSource:A})};n.globalAveragePool=(d,g,m)=>{t(g);const b={name:"GlobalAveragePool",inputNames:["X"],inputTypes:[p.TextureType.unpacked],cacheHint:`${m.countIncludePad}`};return[d.run(Object.assign(Object.assign({},b),{get:()=>s(g,b,!0,m)}),g)]},n.parseGlobalAveragePoolAttributes=d=>{const g=d.attributes.getInt("count_include_pad",0)!==0;return(0,u.createAttributeWithCacheKey)({autoPad:"",ceilMode:0,countIncludePad:g,kernelShape:[],strides:[],pads:[]})},n.maxPool=(d,g,m)=>{t(g);const b={name:"MaxPool",inputNames:["X"],inputTypes:[p.TextureType.unpacked],cacheHint:m.cacheKey};return[d.run(Object.assign(Object.assign({},b),{get:()=>h(g,b,!1,m)}),g)]},n.parseMaxPoolAttributes=d=>{const g=d.attributes.getString("auto_pad","NOTSET"),m=d.attributes.getInt("ceil_mode",0),b=d.attributes.getInts("kernel_shape"),y=d.attributes.getInts("strides",[]),w=d.attributes.getInts("pads",[]),v=d.attributes.getInt("storage_order",0),S=d.attributes.getInts("dilations",[]);if(v!==0)throw new Error("column major storage order is not yet supported for MaxPool");if(m!==0)throw new Error("using ceil() in shape computation is not yet supported for MaxPool");return(0,u.createAttributeWithCacheKey)({autoPad:g,ceilMode:m,countIncludePad:!1,kernelShape:b,strides:y,pads:w,storageOrder:v,dilations:S})};const h=(d,g,m,b)=>{const[y,w]=f(d,b,m),v=`
- ${e(d[0].dims,y,`
- value = max(_X(x), value);
- `,"","-1e5")}
- `;return Object.assign(Object.assign({},g),{output:{dims:w,type:d[0].type,textureType:p.TextureType.unpacked},shaderSource:v})},f=(d,g,m)=>{const b=d[0].dims.slice(),y=Object.hasOwnProperty.call(g,"dilations"),w=g.kernelShape.slice(),v=g.strides.slice(),S=y?g.dilations.slice():[],A=g.pads.slice();l.PoolConvUtil.adjustPoolAttributes(m,b,w,v,S,A);const O=l.PoolConvUtil.computePoolOutputShape(m,b,v,S,w,A,g.autoPad),x=Object.assign({},g);return y?Object.assign(x,{kernelShape:w,strides:v,pads:A,dilations:S,cacheKey:g.cacheKey}):Object.assign(x,{kernelShape:w,strides:v,pads:A,cacheKey:g.cacheKey}),[x,O]},c={autoPad:"",ceilMode:0,countIncludePad:!1,kernelShape:[],strides:[],pads:[],storageOrder:0,dilations:[],cacheKey:""},o={name:"GlobalMaxPool",inputNames:["X"],inputTypes:[p.TextureType.unpacked]};n.globalMaxPool=(d,g)=>(t(g),[d.run(Object.assign(Object.assign({},o),{get:()=>h(g,o,!0,c)}),g)]);const t=d=>{if(!d||d.length!==1)throw new Error("Pool ops requires 1 input.");if(d[0].type!=="float32"&&d[0].type!=="float64")throw new Error("Invalid input type.")},e=(d,g,m,b,y)=>{const w=d.length;if(g.kernelShape.length<=2){const v=g.kernelShape[g.kernelShape.length-1],S=g.strides[g.strides.length-1],A=g.pads[g.pads.length/2-1],O=g.pads[g.pads.length-1],x=d[w-1];let I="",N="",R="";if(I=A+O!==0?`
- for (int i = 0; i < ${v}; i++) {
- x[${w} - 1] = indices[${w} - 1] * ${S} - ${A} + i;
- if (x[${w} - 1] < 0 || x[${w} - 1] >= ${x}) {
- pad++;
- continue;
- }
- ${m}
- }`:`
- for (int i = 0; i < ${v}; i++) {
- x[${w} - 1] = indices[${w} - 1] * ${S} - ${A} + i;
- ${m}
- }`,g.kernelShape.length===2){const L=g.kernelShape[g.kernelShape.length-2],$=g.strides[g.strides.length-2],G=g.pads[g.pads.length/2-2],D=g.pads[g.pads.length-2],j=d[w-2];N=G+D!==0?`
- for (int j = 0; j < ${L}; j++) {
- x[${w} - 2] = indices[${w} - 2] * ${$} - ${G} + j;
- if (x[${w} - 2] < 0 || x[${w} - 2] >= ${j}) {
- pad+= ${v};
- continue;
- }
- `:`
- for (int j = 0; j < ${L}; j++) {
- x[${w} - 2] = indices[${w} - 2] * ${$} - ${G} + j;
- `,R=`
- }
- `}return`
- float process(int indices[${w}]) {
- int x[${w}];
- copyVec(indices, x);
-
- float value = ${y};
- int pad = 0;
- ${N}
- ${I}
- ${R}
- ${b}
- return value;
- }
- `}{const v=l.ShapeUtil.size(g.kernelShape),S=l.ShapeUtil.computeStrides(g.kernelShape),A=S.length,O=g.pads.length,x=i(A),I=r(d,"inputDims"),N=r(g.pads,"pads"),R=r(S,"kernelStrides"),L=r(g.strides,"strides");let $="";return $=g.pads.reduce((G,D)=>G+D)?`
- if (x[j] >= inputDims[j] || x[j] < 0) {
- pad++;
- isPad = true;
- break;
- }
- }
- if (!isPad) {
- ${m}
- }`:`
- }
- ${m}
- `,`
- ${x}
- float process(int indices[${w}]) {
- int x[${w}];
- copyVec(indices, x);
- int offset[${A}];
- int pads[${O}];
- int inputDims[${w}];
- int kernelStrides[${A}];
- int strides[${A}];
- ${N}
- ${I}
- ${L}
- ${R}
-
- float value = ${y};
- int pad = 0;
- bool isPad = false;
- for (int i = 0; i < ${v}; i++) {
- offsetToIndices(i, kernelStrides, offset);
- isPad = false;
- for (int j = ${w} - ${A}; j < ${w}; j++) {
- x[j] = indices[j] * strides[j - ${w} + ${A}]
- + offset[j - ${w} + ${A}] - pads[j - 2];
- ${$}
- }
- ${b}
-
- return value;
- }
- `}},r=(d,g)=>{let m="";for(let b=0;b`
- void offsetToIndices(int offset, int[${d}] strides, out int[${d}] indices) {
- if (${d} == 0) {
- return;
- }
- for (int i = 0; i < ${d} - 1; ++i) {
- indices[i] = offset / strides[i];
- offset -= indices[i] * strides[i];
- }
- indices[${d} - 1] = offset;
- }`},4939:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.reduceLogSumSquare=n.reduceLogSum=n.reduceProd=n.reduceMin=n.reduceMax=n.reduceMean=n.reduceSum=n.parseReduceAttributes=void 0;const u=a(246),l=a(782),p=a(2517),s=a(2039),h=(o,t,e,r,i)=>{c(t);const d={name:r,inputNames:["A"],inputTypes:[s.TextureType.unpacked]};return[o.run(Object.assign(Object.assign({},d),{cacheHint:e.cacheKey,get:()=>f(o,t,e,r,i,d)}),t)]};n.parseReduceAttributes=o=>{const t=o.attributes.getInts("axes",[]),e=o.attributes.getInt("keepdims",1)===1;return(0,u.createAttributeWithCacheKey)({axes:t,keepDims:e})};const f=(o,t,e,r,i,d)=>{const g=[],m=t[0].dims.length||1,b=[],y=p.ShapeUtil.normalizeAxes(e.axes,t[0].dims.length),w=i(t,y);let v=w[1];for(let A=0;A<t[0].dims.length;A++)y.indexOf(A)>=0||y.length===0?(e.keepDims&&g.push(1),v=`
- for(int j${A} = 0; j${A} < ${t[0].dims[A]}; j${A}++) {
- inputIdx[${A}] = j${A};
- ${v}
- }`):(b.push(`inputIdx[${A}] = outputIdx[${g.length}];`),g.push(t[0].dims[A]));const S=`
- float process(int outputIdx[${g.length||1}]) {
- float value; // final result
- int inputIdx[${m}]; // addressing input data
- ${b.join(`
-`)}
- ${w[0]} // init ops for reduce max/min
- ${v}
- ${w[2]} // final computation for reduce mean
- return value;
- }`;return Object.assign(Object.assign({},d),{output:{dims:g,type:t[0].type,textureType:s.TextureType.unpacked},shaderSource:S})},c=o=>{if(!o||o.length!==1)throw new Error("Reduce op requires 1 input.");if(l.NUMBER_TYPES.indexOf(o[0].type)===-1)throw new Error("Invalid input type.")};n.reduceSum=(o,t,e)=>h(o,t,e,"ReduceSum",()=>["value = 0.0;","value += _A(inputIdx);",""]),n.reduceMean=(o,t,e)=>h(o,t,e,"ReduceMean",(r,i)=>{let d=1;for(let g=0;g<r[0].dims.length;g++)(i.indexOf(g)>=0||i.length===0)&&(d*=r[0].dims[g]);return["value = 0.0;","value += _A(inputIdx);",`value /= ${d}.;`]}),n.reduceMax=(o,t,e)=>h(o,t,e,"ReduceMax",(r,i)=>{const d=[];for(let g=0;g<r[0].dims.length;g++)(i.indexOf(g)>=0||i.length===0)&&d.push(`inputIdx[${g}] = 0;`);return[`${d.join(`
-`)}
-value = _A(inputIdx);`,"value = min(value, _A(inputIdx));",""]}),n.reduceMin=(o,t,e)=>h(o,t,e,"ReduceMin",(r,i)=>{const d=[];for(let g=0;g<r[0].dims.length;g++)(i.indexOf(g)>=0||i.length===0)&&d.push(`inputIdx[${g}] = 0;`);return[`${d.join(`
-`)}
-value = _A(inputIdx);`,"value = min(value, _A(inputIdx));",""]}),n.reduceProd=(o,t,e)=>h(o,t,e,"ReduceProd",()=>["value = 1.0;","value *= _A(inputIdx);",""]),n.reduceLogSum=(o,t,e)=>h(o,t,e,"ReduceLogSum",()=>["value = 0.0;","value += _A(inputIdx);","value = log(value);"]),n.reduceLogSumSquare=(o,t,e)=>h(o,t,e,"ReduceLogSumSquare",()=>["float t; value = 0.0;","t = _A(inputIdx); value += t * t;",""])},7019:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.isReshapeCheap=n.processDims3D=n.createPackedReshape3DProgramInfoLoader=void 0;const u=a(2517),l=a(5060),p=a(2039),s=a(2827);n.createPackedReshape3DProgramInfoLoader=(h,f,c)=>{const o=(t=>({name:"Reshape (packed)",inputTypes:[p.TextureType.packed],inputNames:["A"],cacheHint:`${t}`}))(c);return Object.assign(Object.assign({},o),{get:()=>((t,e,r,i)=>{const d=e.dims,g=i;let m="";for(let w=0;w<4;w++){let v="";switch(w){case 0:v="outputCoords = rc;";break;case 1:v="outputCoords = ivec3(rc.x, rc.y+1, rc.z);";break;case 2:v="outputCoords = ivec3(rc.x, rc.y, rc.z+1);";break;case 3:v="outputCoords = ivec3(rc.x, rc.y+1, rc.z+1);";break;default:throw new Error}m+=`
- ${v}
- ${w>0?"if(outputCoords.y < rows && outputCoords.z < cols){":""}
- int flattenedIndex = getFlattenedIndex(outputCoords);
-
- ivec3 inputRC = inputCoordsFromReshapedOutCoords(flattenedIndex);
- vec2 innerDims = vec2(float(inputRC.y),float(inputRC.z));
-
- result[${w}] = getChannel(getA(inputRC.x, inputRC.y, inputRC.z), innerDims);
-
- ${w>0?"}":""}
- `}const b=(0,l.getGlsl)(t.session.backend.glContext.version),y=`
- ${function(w){const v=u.ShapeUtil.computeStrides(w),S=["b","r","c"],A="index";return`
- ivec3 inputCoordsFromReshapedOutCoords(int index) {
- ${v.map((O,x)=>`int ${S[x]} = ${A} / ${O}; ${x===v.length-1?`int ${S[x+1]} = ${A} - ${S[x]} * ${O}`:`index -= ${S[x]} * ${O}`};`).join("")}
- return ivec3(b, r, c);
- }
- `}(d)}
- ${function(w){const v=u.ShapeUtil.computeStrides(w);return`
- int getFlattenedIndex(ivec3 coords) {
- // reverse y, z order
- return coords.x * ${v[0]} + coords.z * ${v[1]} + coords.y;
- }
-`}(g)}
- ${(0,s.unpackFromChannel)()}
-
- void main() {
- ivec3 rc = getOutputCoords();
-
- vec4 result = vec4(0.0);
-
- ivec3 outputCoords;
- int rows = ${g[2]};
- int cols = ${g[1]};
-
- ${m}
- ${b.output} = result;
- }
- `;return Object.assign(Object.assign({},r),{output:{dims:g,type:e.type,textureType:p.TextureType.packed},shaderSource:y,hasMain:!0})})(h,f,o,c)})},n.processDims3D=function(h){if(h.length===0)return[1,1,1];let f=1;for(let c=0;c1?h[h.length-2]:1,h[h.length-1]]},n.isReshapeCheap=function(h,f){let c=!1;return c=h.length===0||f.length===0||(h.length<2||f.length<2?h[h.length-1]===f[f.length-1]:h[h.length-1]===f[f.length-1]&&h[h.length-2]===f[f.length-2]),c}},718:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.reshape=void 0;const u=a(2517);n.reshape=(l,p)=>{const s=u.ShapeUtil.calculateReshapedDims(p[0].dims,p[1].integerData);return l.session.pack?[l.reshapePacked(p[0],s)]:[l.reshapeUnpacked(p[0],s)]}},2268:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseResizeAttributesV11=n.parseResizeAttributesV10=n.resize=void 0;const u=a(5060),l=a(2039),p=a(9390),s=a(2827),h=a(9793),f={name:"Resize",inputNames:["A"],inputTypes:[l.TextureType.packed]};n.resize=(r,i,d)=>((0,h.validateInputs)(i,d),[r.run(Object.assign(Object.assign({},f),{cacheHint:d.cacheKey,get:()=>c(r,i,d)}),i)]),n.parseResizeAttributesV10=r=>(0,h.parseUpsampleAttributes)(r,10),n.parseResizeAttributesV11=r=>(0,h.parseUpsampleAttributes)(r,11);const c=(r,i,d)=>{const g=(0,u.getGlsl)(r.session.backend.glContext.version),[m,b]=o(i,d);if(m.every($=>$===1)&&d.coordinateTransformMode!=="tf_crop_and_resize")return Object.assign(Object.assign({},f),{output:{dims:b,type:i[0].type,textureType:l.TextureType.packed},hasMain:!0,shaderSource:`void main() {
- vec4 v = ${g.texture2D}(X, TexCoords);
- ${g.output} = v;
- }`});const y=b.length;if(y<2)throw new Error(`output dimension should be at least 2, but got ${y}`);const w=b[y-2],v=b[y-1],S=i[0].dims;if(y!==S.length)throw new Error(`output dimension should match input ${S.length}, but got ${y}`);const A=S[y-2],O=S[y-1],x=m[y-2],I=m[y-1];let N="";if(d.mode!=="linear")throw new Error(`resize (packed) does not support mode: '${d.mode}'`);switch(d.coordinateTransformMode){case"asymmetric":N=`
- vec4 getSourceFracIndex(ivec4 coords) {
- return vec4(coords) / scaleWHWH;
- }
- `;break;case"half_pixel":N=`
- vec4 getSourceFracIndex(ivec4 coords) {
- return (vec4(coords) + 0.5) / scaleWHWH - 0.5;
- }
- `;break;case"pytorch_half_pixel":N=`
- vec4 getSourceFracIndex(ivec4 coords) {
- vec4 fcoords = vec4(coords);
- return vec4(
- ${v}.0 > 1.0 ? (fcoords.x + 0.5) / scaleWHWH.x - 0.5 : 0.0,
- ${w}.0 > 1.0 ? (fcoords.y + 0.5) / scaleWHWH.y - 0.5 : 0.0,
- ${v}.0 > 1.0 ? (fcoords.z + 0.5) / scaleWHWH.z - 0.5 : 0.0,
- ${w}.0 > 1.0 ? (fcoords.w + 0.5) / scaleWHWH.w - 0.5 : 0.0
- );
- }
- `;break;case"align_corners":N=`
- vec4 getSourceFracIndex(ivec4 coords) {
- vec4 resized = vec4(${v}.0 - 1.0, ${w}.0 - 1.0, ${v}.0 - 1.0,
- ${w}.0 - 1.0);
- vec4 original = vec4(${O}.0 - 1.0, ${A}.0 - 1.0, ${O}.0 - 1.0,
- ${A}.0 - 1.0);
- vec4 new_scale = original / resized;
- return vec4(coords) * new_scale;
- }
- `;break;default:throw new Error(`resize (packed) does not support coordinateTransformMode: '${d.coordinateTransformMode}'`)}const R=(0,p.getCoordsDataType)(y),L=`
- const vec2 inputWH = vec2(${A}.0, ${O}.0);
- const vec4 scaleWHWH = vec4(float(${x}), float(${I}), float(${x}), float(${I}));
- ${(0,s.unpackFromChannel)()}
- ${N}
- float getAValue(int x10, int r, int c, int d) {
- return getChannel(getA(x10, r, c, d), vec2(c, d));
- }
- void main() {
- ${R} rc = getOutputCoords();
-
- int batch = rc[0];
- int depth = rc[1];
-
-      // retrieve the 4 coordinates that are used in the 4 packed output values.
- ivec4 coords = ivec4(rc.wz, rc.w + 1, rc.z + 1);
-
- // calculate the source index in fraction
- vec4 sourceFrac = getSourceFracIndex(coords);
-
- // get the lower and upper bound of the 4 values that will be packed into one texel.
- ivec4 x00 = ivec4(max(sourceFrac.xy, vec2(0.0)), min(inputWH - 1.0, ceil(sourceFrac.xy)));
- ivec4 x01 = ivec4(max(sourceFrac.xw, vec2(0.0)), min(inputWH - 1.0, ceil(sourceFrac.xw)));
- ivec4 x10 = ivec4(max(sourceFrac.zy, vec2(0.0)), min(inputWH - 1.0, ceil(sourceFrac.zy)));
- ivec4 x11 = ivec4(max(sourceFrac.zw, vec2(0.0)), min(inputWH - 1.0, ceil(sourceFrac.zw)));
-
- bool hasNextRow = rc.w < ${w-1};
- bool hasNextCol = rc.z < ${v-1};
-
- // pack x00, x01, x10, x11's top-left corner into one vec4 structure
- vec4 topLeft = vec4(
- getAValue(batch, depth, x00.x, x00.y),
- hasNextCol ? getAValue(batch, depth, x01.x, x01.y) : 0.0,
- hasNextRow ? getAValue(batch, depth, x10.x, x10.y) : 0.0,
- (hasNextRow && hasNextCol) ? getAValue(batch, depth, x11.x, x11.y) : 0.0);
-
- // pack x00, x01, x10, x11's top-right corner into one vec4 structure
- vec4 topRight = vec4(
- getAValue(batch, depth, x00.x, x00.w),
- hasNextCol ? getAValue(batch, depth, x01.x, x01.w) : 0.0,
- hasNextRow ? getAValue(batch, depth, x10.x, x10.w) : 0.0,
- (hasNextRow && hasNextCol) ? getAValue(batch, depth, x11.x, x11.w) : 0.0);
-
- // pack x00, x01, x10, x11's bottom-left corner into one vec4 structure
- vec4 bottomLeft = vec4(
- getAValue(batch, depth, x00.z, x00.y),
- hasNextCol ? getAValue(batch, depth, x01.z, x01.y) : 0.0,
- hasNextRow ? getAValue(batch, depth, x10.z, x10.y) : 0.0,
- (hasNextRow && hasNextCol) ? getAValue(batch, depth, x11.z, x11.y) : 0.0);
-
- // pack x00, x01, x10, x11's bottom-right corner into one vec4 structure
- vec4 bottomRight = vec4(
- getAValue(batch, depth, x00.z, x00.w),
- hasNextCol ? getAValue(batch, depth, x01.z, x01.w) : 0.0,
- hasNextRow ? getAValue(batch, depth, x10.z, x10.w) : 0.0,
- (hasNextRow && hasNextCol) ? getAValue(batch, depth, x11.z, x11.w) : 0.0);
-
- // calculate the interpolation fraction on u and v direction
- vec4 frac = vec4(sourceFrac) - floor(sourceFrac);
- vec4 clampFrac = clamp(frac, vec4(0.0), vec4(1.0));
-
- vec4 top = mix(topLeft, topRight, clampFrac.ywyw);
- vec4 bottom = mix(bottomLeft, bottomRight, clampFrac.ywyw);
- vec4 newValue = mix(top, bottom, clampFrac.xxzz);
-
- ${g.output} = vec4(newValue);
- }
- `;return Object.assign(Object.assign({},f),{output:{dims:b,type:i[0].type,textureType:l.TextureType.packed},hasMain:!0,shaderSource:L})},o=(r,i)=>{const d=r[0].dims;let g,m=i.scales;if(m.length===0){const y=r[i.scalesInputIdx];if(y&&y.size!==0){if(r[i.sizesInputIdx])throw new Error("Only one of scales or sizes must be provided as input.");m=t(y,i.mode,i.isResize)}else{const w=r[i.sizesInputIdx];if(!w||w.size===0)throw new Error("Either scales or sizes MUST be provided as input.");g=Array.from(w.integerData),m=e(g,d,i.mode,i.isResize)}}else if(r[i.sizesInputIdx])throw new Error("Only one of scales or sizes must be provided as input.");const b=g||d.map((y,w)=>Math.floor(y*m[w]));return[m,b]},t=(r,i,d)=>{const g=Array.from(r.floatData);return(0,h.scalesValidation)(g,i,d),g},e=(r,i,d,g)=>{const m=i.length,b=new Array(m);for(let y=0,w=m;y{Object.defineProperty(n,"__esModule",{value:!0}),n.shape=void 0;const u=a(9162);n.shape=(p,s)=>(l(s),[new u.Tensor([s[0].dims.length],"int32",void 0,void 0,new Int32Array(s[0].dims))]);const l=p=>{if(!p||p.length!==1)throw new Error("Shape requires 1 input.")}},2278:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.sliceV10=n.parseSliceAttributes=n.slice=void 0;const u=a(246),l=a(782),p=a(2517),s=a(2039),h={name:"Slice",inputNames:["A"],inputTypes:[s.TextureType.unpacked]};n.slice=(e,r,i)=>(c(r),[e.run(Object.assign(Object.assign({},h),{cacheHint:i.cacheKey,get:()=>f(e,r[0],i)}),r)]),n.parseSliceAttributes=e=>{const r=e.attributes.getInts("starts"),i=e.attributes.getInts("ends"),d=e.attributes.getInts("axes",[]);return(0,u.createAttributeWithCacheKey)({starts:r,ends:i,axes:d})};const f=(e,r,i)=>{const d=i.axes.length===0?r.dims.slice(0).map((S,A)=>A):i.axes,g=p.ShapeUtil.normalizeAxes(d,r.dims.length),m=i.starts.map((S,A)=>S>r.dims[g[A]]-1?r.dims[g[A]]:p.ShapeUtil.normalizeAxis(S,r.dims[g[A]])),b=i.ends.map((S,A)=>S>r.dims[g[A]]-1?r.dims[g[A]]:p.ShapeUtil.normalizeAxis(S,r.dims[g[A]])),y=r.dims.slice(),w=[];for(let S=0;S0&&w.push(`outputIdx[${g[S]}] += ${m[S]};`);const v=`
- float process(int outputIdx[${y.length}]) {
- ${w.join(`
- `)}
- return _A(outputIdx);
- }`;return Object.assign(Object.assign({},h),{output:{dims:y,type:r.type,textureType:s.TextureType.unpacked},shaderSource:v})},c=e=>{if(!e||e.length!==1)throw new Error("Slice requires 1 input.");if(l.NUMBER_TYPES.indexOf(e[0].type)===-1)throw new Error("Invalid input type.")};n.sliceV10=(e,r)=>{t(r);const i=o(e,r);return[e.run(Object.assign(Object.assign({},h),{cacheHint:i.cacheKey,get:()=>f(e,r[0],i)}),[r[0]])]};const o=(e,r)=>{if(!e.session.isInitializer(r[1].dataId)||!e.session.isInitializer(r[2].dataId)||r.length>=4&&!e.session.isInitializer(r[3].dataId)||r.length>=5&&!e.session.isInitializer(r[4].dataId))throw new Error("dynamic slice attributes are not allowed");if(r.length>=5&&r[4].integerData.some(m=>m!==1))throw new Error("currently non-1 steps is not supported for Slice");const i=Array.from(r[1].integerData),d=Array.from(r[2].integerData),g=r.length>=4?Array.from(r[3].integerData):[];return{starts:i,ends:d,axes:g,cacheKey:`${g};${i};${d}`}},t=e=>{if(!e||e.length<3||e.length>5)throw new Error("Invalid input number.");if(e[1].type!=="int32"||e[1].dims.length!==1)throw new Error("Invalid input type.");if(e[2].type!=="int32"||e[2].dims.length!==1)throw new Error("Invalid input type.");if(e.length>=4&&(e[3].type!=="int32"||e[3].dims.length!==1))throw new Error("Invalid input type.");if(e.length>=5&&(e[4].type!=="int32"||e[4].dims.length!==1))throw new Error("Invalid input type.")}},5524:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.softmaxV13=n.parseSoftmaxAttributesV13=n.parseSoftmaxAttributes=n.softmax=void 0;const u=a(246),l=a(2517),p=a(5060),s=a(2039),h=a(3738),f={name:"SoftmaxComputeMax",inputNames:["A"],inputTypes:[s.TextureType.unpacked]},c={name:"SoftmaxComputeScale",inputNames:["A","Max"],inputTypes:[s.TextureType.unpacked,s.TextureType.unpacked]},o={name:"SoftMax",inputNames:["A","Max","Norm"],inputTypes:[s.TextureType.unpacked,s.TextureType.unpacked,s.TextureType.unpacked]};n.softmax=(g,m,b)=>{d(m);const y=m[0].dims.slice(),w=l.ShapeUtil.normalizeAxis(b.axis,y.length),v=l.ShapeUtil.sizeToDimension(y,w),S=l.ShapeUtil.sizeFromDimension(y,w);return t(g,m,b,v,S)},n.parseSoftmaxAttributes=g=>(0,u.createAttributeWithCacheKey)({axis:g.attributes.getInt("axis",1)}),n.parseSoftmaxAttributesV13=g=>(0,u.createAttributeWithCacheKey)({axis:g.attributes.getInt("axis",-1)}),n.softmaxV13=(g,m,b)=>{d(m);const y=m[0].dims.slice(),w=l.ShapeUtil.normalizeAxis(b.axis,y.length),v=y.length,S=w!==v-1,A=[];let O,x=[],I=[];S&&(x=Array.from({length:v}).map(($,G)=>G),x[w]=v-1,x[v-1]=w,x.map($=>A.push(y[$])),O=(0,u.createAttributeWithCacheKey)({perm:x}),I=(0,h.transpose)(g,m,O));const N=S?l.ShapeUtil.sizeToDimension(A,v-1):l.ShapeUtil.sizeToDimension(y,v-1),R=S?l.ShapeUtil.sizeFromDimension(A,v-1):l.ShapeUtil.sizeFromDimension(y,v-1),L=t(g,S?I:m,b,N,R);return S?(0,h.transpose)(g,L,O):L};const t=(g,m,b,y,w)=>{const v=e(g,m[0],y,w,[y]),S=g.run(Object.assign(Object.assign({},f),{cacheHint:b.cacheKey,get:()=>v}),m),A=r(g,m[0],y,w,v.output.dims,[y]),O=g.run(Object.assign(Object.assign({},c),{cacheHint:b.cacheKey,get:()=>A}),[m[0],S]),x=i(g,m[0],y,w,v.output.dims,A.output.dims);return[g.run(Object.assign(Object.assign({},o),{cacheHint:b.cacheKey,get:()=>x}),[m[0],S,O])]},e=(g,m,b,y,w)=>{const[v,S]=g.calculateTextureWidthAndHeight(m.dims,s.TextureType.unpacked),A=w.length;if(b<1||y<1)throw new Error("Logical row count N and feature count D must be greater than or equal to 1");if(w.length!==1)throw new Error("Dimensionality of the output should be 1");if(w[0]!==b)throw new Error("Shape of the 
output should be equal to logical row count");const O=(0,p.getGlsl)(g.session.backend.glContext.version),x=`
- float process(int[${A}] indices) {
- int logical_row_start_offset = indices[0] * ${y};
-
- float max = getColorAsFloat(${O.texture2D}(A, offsetToCoords(logical_row_start_offset, ${v},
- ${S} )));
- for(int i=1; i<${y}; ++i)
- {
- float current = getColorAsFloat(${O.texture2D}(A, offsetToCoords(logical_row_start_offset + i,
- ${v}, ${S})));
- if(current > max)
- max = current;
- }
-
- return max;
- }`;return Object.assign(Object.assign({},f),{output:{dims:w,type:m.type,textureType:s.TextureType.unpacked},shaderSource:x})},r=(g,m,b,y,w,v)=>{const[S,A]=g.calculateTextureWidthAndHeight(m.dims,s.TextureType.unpacked),O=v.length;if(b<1||y<1)throw new Error("Logical row count N and feature count D must be greater than or equal to 1");if(v.length!==1)throw new Error("Dimensionality of the output should be 1");if(v[0]!==b)throw new Error("Shape of the output should be equal to logical row count");if(w.length!==1)throw new Error("Dimensionality of the intermediate results should be 1");if(w[0]!==b)throw new Error("Shape of the intermediate results should be equal to logical row count");const x=`
- float process(int[${O}] indices) {
- int logical_row_start_offset = indices[0] * ${y};
-
- float norm_factor = 0.0;
- float max = _Max(indices);
- for(int i=0; i<${y}; ++i)
- {
- norm_factor += exp(getColorAsFloat(${(0,p.getGlsl)(g.session.backend.glContext.version).texture2D}(A, offsetToCoords(logical_row_start_offset + i,
- ${S}, ${A}))) - max);
- }
-
- return norm_factor;
- }`;return Object.assign(Object.assign({},c),{output:{dims:v,type:m.type,textureType:s.TextureType.unpacked},shaderSource:x})},i=(g,m,b,y,w,v)=>{const[S,A]=g.calculateTextureWidthAndHeight(m.dims,s.TextureType.unpacked),O=m.dims.length;if(b<1||y<1)throw new Error("Logical row count N and feature count D must be greater than or equal to 1");if(w.length!==1||v.length!==1)throw new Error("Dimensionality of the intermediate results should be 1");if(w[0]!==b||v[0]!==b)throw new Error("Shape of the intermediate results should be equal to logical row count");const x=`
- float process(int[${O}] indices) {
-
- // get offset of current logical tensor index from the 2-D texture coordinates (TexCoords)
- int offset = coordsToOffset(TexCoords, ${S}, ${A});
-
- // determine the logical row for this index
- int logical_row_index[1];
- logical_row_index[0] = offset / ${y};
-
- float norm_factor = _Norm(logical_row_index);
-
- // avoid possible division by 0
- // if norm_factor is 0, all elements are zero
- // if so, return 0
- if(norm_factor == 0.0)
- return 0.0;
-
- return exp(_A(indices) - _Max(logical_row_index)) / norm_factor;
- }`;return Object.assign(Object.assign({},o),{output:{dims:m.dims,type:m.type,textureType:s.TextureType.unpacked},shaderSource:x})},d=g=>{if(!g||g.length!==1)throw new Error("Softmax requires 1 input.");if(g[0].type!=="float32"&&g[0].type!=="float64")throw new Error("Invalid input type")}},5975:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseSplitAttributes=n.split=void 0;const u=a(246),l=a(2517),p=a(2039),s={name:"Split",inputNames:["A"],inputTypes:[p.TextureType.unpacked]};n.split=(o,t,e)=>{c(t);const r=l.ShapeUtil.normalizeAxis(e.axis,t[0].dims.length),i=h(o,t,r,e),d=[];for(let g=0;gf(o,t[0],e,r,g)}),t));return d},n.parseSplitAttributes=o=>{const t=o.attributes.getInt("axis",0),e=o.attributes.getInts("split",[]),r=o.outputs.length;return(0,u.createAttributeWithCacheKey)({axis:t,split:e,numOutputs:r})};const h=(o,t,e,r)=>{const[,i]=l.SplitUtil.splitShape(t[0].dims,e,r.split,r.numOutputs);return i.length},f=(o,t,e,r,i)=>{const[d,g]=l.SplitUtil.splitShape(t.dims,r,e.split,e.numOutputs),m=g[i],b=d[i],y=`
- float process(int indices[${b.length}]) {
- indices[${r}] += ${m};
- return _A(indices);
- }
- `;return Object.assign(Object.assign({},s),{cacheHint:`${e.cacheKey}:${i}`,output:{dims:b,type:t.type,textureType:p.TextureType.unpacked},shaderSource:y})},c=o=>{if(!o||o.length!==1)throw new Error("Split requires one input.");if(o[0].type!=="int8"&&o[0].type!=="uint8"&&o[0].type!=="int16"&&o[0].type!=="uint16"&&o[0].type!=="int32"&&o[0].type!=="uint32"&&o[0].type!=="float32"&&o[0].type!=="float64"&&o[0].type!=="bool")throw new Error("Invalid input type.")}},3933:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseSqueezeAttributes=n.squeezeV13=n.squeeze=void 0;const u=a(2517);n.squeeze=(s,h,f)=>{l(h);const c=u.ShapeUtil.squeezeShape(h[0].dims,f);return[s.reshapeUnpacked(h[0],c)]},n.squeezeV13=(s,h)=>(p(h),(0,n.squeeze)(s,[h[0]],Array.from(h[1].integerData))),n.parseSqueezeAttributes=s=>s.attributes.getInts("axes");const l=s=>{if(!s||s.length!==1)throw new Error("Squeeze requires 1 input.");if(s[0].type==="string")throw new Error("invalid input tensor types.")},p=s=>{if(!s||s.length!==2)throw new Error("Squeeze requires 2 inputs.");if(s[1].type!=="int32")throw new Error("Invalid input type.")}},6558:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.sum=void 0;const u=a(5060),l=a(2039);n.sum=(h,f)=>{s(f);const c={name:"Sum",inputNames:f.map((o,t)=>`X${t}`),inputTypes:new Array(f.length).fill(l.TextureType.unpacked)};return[h.run(Object.assign(Object.assign({},c),{get:()=>p(h,f,c)}),f)]};const p=(h,f,c)=>{const o=(0,u.getGlsl)(h.session.backend.glContext.version),t=f[0].dims.slice(),e=`
- void main() {
- vec4 result = ${f.map((r,i)=>`${o.texture2D}(X${i},TexCoords)`).join(" + ")};
- ${o.output} = result;
- }
- `;return Object.assign(Object.assign({},c),{output:{dims:t,type:f[0].type,textureType:l.TextureType.unpacked},hasMain:!0,shaderSource:e})},s=h=>{if(!h||h.length===0)throw new Error("Sum requires inputs.");const f=h[0].dims.length;for(let c=1;c{Object.defineProperty(n,"__esModule",{value:!0}),n.tile=void 0;const u=a(782),l=a(2039);n.tile=(h,f)=>{s(f);const c={name:"Tile",inputNames:["A"],inputTypes:[l.TextureType.unpacked]};return[h.run(Object.assign(Object.assign({},c),{get:()=>p(h,f,c)}),f)]};const p=(h,f,c)=>{const o=f[0].dims.slice(),t=new Array(o.length),e=[];for(let d=0;d{if(!h||h.length!==2)throw new Error("Tile requires 2 input.");if(h[1].dims.length!==1)throw new Error("The second input shape must 1 dimension.");if(h[1].dims[0]!==h[0].dims.length)throw new Error("Invalid input shape.");if(u.NUMBER_TYPES.indexOf(h[0].type)===-1)throw new Error("Invalid input type.");if(h[1].type!=="int32"&&h[1].type!=="int16")throw new Error("Invalid repeat type.")}},3738:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseTransposeAttributes=n.transpose=void 0;const u=a(246),l=a(2517),p=a(2039),s={name:"Transpose",inputNames:["A"],inputTypes:[p.TextureType.unpacked]};n.transpose=(e,r,i)=>(t(r),[e.run(Object.assign(Object.assign({},s),{cacheHint:i.cacheKey,get:()=>h(e,r[0],i.perm)}),r)]),n.parseTransposeAttributes=e=>(0,u.createAttributeWithCacheKey)({perm:e.attributes.getInts("perm",[])});const h=(e,r,i)=>{const d=r.dims;i=f(d,i);const g=c(d,i),m=d.length,b=`
- ${o("perm",i,m)}
- float process(int indices[${m}]) {
- int a[${m}];
- perm(a, indices);
- return _A(a);
- }`;return Object.assign(Object.assign({},s),{output:{dims:g,type:r.type,textureType:p.TextureType.unpacked},shaderSource:b})},f=(e,r)=>(r&&r.length!==e.length&&(r=[...e.keys()].reverse()),r),c=(e,r)=>(r=f(e,r),l.ShapeUtil.sortBasedOnPerm(e,r)),o=(e,r,i)=>{const d=[];d.push(`void ${e}(out int a[${i}], int src[${i}]) {`);for(let g=0;g{if(!e||e.length!==1)throw new Error("Transpose requires 1 input.");if(e[0].type!=="float32"&&e[0].type!=="float64")throw new Error("input should be float tensor")}},8710:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.encodeAsUint8=void 0;const u=a(5060),l=a(2039);n.encodeAsUint8=(p,s)=>{const h=s.shape,f=(0,u.getGlsl)(p.session.backend.glContext.version),c=`
- const float FLOAT_MAX = 1.70141184e38;
- const float FLOAT_MIN = 1.17549435e-38;
-
- bool isNaN(float val) {
- return (val < 1.0 || 0.0 < val || val == 0.0) ? false : true;
- }
-
- highp vec4 encodeAsUint8(highp float v) {
- if (isNaN(v)) {
- return vec4(255, 255, 255, 255);
- }
-
- highp float av = abs(v);
-
- if(av < FLOAT_MIN) {
- return vec4(0.0, 0.0, 0.0, 0.0);
- } else if(v > FLOAT_MAX) {
- return vec4(0.0, 0.0, 128.0, 127.0) / 255.0;
- } else if(v < -FLOAT_MAX) {
- return vec4(0.0, 0.0, 128.0, 255.0) / 255.0;
- }
-
- highp vec4 c = vec4(0,0,0,0);
-
- highp float e = floor(log2(av));
- highp float m = exp2(fract(log2(av))) - 1.0;
-
- c[2] = floor(128.0 * m);
- m -= c[2] / 128.0;
- c[1] = floor(32768.0 * m);
- m -= c[1] / 32768.0;
- c[0] = floor(8388608.0 * m);
-
- highp float ebias = e + 127.0;
- c[3] = floor(ebias / 2.0);
- ebias -= c[3] * 2.0;
- c[2] += floor(ebias) * 128.0;
-
- c[3] += 128.0 * step(0.0, -v);
-
- return c / 255.0;
- }
-
- void main() {
- float value = ${f.texture2D}(X,TexCoords).r;
- ${f.output} = encodeAsUint8(value);
- }`,o={name:"Uint8Encode",inputTypes:[l.TextureType.unpacked],inputNames:["X"],output:{dims:h,type:s.tensor.type,textureType:l.TextureType.downloadUint8AsFloat},shaderSource:c,hasMain:!0};return p.executeProgram(o,[s.tensor])}},4909:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.tanh=n.tan=n.sqrt=n.sin=n.sigmoid=n.relu=n.not=n.neg=n.log=n.parseLeakyReluAttributes=n.leakyRelu=n.identity=n.floor=n.exp=n.parseEluAttributes=n.elu=n.cos=n.ceil=n.clipV11=n.parseClipAttributes=n.clip=n.atan=n.asin=n.acos=n.abs=n.glslTanh=n.glslTan=n.glslSqrt=n.glslSigmoid=n.glslRelu=n.glslSin=n.glslNot=n.glslNeg=n.glslLog=n.glslLeakyRelu=n.glslIdentity=n.glslClip=n.glslFloor=n.glslExp=n.glslElu=n.glslCos=n.glslCeil=n.glslAtan=n.glslAsin=n.glslAcos=n.glslAbs=void 0;const u=a(246),l=a(2517),p=a(8520),s=a(5060),h=a(2039);function f(){return L("abs")}function c(){return L("acos")}function o(){return L("asin")}function t(){return L("atan")}function e(){return L("ceil")}function r(){return L("cos")}function i(D){const j="elu";return{body:`
- const float alpha = float(${D});
-
- float ${j}_(float a) {
- return a >= 0.0 ? a: (exp(a) - 1.0) * alpha;
- }
- vec4 ${j}_(vec4 v) {
- return vec4(${j}_(v.x), ${j}_(v.y), ${j}_(v.z), ${j}_(v.w));
- }
- `,name:j,type:p.FunctionType.ValueBased}}function d(){return L("exp")}function g(){return L("floor")}function m(D,j){const Z="clip";return{body:`
- const float min = float(${D});
- const float max = float(${j});
-
- float ${Z}_(float a) {
- return clamp(a, min, max);
- }
- vec4 ${Z}_(vec4 v) {
- return clamp(v, min, max);
- }
- `,name:Z,type:p.FunctionType.ValueBased}}function b(){const D="identity";return{body:`
- float ${D}_(float a) {
- return a;
- }
- vec4 ${D}_(vec4 v) {
- return v;
- }
- `,name:D,type:p.FunctionType.ValueBased}}function y(D){const j="leakyRelu";return{body:`
- const float alpha = float(${D});
-
- float ${j}_(float a) {
- return a < 0.0 ? a * alpha : a;
- }
- vec4 ${j}_(vec4 v) {
- return vec4(${j}_(v.x), ${j}_(v.y), ${j}_(v.z), ${j}_(v.w));
- }
- `,name:j,type:p.FunctionType.ValueBased}}function w(){return L("log")}function v(){const D="neg";return{body:`
- float ${D}_(float a) {
- return -a;
- }
- vec4 ${D}_(vec4 v) {
- return -v;
- }
- `,name:D,type:p.FunctionType.ValueBased}}function S(){const D="not";return{body:`
- float ${D}_(float a) {
- return float( ! bool(a) );
- }
- bool ${D}_(bool a) {
- return !a;
- }
- vec4 ${D}_(vec4 v) {
- return vec4(!bool(v.x), !bool(v.y), !bool(v.z), !bool(v.w));
- }
- bvec4 ${D}_(bvec4 v) {
- return bvec4(!v.x, !v.y, !v.z, !v.w);
- }
- `,name:D,type:p.FunctionType.ValueBased}}function A(){return L("sin")}function O(){const D="relu";return{body:`
- float ${D}_(float a) {
- return max( a, 0.0 );
- }
- vec4 ${D}_(vec4 v) {
- return max( v, 0.0 );
- }
- `,name:D,type:p.FunctionType.ValueBased}}function x(){const D="sigmoid";return{body:`
- float ${D}_(float a) {
- return 1.0 / (1.0 + exp(-a));
- }
- vec4 ${D}_(vec4 v) {
- return 1.0 / (1.0 + exp(-v));
- }
- `,name:D,type:p.FunctionType.ValueBased}}function I(){return L("sqrt")}function N(){return L("tan")}function R(){const D="tanh";return{body:`
- float ${D}_(float a) {
- a = clamp(a, -10., 10.);
- a = exp(2.*a);
- return (a - 1.) / (a + 1.);
- }
- vec4 ${D}_(vec4 v) {
- v = clamp(v, -10., 10.);
- v = exp(2.*v);
- return (v - 1.) / (v + 1.);
- }
- `,name:D,type:p.FunctionType.ValueBased}}function L(D){return{body:`
- float ${D}_(float a) {
- return ${D}(a);
- }
- vec4 ${D}_(vec4 v) {
- return ${D}(v);
- }
- `,name:D,type:p.FunctionType.ValueBased}}n.glslAbs=f,n.glslAcos=c,n.glslAsin=o,n.glslAtan=t,n.glslCeil=e,n.glslCos=r,n.glslElu=i,n.glslExp=d,n.glslFloor=g,n.glslClip=m,n.glslIdentity=b,n.glslLeakyRelu=y,n.glslLog=w,n.glslNeg=v,n.glslNot=S,n.glslSin=A,n.glslRelu=O,n.glslSigmoid=x,n.glslSqrt=I,n.glslTan=N,n.glslTanh=R;const $=(D,j,Z,X)=>{const J=D.session.pack?h.TextureType.packed:h.TextureType.unpacked,ee={name:Z.name,inputTypes:[J],inputNames:["A"],cacheHint:X};return Object.assign(Object.assign({},ee),{get:()=>((ue,Se,ve,oe)=>{const ye=ue.session.pack?h.TextureType.packed:h.TextureType.unpacked,be=(0,s.getGlsl)(ue.session.backend.glContext.version);return Object.assign(Object.assign({},Se),{output:{dims:ve.dims,type:ve.type,textureType:ye},shaderSource:`
- ${oe.body}
- void main() {
- vec4 v = ${be.texture2D}(A, TexCoords);
- v = ${oe.name}_(v);
- ${be.output} = v;
- }
- `,hasMain:!0})})(D,ee,j,Z)})};n.abs=(D,j)=>[D.run($(D,j[0],f()),j)],n.acos=(D,j)=>[D.run($(D,j[0],c()),j)],n.asin=(D,j)=>[D.run($(D,j[0],o()),j)],n.atan=(D,j)=>[D.run($(D,j[0],t()),j)],n.clip=(D,j,Z)=>[D.run($(D,j[0],m(Z.min,Z.max),Z.cacheKey),j)],n.parseClipAttributes=D=>(0,u.createAttributeWithCacheKey)({min:D.attributes.getFloat("min",l.MIN_CLIP),max:D.attributes.getFloat("max",l.MAX_CLIP)}),n.clipV11=(D,j)=>{const Z=G(D,j);return(0,n.clip)(D,[j[0]],Z)};const G=(D,j)=>{if(j.length>=3&&(!D.session.isInitializer(j[1].dataId)||!D.session.isInitializer(j[2].dataId)))throw new Error("dynamic clip attributes are not allowed");const Z=j.length>=3?j[1].numberData[0]:l.MIN_CLIP,X=j.length>=3?j[2].numberData[0]:l.MAX_CLIP;return(0,u.createAttributeWithCacheKey)({min:Z,max:X})};n.ceil=(D,j)=>[D.run($(D,j[0],e()),j)],n.cos=(D,j)=>[D.run($(D,j[0],r()),j)],n.elu=(D,j,Z)=>[D.run($(D,j[0],i(Z.alpha),Z.cacheKey),j)],n.parseEluAttributes=D=>(0,u.createAttributeWithCacheKey)({alpha:D.attributes.getFloat("alpha",1)}),n.exp=(D,j)=>[D.run($(D,j[0],d()),j)],n.floor=(D,j)=>[D.run($(D,j[0],g()),j)],n.identity=(D,j)=>[D.run($(D,j[0],b()),j)],n.leakyRelu=(D,j,Z)=>[D.run($(D,j[0],y(Z.alpha),Z.cacheKey),j)],n.parseLeakyReluAttributes=D=>(0,u.createAttributeWithCacheKey)({alpha:D.attributes.getFloat("alpha",.01)}),n.log=(D,j)=>[D.run($(D,j[0],w()),j)],n.neg=(D,j)=>[D.run($(D,j[0],v()),j)],n.not=(D,j)=>[D.run($(D,j[0],S()),j)],n.relu=(D,j)=>[D.run($(D,j[0],O()),j)],n.sigmoid=(D,j)=>[D.run($(D,j[0],x()),j)],n.sin=(D,j)=>[D.run($(D,j[0],A()),j)],n.sqrt=(D,j)=>[D.run($(D,j[0],I()),j)],n.tan=(D,j)=>[D.run($(D,j[0],N()),j)],n.tanh=(D,j)=>[D.run($(D,j[0],R()),j)]},5611:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createUnpackProgramInfoLoader=n.createUnpackProgramInfo=void 0;const u=a(5060),l=a(2039),p=a(9390),s=a(2827),h={name:"unpack",inputNames:["A"],inputTypes:[l.TextureType.packed]};n.createUnpackProgramInfo=(f,c)=>{const o=c.dims.length,t=(0,s.getChannels)("rc",o),e=t.slice(-2),r=(0,p.getCoordsDataType)(o),i=(0,s.unpackFromChannel)(),d=c.dims.length===0?"":function(b,y){if(b===1)return"rc";let w="";for(let v=0;vObject.assign(Object.assign({},h),{get:()=>(0,n.createUnpackProgramInfo)(f,c)})},8428:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseUnsqueezeAttributes=n.unsqueezeV13=n.unsqueeze=void 0;const u=a(2517);n.unsqueeze=(s,h,f)=>{l(h);const c=u.ShapeUtil.unsqueezeShape(h[0].dims,f);return[s.reshapeUnpacked(h[0],c)]},n.unsqueezeV13=(s,h)=>(p(h),(0,n.unsqueeze)(s,[h[0]],Array.from(h[1].integerData))),n.parseUnsqueezeAttributes=s=>s.attributes.getInts("axes");const l=s=>{if(!s||s.length!==1)throw new Error("Unsqueeze requires 1 input.");if(s[0].type==="string")throw new Error("invalid input tensor types.")},p=s=>{if(!s||s.length!==2)throw new Error("Unsqueeze requires 2 inputs.");if(s[1].type!=="int32")throw new Error("Invalid input type.")}},9793:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.scalesValidation=n.validateInputs=n.parseUpsampleAttributes=n.parseUpsampleAttributesV9=n.parseUpsampleAttributesV7=n.upsample=void 0;const u=a(246),l=a(5060),p=a(2039),s={name:"Upsample",inputNames:["X"],inputTypes:[p.TextureType.unpacked]};n.upsample=(f,c,o)=>((0,n.validateInputs)(c,o),[f.run(Object.assign(Object.assign({},s),{cacheHint:o.cacheKey,get:()=>h(f,c,o)}),c)]),n.parseUpsampleAttributesV7=f=>(0,n.parseUpsampleAttributes)(f,7),n.parseUpsampleAttributesV9=f=>(0,n.parseUpsampleAttributes)(f,9),n.parseUpsampleAttributes=(f,c)=>{const 
o=c>=10,t=f.attributes.getString("mode","nearest");if(t!=="nearest"&&t!=="linear"&&(c<11||t!=="cubic"))throw new Error(`unrecognized mode: ${t}`);let e=[];c<9&&(e=f.attributes.getFloats("scales"),(0,n.scalesValidation)(e,t,o));const r=f.attributes.getFloat("extrapolation_value",0),i=c>10?f.attributes.getString("coordinate_transformation_mode","half_pixel"):"asymmetric";if(["asymmetric","pytorch_half_pixel","tf_half_pixel_for_nn","align_corners","tf_crop_and_resize","half_pixel"].indexOf(i)===-1)throw new Error(`coordinate_transform_mode '${i}' is not supported`);const d=i==="tf_crop_and_resize",g=d,m=t==="nearest"&&c>=11?f.attributes.getString("nearest_mode","round_prefer_floor"):"";if(["round_prefer_floor","round_prefer_ceil","floor","ceil",""].indexOf(m)===-1)throw new Error(`nearest_mode '${m}' is not supported`);const b=f.attributes.getFloat("cubic_coeff_a",-.75),y=f.attributes.getInt("exclude_outside",0)!==0;if(y&&t!=="cubic")throw new Error("exclude_outside can be set to 1 only when mode is CUBIC.");const w=c<11||t==="nearest"&&i==="asymmetric"&&m==="floor";let v=0,S=0,A=0;return c>10?f.inputs.length>2?(v=1,S=2,A=3):(S=1,A=2):c===9&&(S=1),(0,u.createAttributeWithCacheKey)({opset:c,isResize:o,mode:t,scales:e,extrapolationValue:r,coordinateTransformMode:i,useExtrapolation:g,needRoiInput:d,nearestMode:m,cubicCoefficientA:b,excludeOutside:y,useNearest2xOptimization:w,roiInputIdx:v,scalesInputIdx:S,sizesInputIdx:A})};const h=(f,c,o)=>{const t=(0,l.getGlsl)(f.session.backend.glContext.version),[e,r]=f.calculateTextureWidthAndHeight(c[0].dims,p.TextureType.unpacked),i=c[0].dims.map((A,O)=>Math.floor(A*o.scales[O])),[d,g]=f.calculateTextureWidthAndHeight(i,p.TextureType.unpacked),m=i.length,b=new Array(m),y=new Array(m);let w=`
- int output_pitches[${m}];
- int input_pitches[${m}];
- `;for(let A=m-1;A>=0;A--)b[A]=A===m-1?1:b[A+1]*i[A+1],y[A]=A===m-1?1:y[A+1]*c[0].dims[A+1],w+=`
- output_pitches[${A}] = ${b[A]};
- input_pitches[${A}] = ${y[A]};
- `;const v=`
- float getInputFloat(int index) {
- vec2 coords = offsetToCoords(index, ${e}, ${r});
- float value = getColorAsFloat(${t.texture2D}(X, coords));
- return value;
- }
- `,S=o.mode==="nearest"?`
- ${v}
- float process(int indices[${m}]) {
- int input_index = 0;
- int output_index = coordsToOffset(TexCoords, ${d}, ${g});
-
- ${w}
-
- int d, m;
- for (int dim = 0; dim < ${m}; ++dim) {
- d = output_index / output_pitches[dim];
- m = output_index - d * output_pitches[dim];
- output_index = m;
-
- if (scales[dim] != 1 && d > 0) {
- int d2 = d / scales[dim];
- m = d - d2 * scales[dim];
- d = d2;
- }
- input_index += input_pitches[dim] * d;
- }
-
- return getInputFloat(input_index);
- }`:m===4?`
- ${v}
- float process(int indices[4]) {
- int input_index = 0;
- int output_index = coordsToOffset(TexCoords, ${d}, ${g});
-
- ${w}
-
- int m;
- int index_of_dim0, index_of_dim1, index_of_dim2, index_of_dim3;
- index_of_dim0 = output_index / output_pitches[0];
- m = output_index - index_of_dim0 * output_pitches[0];
- index_of_dim1 = m / output_pitches[1];
- m = m - index_of_dim1 * output_pitches[1];
- index_of_dim2 = m / output_pitches[2];
- m = m - index_of_dim2 * output_pitches[2];
- index_of_dim3 = m;
-
- int index_of_input_dim2, index_of_input_dim3, x_offset, y_offset;
- index_of_input_dim2 = index_of_dim2 / scales[2];
- y_offset = index_of_dim2 - index_of_input_dim2 * scales[2];
- index_of_input_dim3 = index_of_dim3 / scales[3];
- x_offset = index_of_dim3 - index_of_input_dim3 * scales[3];
-
- input_index = index_of_dim0 * input_pitches[0] +
- index_of_dim1 * input_pitches[1] +
- index_of_input_dim2 * input_pitches[2] +
- index_of_input_dim3;
-
- float x00 = getInputFloat(input_index);
- float x10, x01, x11;
-
- bool end_of_dim2 = false;
- if (index_of_input_dim2 == (${c[0].dims[2]} - 1)) {
- // It's the end in dimension 2
- x01 = x00;
- end_of_dim2 = true;
- } else {
- x01 = getInputFloat(input_index + input_pitches[2]);
- }
-
- if (index_of_input_dim3 == (input_pitches[2] - 1)) {
- // It's the end in dimension 3
- x10 = x00;
- x11 = x01;
- }
- else {
- x10 = getInputFloat(input_index + 1);
- x11 = end_of_dim2 ? x10 : getInputFloat(input_index + input_pitches[2] + 1);
- }
-
- float y0 = x00 + float(y_offset) * (x01 - x00) / float(scales[2]);
- float y1 = x10 + float(y_offset) * (x11 - x10) / float(scales[2]);
- return y0 + float(x_offset) * (y1 - y0) / float(scales[3]);
- }`:`
- ${v}
- float process(int indices[2]) {
- int input_index = 0;
- int output_index = coordsToOffset(TexCoords, ${d}, ${g});
-
- ${w}
-
- int m;
- int index_of_dim0, index_of_dim1;
- index_of_dim0 = output_index / output_pitches[0];
- m = output_index - index_of_dim0 * output_pitches[0];
- index_of_dim1 = m;
-
- int index_of_input_dim0, index_of_input_dim1, x_offset, y_offset;
- index_of_input_dim0 = index_of_dim0 / scales[0];
- y_offset = index_of_dim0 - index_of_input_dim0 * scales[0];
- index_of_input_dim1 = index_of_dim1 / scales[1];
- x_offset = index_of_dim1 - index_of_input_dim1 * scales[1];
-
- input_index = index_of_input_dim0 * input_pitches[0] + index_of_input_dim1;
-
- float x00 = getInputFloat(input_index);
- float x10, x01, x11;
-
- bool end_of_dim0 = false;
- if (index_of_input_dim0 == (${c[0].dims[0]} - 1)) {
- // It's the end in dimension 0
- x01 = x00;
- end_of_dim0 = true;
- } else {
- x01 = getInputFloat(input_index + input_pitches[0]);
- }
-
- if (index_of_input_dim1 == (input_pitches[0] - 1)) {
- // It's the end in dimension 1
- x10 = x00;
- x11 = x01;
- }
- else {
- x10 = getInputFloat(input_index + 1);
- x11 = end_of_dim0 ? x10 : getInputFloat(input_index + input_pitches[0] + 1);
- }
-
- float y0 = x00 + float(y_offset) * (x01 - x00) / float(scales[0]);
- float y1 = x10 + float(y_offset) * (x11 - x10) / float(scales[0]);
- return y0 + float(x_offset) * (y1 - y0) / float(scales[1]);
- }`;return Object.assign(Object.assign({},s),{output:{dims:i,type:c[0].type,textureType:p.TextureType.unpacked},shaderSource:S,variables:[{name:"scales",type:"int",arrayLength:o.scales.length,data:o.scales.map(A=>Math.ceil(A))}]})};n.validateInputs=(f,c)=>{if(!f||c.opset<9&&f.length!==1||c.opset>=9&&c.opset<11&&f.length!==2||c.opset>=11&&f.length<2)throw new Error("invalid inputs.");if(c.scales.length>0&&f[0].dims.length!==c.scales.length)throw new Error("Invalid input shape.");if(f[0].type==="string")throw new Error("Invalid input tensor types.")},n.scalesValidation=(f,c,o)=>{if(o){for(const t of f)if(t<=0)throw new Error("Scale value should be greater than 0.")}else for(const t of f)if(t<1)throw new Error("Scale value should be greater than or equal to 1.");if(!(c!=="linear"&&c!=="cubic"||f.length===2||f.length===4&&f[0]===1&&f[1]===1))throw new Error(`'Linear' mode and 'Cubic' mode only support 2-D inputs ('Bilinear', 'Bicubic') or 4-D inputs with the corresponding outermost 2 scale values being 1 in the ${o?"Resize":"Upsample"} opeartor.`)}},1958:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.ProgramManager=void 0;const u=a(1670),l=a(6231),p=a(8879),s=a(5060);n.ProgramManager=class{constructor(h,f,c){this.profiler=h,this.glContext=f,this.textureLayoutStrategy=c,this.repo=new Map,this.attributesBound=!1}getArtifact(h){return this.repo.get(h)}setArtifact(h,f){this.repo.set(h,f)}run(h,f,c){var o;this.profiler.event("op",`ProgramManager.run ${(o=h.programInfo.name)!==null&&o!==void 0?o:"unknown kernel"}`,()=>{var t;const e=this.glContext.gl,r=h.program;e.useProgram(r);try{this.bindOutput(c),this.attributesBound||this.bindAttributes(h.attribLocations),this.bindUniforms(h.uniformLocations,(t=h.programInfo.variables)!==null&&t!==void 0?t:[],f)}catch(i){throw l.Logger.error("ProgramManager",h.programInfo.shaderSource),i}this.profiler.event("backend","GlContext.draw()",()=>{this.glContext.draw()})},this.glContext)}dispose(){this.vertexShader&&this.glContext.deleteShader(this.vertexShader),this.repo.forEach(h=>this.glContext.deleteProgram(h.program))}build(h,f,c){return this.profiler.event("backend","ProgramManager.build",()=>{const o=new p.GlslPreprocessor(this.glContext,h,f,c),t=o.preprocess(),e=this.compile(t);return{programInfo:h,program:e,uniformLocations:this.getUniformLocations(e,o.context.programInfo.inputNames,o.context.programInfo.variables),attribLocations:this.getAttribLocations(e)}})}compile(h){if(!this.vertexShader){l.Logger.verbose("ProrgramManager","Compiling and caching Vertex shader for the first time");const o=(0,s.getVertexShaderSource)(this.glContext.version);this.vertexShader=this.glContext.compileShader(o,this.glContext.gl.VERTEX_SHADER)}u.env.debug&&l.Logger.verbose("ProrgramManager",`FragShader:
-${h}
-`);const f=this.glContext.compileShader(h,this.glContext.gl.FRAGMENT_SHADER),c=this.glContext.createProgram(this.vertexShader,f);return this.glContext.deleteShader(f),c}bindOutput(h){const f=h.width,c=h.height;l.Logger.verbose("ProrgramManager",`Binding output texture to Framebuffer: w/h=${f}/${c}, shape=${h.shape}, type=${h.tensor.type}`),this.glContext.attachFramebuffer(h.texture,f,c)}bindAttributes(h){const f=h.position,c=h.textureCoord;this.glContext.setVertexAttributes(f,c),this.attributesBound=!0}bindUniforms(h,f,c){var o;const t=this.glContext.gl;let e=0;for(const{name:r,type:i,location:d,arrayLength:g}of h){const m=(o=f.find(b=>b.name===r))===null||o===void 0?void 0:o.data;if(i!=="sampler2D"&&!m)throw new Error(`variable '${r}' does not have data defined in program info`);switch(i){case"sampler2D":this.bindTexture(c[e],d,e),e++;break;case"float":g?t.uniform1fv(d,m):t.uniform1f(d,m);break;case"int":g?t.uniform1iv(d,m):t.uniform1i(d,m);break;default:throw new Error(`Uniform not implemented: ${i}`)}}}bindTexture(h,f,c){this.glContext.bindTextureToUniform(h.texture,c,f)}getAttribLocations(h){return{position:this.getAttribLocation(h,"position"),textureCoord:this.getAttribLocation(h,"textureCoord")}}getUniformLocations(h,f,c){const o=[];if(f)for(const t of f)o.push({name:t,type:"sampler2D",location:this.getUniformLocation(h,t)});if(c)for(const t of c)o.push(Object.assign(Object.assign({},t),{location:this.getUniformLocation(h,t.name)}));return o}getUniformLocation(h,f){const c=this.glContext.gl.getUniformLocation(h,f);if(c===null)throw new Error(`Uniform ${f} not found.`);return c}getAttribLocation(h,f){return this.glContext.gl.getAttribLocation(h,f)}}},6416:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.WebGLSessionHandler=void 0;const u=a(6231),l=a(1047),p=a(8316),s=a(1640),h=a(1958),f=a(7859),c=a(5702);n.WebGLSessionHandler=class{constructor(o,t){this.backend=o,this.context=t,this.layoutStrategy=new f.PreferLogicalStrategy(o.glContext.maxTextureSize),this.programManager=new h.ProgramManager(this.context.profiler,o.glContext,this.layoutStrategy),this.textureManager=new c.TextureManager(o.glContext,this.layoutStrategy,this.context.profiler,{reuseTextures:o.textureCacheMode==="full"}),this.packedTextureDataCache=new Map,this.unpackedTextureDataCache=new Map,this.pack=o.pack,this.pack2unpackMap=new Map,this.unpack2packMap=new Map}createInferenceHandler(){return new p.WebGLInferenceHandler(this)}onGraphInitialized(o){const t=o.getValues().filter(e=>e.from===-1&&e.tensor).map(e=>e.tensor.dataId);this.initializers=new Set(t)}isInitializer(o){return!!this.initializers&&this.initializers.has(o)}addInitializer(o){this.initializers.add(o)}getTextureData(o,t){return t?this.packedTextureDataCache.get(o):this.unpackedTextureDataCache.get(o)}setTextureData(o,t,e=!1){u.Logger.verbose("WebGLSessionHandler","Storing Texture data in cache"),e?this.packedTextureDataCache.set(o,t):this.unpackedTextureDataCache.set(o,t)}dispose(){this.programManager.dispose(),this.textureManager.clearActiveTextures(),this.packedTextureDataCache.forEach(o=>this.textureManager.releaseTexture(o,!0)),this.packedTextureDataCache=new Map,this.unpackedTextureDataCache.forEach(o=>this.textureManager.releaseTexture(o,!0)),this.unpackedTextureDataCache=new Map}resolve(o,t,e){const 
r=(0,l.resolveOperator)(o,t,s.WEBGL_OP_RESOLVE_RULES);return{impl:r.opImpl,context:r.opInit?r.opInit(o,e):o}}}},7769:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.Uint8DataEncoder=n.RGBAFloatDataEncoder=n.RedFloat32DataEncoder=void 0;const u=a(6231);n.RedFloat32DataEncoder=class{constructor(l,p=1){if(p===1)this.internalFormat=l.R32F,this.format=l.RED,this.textureType=l.FLOAT,this.channelSize=p;else{if(p!==4)throw new Error(`Invalid number of channels: ${p}`);this.internalFormat=l.RGBA32F,this.format=l.RGBA,this.textureType=l.FLOAT,this.channelSize=p}}encode(l,p){let s,h;return l.constructor!==Float32Array&&(u.Logger.warning("Encoder","data was not of type Float32; creating new Float32Array"),h=new Float32Array(l)),p*this.channelSize>l.length?(u.Logger.warning("Encoder","Source data too small. Allocating larger array"),h=l,s=this.allocate(p*this.channelSize),h.forEach((f,c)=>s[c]=f)):(h=l,s=h),s}allocate(l){return new Float32Array(4*l)}decode(l,p){return this.channelSize===1?l.filter((s,h)=>h%4==0).subarray(0,p):l.subarray(0,p)}},n.RGBAFloatDataEncoder=class{constructor(l,p=1,s){if(p!==1&&p!==4)throw new Error(`Invalid number of channels: ${p}`);this.internalFormat=l.RGBA,this.format=l.RGBA,this.channelSize=p,this.textureType=s||l.FLOAT}encode(l,p){let s=l;return this.channelSize===1&&(u.Logger.verbose("Encoder","Exploding into a larger array"),s=this.allocate(p),l.forEach((h,f)=>s[4*f]=h)),s}allocate(l){return new Float32Array(4*l)}decode(l,p){return this.channelSize===1?l.filter((s,h)=>h%4==0).subarray(0,p):l.subarray(0,p)}},n.Uint8DataEncoder=class{constructor(l,p=1){if(this.channelSize=4,p===1)this.internalFormat=l.ALPHA,this.format=l.ALPHA,this.textureType=l.UNSIGNED_BYTE,this.channelSize=p;else{if(p!==4)throw new Error(`Invalid number of channels: ${p}`);this.internalFormat=l.RGBA,this.format=l.RGBA,this.textureType=l.UNSIGNED_BYTE,this.channelSize=p}}encode(l,p){return new Uint8Array(l.buffer,l.byteOffset,l.byteLength)}allocate(l){return new Uint8Array(l*this.channelSize)}decode(l,p){if(l instanceof Uint8Array)return l.subarray(0,p);throw new Error(`Invalid array type: ${l.constructor}`)}}},7859:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.getBatchDim=n.sizeToSquarishShape=n.getRowsCols=n.sizeFromShape=n.isInt=n.parseAxisParam=n.squeezeShape=n.PreferLogicalStrategy=n.AlwaysKeepOriginalSizeStrategy=void 0;const u=a(6231),l=a(2517);function p(o,t){const e=[],r=[],i=t!=null&&Array.isArray(t)&&t.length===0,d=t==null||i?null:s(t,o).sort();let g=0;for(let m=0;mm)&&o[m]===1&&(e.push(o[m]),r.push(m)),d[g]<=m&&g++}o[m]!==1&&(e.push(o[m]),r.push(m))}return{newShape:e,keptDims:r}}function s(o,t){const e=t.length;return o=o==null?t.map((r,i)=>i):[].concat(o),(0,l.assert)(o.every(r=>r>=-e&&r`All values in axis param must be in range [-${e}, ${e}) but got axis ${o}`),(0,l.assert)(o.every(h),()=>`All values in axis param must be integers but got axis ${o}`),o.map(r=>r<0?e+r:r)}function h(o){return o%1==0}function f(o){if(o.length===0)return 1;let t=o[0];for(let e=1;e=o.length?1:o.slice(t.breakAxis).reduce((m,b)=>m*b),g=t.breakAxis<=0?1:o.slice(0,t.breakAxis).reduce((m,b)=>m*b);if(!(d>e||g>e))return[d,g];u.Logger.verbose("TextureLayout",`Given width/height preferences were unattainable: shape:${o}, breakAxis:${t.breakAxis}`)}const r=o.reduce((d,g)=>d*g);let i=Math.floor(Math.sqrt(r));for(;i=e||r%i!=0)throw new Error(`The given dimensions are outside this GPU's boundaries: 
${o}`);return[i,r/i]}},n.PreferLogicalStrategy=class{constructor(o){this.maxTextureSize=o}computeTextureWH(o,t){const e=this.computeTexture(o,t);return t&&t.isPacked&&(e[0]/=2,e[1]/=2),t&&t.reverseWH?[e[1],e[0]]:e}computeTexture(o,t){const e=t&&t.isPacked;if(o.length===0)return e?[2,2]:[1,1];let r=this.maxTextureSize;if(t&&t.breakAxis!==void 0){const g=t.breakAxis>=o.length?1:o.slice(t.breakAxis).reduce((b,y)=>b*y),m=t.breakAxis<=0?1:o.slice(0,t.breakAxis).reduce((b,y)=>b*y);if(!(g>r||m>r))return[g,m];u.Logger.verbose("TextureLayout",`Given width/height preferences were unattainable: shape:${o}, breakAxis:${t.breakAxis}`)}let i=o.slice(0);e&&(r*=2,i=i.map((g,m)=>m>=i.length-2?i[m]%2==0?i[m]:i[m]+1:i[m]),i.length===1&&(i=[2,i[0]])),i.length!==2&&(i=p(i).newShape);const d=f(i);return i.length<=1&&d<=r?[1,d]:i.length===2&&i[0]<=r&&i[1]<=r?i:i.length===3&&i[0]*i[1]<=r&&i[2]<=r?[i[0]*i[1],i[2]]:i.length===3&&i[0]<=r&&i[1]*i[2]<=r?[i[0],i[1]*i[2]]:i.length===4&&i[0]*i[1]*i[2]<=r&&i[3]<=r?[i[0]*i[1]*i[2],i[3]]:i.length===4&&i[0]<=r&&i[1]*i[2]*i[3]<=r?[i[0],i[1]*i[2]*i[3]]:e?c(d/4).map(g=>2*g):c(d)}},n.squeezeShape=p,n.parseAxisParam=s,n.isInt=h,n.sizeFromShape=f,n.getRowsCols=function(o){if(o.length===0)throw Error("Cannot get rows and columns of an empty shape array.");return[o.length>1?o[o.length-2]:1,o[o.length-1]]},n.sizeToSquarishShape=c,n.getBatchDim=function(o,t=2){return f(o.slice(0,o.length-t))}},4057:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createTextureLayoutFromShape=n.calculateTextureWidthAndHeight=n.createTextureLayoutFromTextureType=void 0;const u=a(2517),l=a(2039);n.createTextureLayoutFromTextureType=(p,s,h)=>{const f=h===l.TextureType.unpacked||h===l.TextureType.unpackedReversed?1:4,c=h===l.TextureType.packed,o=h===l.TextureType.unpackedReversed||h===l.TextureType.packed,t=h===l.TextureType.packedLastDimension?s.length-1:void 0,e=h===l.TextureType.packedLastDimension?s.map((r,i)=>i===s.length-1?4*r:r):void 0;return(0,n.createTextureLayoutFromShape)(p,s,f,e,{isPacked:c,reverseWH:o,breakAxis:t})},n.calculateTextureWidthAndHeight=(p,s,h)=>{const f=(0,n.createTextureLayoutFromTextureType)(p,s,h);return[f.width,f.height]},n.createTextureLayoutFromShape=(p,s,h=1,f,c)=>{const o=!(!c||!c.isPacked),[t,e]=p.computeTextureWH(o&&f||s,c),r=s.length;let i=s.slice(0);if(r===0&&(i=[1]),h===1)f=s;else if(o){if(h!==4)throw new Error("a packed texture must be 4-channel");f=s,r>0&&(i[r-1]=Math.ceil(i[r-1]/2)),r>1&&(i[r-2]=Math.ceil(i[r-2]/2))}else if(!f)throw new Error("Unpacked shape is needed when using channels > 1");return{width:t,height:e,channels:h,isPacked:o,shape:i,strides:u.ShapeUtil.computeStrides(i),unpackedShape:f,reversedWH:c&&c.reverseWH}}},5702:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.TextureManager=void 0;const u=a(6231);n.TextureManager=class{constructor(l,p,s,h){this.glContext=l,this.layoutStrategy=p,this.profiler=s,this.config=h,this.pendingRead=new Map,h.reuseTextures&&(this.inUseTextures=new Map,this.idleTextures=new Map,this.textureLookup=new Map)}createTextureFromLayout(l,p,s,h){const f=this.toEncoderType(l),c=this.glContext.getEncoder(f,p.channels||1,h);if(p.isPacked&&h===1)throw new Error("not implemented");const o=p.width,t=p.height;let e,r;if(this.config.reuseTextures){e=`${o}x${t}_${c.format}_${c.internalFormat}_${c.textureType}`,r=this.inUseTextures.get(e),r||(r=[],this.inUseTextures.set(e,r));const d=this.idleTextures.get(e);if(d&&d.length>0){const g=d.pop();return 
r.push(g),h===1&&this.glContext.updateTexture(g,o,t,c,this.toTextureData(l,s)),g}}u.Logger.verbose("TextureManager",`Creating new texture of size ${p.width}x${p.height}`);const i=this.glContext.allocateTexture(o,t,c,this.toTextureData(l,s));return this.config.reuseTextures&&(r.push(i),this.textureLookup.set(i,e)),i}readTexture(l,p,s){return s||(s=1),this.profiler.event("backend","TextureManager.readTexture",()=>{const h=l.shape.reduce((c,o)=>c*o)*s,f=this.glContext.readTexture(l.texture,l.width,l.height,h,this.toEncoderType(p),s);return this.toTensorData(p,f)})}async readTextureAsync(l,p,s){const h=l.tensor.dataId;if(s||(s=1),this.pendingRead.has(h)){const f=this.pendingRead.get(h);return new Promise(c=>f==null?void 0:f.push(c))}return this.profiler.event("backend","TextureManager.readTextureAsync",async()=>{this.pendingRead.set(h,[]);const f=l.shape.reduce((e,r)=>e*r)*s;await this.glContext.createAndWaitForFence();const c=this.glContext.readTexture(l.texture,l.width,l.height,f,this.toEncoderType(p),s),o=this.toTensorData(p,c),t=this.pendingRead.get(h);return this.pendingRead.delete(h),t==null||t.forEach(e=>e(o)),o})}readUint8TextureAsFloat(l){return this.profiler.event("backend","TextureManager.readUint8TextureAsFloat",()=>{const p=l.shape.reduce((h,f)=>h*f),s=this.glContext.readTexture(l.texture,l.width,l.height,4*p,"byte",4);return new Float32Array(s.buffer,s.byteOffset,p)})}releaseTexture(l,p){let s;if(this.config.reuseTextures&&(s=this.textureLookup.get(l.texture),s)){p&&this.textureLookup.delete(s);const h=this.inUseTextures.get(s);if(h){const f=h.indexOf(l.texture);if(f!==-1){h.splice(f,1);let c=this.idleTextures.get(s);c||(c=[],this.idleTextures.set(s,c)),c.push(l.texture)}}}s&&!p||(u.Logger.verbose("TextureManager",`Deleting texture of size ${l.width}x${l.height}`),this.glContext.deleteTexture(l.texture))}toTensorData(l,p){switch(l){case"int16":return p instanceof Int16Array?p:Int16Array.from(p);case"int32":return p instanceof Int32Array?p:Int32Array.from(p);case"int8":return p instanceof Int8Array?p:Int8Array.from(p);case"uint16":return p instanceof Uint16Array?p:Uint16Array.from(p);case"uint32":return p instanceof Uint32Array?p:Uint32Array.from(p);case"uint8":case"bool":return p instanceof Uint8Array?p:Uint8Array.from(p);case"float32":return p instanceof Float32Array?p:Float32Array.from(p);case"float64":return p instanceof Float64Array?p:Float64Array.from(p);default:throw new Error(`TensorData type ${l} is not supported`)}}toTextureData(l,p){if(p)return p instanceof Float32Array?p:new Float32Array(p)}toEncoderType(l){return"float"}clearActiveTextures(){this.glContext.clearActiveTextures()}}},2039:(_,n)=>{var a;Object.defineProperty(n,"__esModule",{value:!0}),n.TextureType=void 0,(a=n.TextureType||(n.TextureType={}))[a.unpacked=0]="unpacked",a[a.unpackedReversed=1]="unpackedReversed",a[a.packed=2]="packed",a[a.downloadUint8AsFloat=3]="downloadUint8AsFloat",a[a.packedLastDimension=4]="packedLastDimension"},9390:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.getGlChannels=n.getCoordsDataType=n.getSqueezedParams=n.squeezeInputShape=n.generateShaderFuncNameFromInputSamplerNameAtOutCoords=n.generateShaderFuncNameFromInputSamplerName=n.repeatedTry=n.getPackedShape=void 0;const u=a(2517);n.getPackedShape=function(l){const p=l.length;return l.slice(0,p-1).concat(l[p-1]/4)},n.repeatedTry=async function(l,p=h=>0,s){return new Promise((h,f)=>{let c=0;const o=()=>{if(l())return void h();c++;const 
t=p(c);s!=null&&c>=s?f():setTimeout(o,t)};o()})},n.generateShaderFuncNameFromInputSamplerName=function(l){return(0,u.assert)(l!==void 0&&l.length!==0,()=>"empty string found for sampler name"),"get"+l.charAt(0).toUpperCase()+l.slice(1)},n.generateShaderFuncNameFromInputSamplerNameAtOutCoords=function(l){return(0,u.assert)(l!==void 0&&l.length!==0,()=>"empty string found for sampler name"),"get"+l.charAt(0).toUpperCase()+l.slice(1)+"AtOutCoords"},n.squeezeInputShape=function(l,p){let s=JSON.parse(JSON.stringify(l));return s=p,s},n.getSqueezedParams=function(l,p){return p.map(s=>l[s]).join(", ")},n.getCoordsDataType=function(l){if(l<=1)return"int";if(l===2)return"ivec2";if(l===3)return"ivec3";if(l===4)return"ivec4";if(l===5)return"ivec5";if(l===6)return"ivec6";throw Error(`GPU for rank ${l} is not yet supported`)},n.getGlChannels=function(l=6){return["x","y","z","w","u","v"].slice(0,l)}},7305:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createNewWebGLContext=n.createWebGLContext=void 0;const u=a(6231),l=a(1713),p={};function s(h){const f=function(){if(typeof document>"u"){if(typeof OffscreenCanvas>"u")throw new TypeError("failed to create canvas: OffscreenCanvas is not supported");return new OffscreenCanvas(1,1)}const t=document.createElement("canvas");return t.width=1,t.height=1,t}();let c;const o={alpha:!1,depth:!1,antialias:!1,stencil:!1,preserveDrawingBuffer:!1,premultipliedAlpha:!1,failIfMajorPerformanceCaveat:!1};if((!h||h==="webgl2")&&(c=f.getContext("webgl2",o),c))try{return new l.WebGLContext(c,2)}catch(t){u.Logger.warning("GlContextFactory",`failed to create WebGLContext using contextId 'webgl2'. Error: ${t}`)}if((!h||h==="webgl")&&(c=f.getContext("webgl",o)||f.getContext("experimental-webgl",o),c))try{return new l.WebGLContext(c,1)}catch(t){u.Logger.warning("GlContextFactory",`failed to create WebGLContext using contextId 'webgl' or 'experimental-webgl'. 
Error: ${t}`)}throw new Error("WebGL is not supported")}n.createWebGLContext=function h(f){let c;f&&f!=="webgl2"||!("webgl2"in p)?f&&f!=="webgl"||!("webgl"in p)||(c=p.webgl):c=p.webgl2,c=c||s(f),f=f||c.version===1?"webgl":"webgl2";const o=c.gl;return p[f]=c,o.isContextLost()?(delete p[f],h(f)):(o.disable(o.DEPTH_TEST),o.disable(o.STENCIL_TEST),o.disable(o.BLEND),o.disable(o.DITHER),o.disable(o.POLYGON_OFFSET_FILL),o.disable(o.SAMPLE_COVERAGE),o.enable(o.SCISSOR_TEST),o.enable(o.CULL_FACE),o.cullFace(o.BACK),c)},n.createNewWebGLContext=s},1713:function(_,n,a){var u=this&&this.__createBinding||(Object.create?function(o,t,e,r){r===void 0&&(r=e);var i=Object.getOwnPropertyDescriptor(t,e);i&&!("get"in i?!t.__esModule:i.writable||i.configurable)||(i={enumerable:!0,get:function(){return t[e]}}),Object.defineProperty(o,r,i)}:function(o,t,e,r){r===void 0&&(r=e),o[r]=t[e]}),l=this&&this.__setModuleDefault||(Object.create?function(o,t){Object.defineProperty(o,"default",{enumerable:!0,value:t})}:function(o,t){o.default=t}),p=this&&this.__importStar||function(o){if(o&&o.__esModule)return o;var t={};if(o!=null)for(var e in o)e!=="default"&&Object.prototype.hasOwnProperty.call(o,e)&&u(t,o,e);return l(t,o),t};Object.defineProperty(n,"__esModule",{value:!0}),n.WebGLContext=n.linearSearchLastTrue=void 0;const s=a(1670),h=p(a(7769)),f=a(9390);function c(o){let t=0;for(;tthis.isTimerResultAvailable(o)),this.getTimerResult(o)}async createAndWaitForFence(){const o=this.createFence(this.gl);return this.pollFence(o)}createFence(o){let t;const e=o,r=e.fenceSync(e.SYNC_GPU_COMMANDS_COMPLETE,0);return o.flush(),t=r===null?()=>!0:()=>{const i=e.clientWaitSync(r,0,0);return i===e.ALREADY_SIGNALED||i===e.CONDITION_SATISFIED},{query:r,isFencePassed:t}}async pollFence(o){return new Promise(t=>{this.addItemToPoll(()=>o.isFencePassed(),()=>t())})}pollItems(){const o=c(this.itemsToPoll.map(t=>t.isDoneFn));for(let t=0;t<=o;++t){const{resolveFn:e}=this.itemsToPoll[t];e()}this.itemsToPoll=this.itemsToPoll.slice(o+1)}async addItemToPoll(o,t){this.itemsToPoll.push({isDoneFn:o,resolveFn:t}),this.itemsToPoll.length>1||await(0,f.repeatedTry)(()=>(this.pollItems(),this.itemsToPoll.length===0))}}},1036:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.ExecutionPlan=void 0;const u=a(6231);class l{constructor(s,h){this.op=s,this.node=h}}n.ExecutionPlan=class{constructor(p,s,h){this.graph=p,this.profiler=h,this.initialize(s)}initialize(p){this.profiler.event("session","ExecutionPlan.initialize",()=>{const s=this.graph.getNodes();if(s.length!==p.length)throw new Error("The size of nodes and OPs do not match.");this._ops=p.map((h,f)=>new l(h,s[f])),this.reset(),this._starter=[],this._ops.forEach((h,f)=>{let c=!0;for(const o of h.node.inputs)if(!this._values[o]&&this.graph.getInputIndices().indexOf(o)===-1){c=!1;break}c&&this._starter.push(f)})})}reset(){this._values=this.graph.getValues().map(p=>p.tensor)}async execute(p,s){return this.profiler.event("session","ExecutionPlan.execute",async()=>{this.reset();const h=p.createInferenceHandler(),f=this.graph.getInputIndices();if(s.length!==f.length)throw new Error(`number of input tensors don't match the number of inputs to the model: actual: ${s.length} expected: ${f.length}`);s.forEach((i,d)=>{const g=f[d];this._values[g]=i});const c=this._starter.slice(0),o=this.graph.getValues(),t=this.graph.getNodes();let e=0;for(;ethis._values[w]);if(g.indexOf(void 0)!==-1)throw new Error(`unresolved input detected: op: ${d.node}`);const m=g;u.Logger.verbose("ExecPlan",`Runing 
op:${d.node.name} (${m.map((w,v)=>`'${d.node.inputs[v]}': ${w.type}[${w.dims.join(",")}]`).join(", ")})`);const b=await this.profiler.event("node",d.node.name,async()=>d.op.impl(h,m,d.op.context));if(b.length!==d.node.outputs.length)throw new Error("the size of output does not match model definition.");b.forEach((w,v)=>{const S=d.node.outputs[v];if(this._values[S])throw new Error(`output [${S}] already has value: op:${d.node.name}`);this._values[S]=w});const y=new Set;b.forEach((w,v)=>{const S=d.node.outputs[v];for(const A of o[S].to){const O=t[A];let x=!0;for(const I of O.inputs)if(!this._values[I]){x=!1;break}x&&y.add(A)}}),c.push(...y)}const r=[];for(let i=0;i{Object.defineProperty(n,"__esModule",{value:!0}),n.Graph=void 0;const u=a(1446),l=a(7778),p=a(9395),s=a(9162),h=a(2517);var f=p.onnxruntime.experimental.fbs;n.Graph={from:(e,r)=>new t(e,r)};class c{constructor(r){this._from=void 0,this._to=[],this.tensor=void 0,this.type=void 0,r&&(this.type=h.ProtoUtil.tensorValueTypeFromProto(r.type.tensorType))}get from(){return this._from}get to(){return this._to}}class o{constructor(r,i){r instanceof u.onnx.NodeProto?(this.name=r.name,this.opType=r.opType,this.attributes=new l.Attribute(r.attribute)):r instanceof f.Node&&(this.name=i??r.name(),this.opType=r.opType(),this.attributes=new l.Attribute(h.ProtoUtil.tensorAttributesFromORTFormat(r))),this.inputs=[],this.outputs=[],this.executeNode=!0}}class t{constructor(r,i){if(!r)throw new TypeError("graph is empty");this.buildGraph(r),this.transformGraph(i),this.checkIsAcyclic()}getInputIndices(){return this._allInputIndices}getInputNames(){return this._allInputNames}getOutputIndices(){return this._allOutputIndices}getOutputNames(){return this._allOutputNames}getValues(){return this._allData}getNodes(){return this._nodes}buildGraph(r){if(r instanceof u.onnx.GraphProto)this.buildGraphFromOnnxFormat(r);else{if(!(r instanceof f.Graph))throw new TypeError("Graph type is not supported.");this.buildGraphFromOrtFormat(r)}}buildGraphFromOnnxFormat(r){const i=new Map;this._allData=[],this._allInputIndices=[],this._allInputNames=[],this._allOutputIndices=[],this._allOutputNames=[],this._nodes=[];const d=new Map;if(!r.input)throw new Error("missing information in graph: input");const g=[];for(const m of r.input){if(i.has(m.name))throw new Error(`duplicated input name: ${m.name}`);const b=this._allData.push(new c(m))-1;i.set(m.name,b),g.push(m.name)}if(!r.initializer)throw new Error("missing information in graph: initializer");for(const m of r.initializer){let b=i.get(m.name);if(b===void 0){const y=new c;y.type={shape:{dims:h.ProtoUtil.tensorDimsFromProto(m.dims)},tensorType:h.ProtoUtil.tensorDataTypeFromProto(m.dataType)},b=this._allData.push(y)-1,i.set(m.name,b)}this._allData[b]._from=-1,this._allData[b].tensor=s.Tensor.fromProto(m)}for(let m=0;m{this._allData[g]._to.forEach(m=>{r.add(m)})});const i=Array.from(r),d=new Array(this._nodes.length).fill("white");for(;i.length>0;){const g=i.pop();d[g]==="gray"?d[g]="black":(i.push(g),d[g]="gray",this._nodes[g].outputs.forEach(m=>{const b=this._allData[m];if(b.tensor!==void 0)throw new Error("node outputs should not be initialized");if(b._from!==g)throw new Error("from property of the Value object doesn't match index of Node being processed");b._to.forEach(y=>{if(d[y]==="gray")throw new Error("model graph is 
cyclic");d[y]==="white"&&i.push(y)})}))}}transformGraph(r){this.removeAllIdentityNodes(),this.removeAllDropoutNodes(),this.fuseConvActivationNodes(),r&&r.transformGraph(this),this.finalizeGraph()}finalizeGraph(){let r=0;for(let i=0;i0&&(this._nodes[i].inputs.forEach(d=>{const g=this._allData[d]._to.indexOf(i+r);g!==-1&&(this._allData[d]._to[g]=i)}),this._nodes[i].outputs.forEach(d=>{this._allData[d]._from&&this._allData[d]._from===i+r&&(this._allData[d]._from=i)})):(r++,this._nodes[i].outputs.forEach(d=>{this._allData[d]._from=-2}),this._nodes.splice(i,1),i--);r=0;for(let i=0;i0){let d=-1;this._allData[i].from!==void 0&&this._allData[i].from!==-1?(d=this._nodes[this._allData[i].from].outputs.indexOf(i+r),d!==-1&&(this._nodes[this._allData[i].from].outputs[d]=i)):(d=this._allInputIndices.indexOf(i+r),d!==-1&&(this._allInputIndices[d]=i)),this._allData[i].to.forEach(g=>{d=this._nodes[g].inputs.indexOf(i+r),d!==-1&&(this._nodes[g].inputs[d]=i)}),this._allData[i].to.length===0&&(d=this._allOutputIndices.indexOf(i+r),d!==-1&&(this._allOutputIndices[d]=i))}}else r++,this._allData.splice(i,1),i--}deleteNode(r){const i=this._nodes[r];if(i.outputs.length>1){for(let w=1;w0)throw new Error("Node deletion with more than one output connected to other nodes is not supported. ")}i.executeNode=!1;const d=i.inputs[0],g=i.outputs[0],m=this._allData[g].to,b=this._allData[d].to.indexOf(r);if(b===-1)throw new Error("The Value object doesn't have the current Node in it's 'to' property ");this._allData[d].to.splice(b,1),this._allData[g]._to=[];const y=this._allOutputIndices.indexOf(g);if(y!==-1&&(this._allOutputIndices[y]=d),m&&m.length>0)for(const w of m){const v=this._nodes[w].inputs.indexOf(g);if(v===-1)throw new Error("The Node object doesn't have the output Value in it's 'inputs' property ");this._nodes[w].inputs[v]=d,this._allData[d].to.push(w)}}removeAllDropoutNodes(){let r=0;for(const i of this._nodes){if(i.opType==="Dropout"){if(i.inputs.length!==1)throw new Error("Dropout nodes should only contain one input. 
");if(i.outputs.length!==1&&i.outputs.length!==2)throw new Error("Dropout nodes should contain either 1 or 2 output(s)");if(i.outputs.length===2&&this._allData[i.outputs[1]]._to.length!==0)throw new Error("Dropout nodes's second output should not be referenced by other nodes");this.deleteNode(r)}r++}}removeAllIdentityNodes(){let r=0;for(const i of this._nodes)i.opType==="Identity"&&this.deleteNode(r),r++}isActivation(r){switch(r.opType){case"Relu":case"Sigmoid":case"Clip":return!0;default:return!1}}fuseConvActivationNodes(){for(const r of this._nodes)if(r.opType==="Conv"){const i=this._allData[r.outputs[0]]._to;if(i.length===1&&this.isActivation(this._nodes[i[0]])){const d=this._nodes[i[0]];if(d.opType==="Clip")if(d.inputs.length===1)try{r.attributes.set("activation_params","floats",[d.attributes.getFloat("min"),d.attributes.getFloat("max")])}catch{r.attributes.set("activation_params","floats",[h.MIN_CLIP,h.MAX_CLIP])}else{if(!(d.inputs.length>=3&&this._allData[d.inputs[1]].tensor!==void 0&&this._allData[d.inputs[2]].tensor!==void 0))continue;r.attributes.set("activation_params","floats",[this._allData[d.inputs[1]].tensor.floatData[0],this._allData[d.inputs[2]].tensor.floatData[0]])}r.attributes.set("activation","string",d.opType),this.deleteNode(i[0])}}}}},6231:(_,n)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.now=n.Profiler=n.Logger=void 0;const a={verbose:1e3,info:2e3,warning:4e3,error:5e3,fatal:6e3},u={none:new class{log(o,t,e){}},console:new class{log(o,t,e){console.log(`${this.color(o)} ${e?"\x1B[35m"+e+"\x1B[0m ":""}${t}`)}color(o){switch(o){case"verbose":return"\x1B[34;40mv\x1B[0m";case"info":return"\x1B[32mi\x1B[0m";case"warning":return"\x1B[30;43mw\x1B[0m";case"error":return"\x1B[31;40me\x1B[0m";case"fatal":return"\x1B[101mf\x1B[0m";default:throw new Error(`unsupported severity: ${o}`)}}}},l={provider:"console",minimalSeverity:"warning",logDateTime:!0,logSourceLocation:!1};let p={"":l};function s(o,t,e,r){if(t===void 0)return i=o,{verbose:s.verbose.bind(null,i),info:s.info.bind(null,i),warning:s.warning.bind(null,i),error:s.error.bind(null,i),fatal:s.fatal.bind(null,i)};if(e===void 0)h(o,t);else if(typeof e=="number"&&r===void 0)h(o,t);else if(typeof e=="string"&&r===void 0)h(o,e,0,t);else{if(typeof e!="string"||typeof r!="number")throw new TypeError("input is valid");h(o,e,0,t)}var i}function h(o,t,e,r){const i=p[r||""]||p[""];a[o]{g.then(async y=>{i&&await i.end(),m(y)},async y=>{i&&await i.end(),b(y)})});if(!d&&i){const m=i.end();if(m&&typeof m.then=="function")return new Promise((b,y)=>{m.then(()=>{b(g)},w=>{y(w)})})}return g}begin(o,t,e){if(!this._started)throw new Error("profiler is not started yet");if(e===void 0){const r=(0,n.now)();return this.flush(r),new f(o,t,r,i=>this.endSync(i))}{const r=e.beginTimer();return new f(o,t,0,async i=>this.end(i),r,e)}}async end(o){const t=await o.checkTimer();this._timingEvents.length=this._flushBatchSize||o-this._flushTime>=this._flushIntervalInMilliseconds){for(const t=this._flushPointer;this._flushPointerperformance.now():Date.now},2644:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.Model=void 0;const u=a(5686),l=a(1446),p=a(7070),s=a(9395),h=a(2517);var f=s.onnxruntime.experimental.fbs;n.Model=class{constructor(){}load(c,o,t){if(!t)try{return void this.loadFromOnnxFormat(c,o)}catch(e){if(t!==void 0)throw e}this.loadFromOrtFormat(c,o)}loadFromOnnxFormat(c,o){const t=l.onnx.ModelProto.decode(c);if(h.LongUtil.longToNumber(t.irVersion)<3)throw new Error("only support ONNX model with 
IR_VERSION>=3");this._opsets=t.opsetImport.map(e=>({domain:e.domain,version:h.LongUtil.longToNumber(e.version)})),this._graph=p.Graph.from(t.graph,o)}loadFromOrtFormat(c,o){const t=new u.flatbuffers.ByteBuffer(c),e=f.InferenceSession.getRootAsInferenceSession(t).model();if(h.LongUtil.longToNumber(e.irVersion())<3)throw new Error("only support ONNX model with IR_VERSION>=3");this._opsets=[];for(let r=0;r{Object.defineProperty(n,"__esModule",{value:!0}),n.FLOAT_TYPES=n.INT_TYPES=n.NUMBER_TYPES=void 0,n.NUMBER_TYPES=["float32","float64","int32","int16","int8","uint16","uint32","uint8"],n.INT_TYPES=["int32","int16","int8","uint16","uint32","uint8"],n.FLOAT_TYPES=["float32","float64"]},1047:(_,n)=>{function a(u,l){if(l.endsWith("+")){const p=Number.parseInt(l.substring(0,l.length-1),10);return!isNaN(p)&&p<=u}if(l.split("-").length===2){const p=l.split("-"),s=Number.parseInt(p[0],10),h=Number.parseInt(p[1],10);return!isNaN(s)&&!isNaN(h)&&s<=u&&u<=h}return Number.parseInt(l,10)===u}Object.defineProperty(n,"__esModule",{value:!0}),n.resolveOperator=void 0,n.resolveOperator=function(u,l,p){for(const s of p){const h=s[0],f=s[1],c=s[2],o=s[3],t=s[4];if(u.opType===h){for(const e of l)if((e.domain===f||e.domain==="ai.onnx"&&f==="")&&a(e.version,c))return{opImpl:o,opInit:t}}}throw new TypeError(`cannot resolve operator '${u.opType}' with opsets: ${l.map(s=>`${s.domain||"ai.onnx"} v${s.version}`).join(", ")}`)}},9395:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.onnxruntime=void 0;const u=a(5686);var l,p;l=n.onnxruntime||(n.onnxruntime={}),function(s){(function(h){h[h.UNDEFINED=0]="UNDEFINED",h[h.FLOAT=1]="FLOAT",h[h.INT=2]="INT",h[h.STRING=3]="STRING",h[h.TENSOR=4]="TENSOR",h[h.GRAPH=5]="GRAPH",h[h.FLOATS=6]="FLOATS",h[h.INTS=7]="INTS",h[h.STRINGS=8]="STRINGS",h[h.TENSORS=9]="TENSORS",h[h.GRAPHS=10]="GRAPHS",h[h.SPARSE_TENSOR=11]="SPARSE_TENSOR",h[h.SPARSE_TENSORS=12]="SPARSE_TENSORS"})(s.AttributeType||(s.AttributeType={}))}((p=l.experimental||(l.experimental={})).fbs||(p.fbs={})),function(s){(function(h){(function(f){(function(c){c[c.UNKNOWN=0]="UNKNOWN",c[c.VALUE=1]="VALUE",c[c.PARAM=2]="PARAM"})(f.DimensionValueType||(f.DimensionValueType={}))})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){(function(c){c[c.UNDEFINED=0]="UNDEFINED",c[c.FLOAT=1]="FLOAT",c[c.UINT8=2]="UINT8",c[c.INT8=3]="INT8",c[c.UINT16=4]="UINT16",c[c.INT16=5]="INT16",c[c.INT32=6]="INT32",c[c.INT64=7]="INT64",c[c.STRING=8]="STRING",c[c.BOOL=9]="BOOL",c[c.FLOAT16=10]="FLOAT16",c[c.DOUBLE=11]="DOUBLE",c[c.UINT32=12]="UINT32",c[c.UINT64=13]="UINT64",c[c.COMPLEX64=14]="COMPLEX64",c[c.COMPLEX128=15]="COMPLEX128",c[c.BFLOAT16=16]="BFLOAT16"})(f.TensorDataType||(f.TensorDataType={}))})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){(function(c){c[c.Primitive=0]="Primitive",c[c.Fused=1]="Fused"})(f.NodeType||(f.NodeType={}))})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){(function(c){c[c.NONE=0]="NONE",c[c.tensor_type=1]="tensor_type",c[c.sequence_type=2]="sequence_type",c[c.map_type=3]="map_type"})(f.TypeInfoValue||(f.TypeInfoValue={}))})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static 
getRootAsShape(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsShape(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}dim(t,e){let r=this.bb.__offset(this.bb_pos,4);return r?(e||new s.experimental.fbs.Dimension).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}dimLength(){let t=this.bb.__offset(this.bb_pos,4);return t?this.bb.__vector_len(this.bb_pos+t):0}static startShape(t){t.startObject(1)}static addDim(t,e){t.addFieldOffset(0,e,0)}static createDimVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startDimVector(t,e){t.startVector(4,e,4)}static endShape(t){return t.endObject()}static createShape(t,e){return c.startShape(t),c.addDim(t,e),c.endShape(t)}}f.Shape=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsDimension(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsDimension(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}value(t){let e=this.bb.__offset(this.bb_pos,4);return e?(t||new s.experimental.fbs.DimensionValue).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}denotation(t){let e=this.bb.__offset(this.bb_pos,6);return e?this.bb.__string(this.bb_pos+e,t):null}static startDimension(t){t.startObject(2)}static addValue(t,e){t.addFieldOffset(0,e,0)}static addDenotation(t,e){t.addFieldOffset(1,e,0)}static endDimension(t){return t.endObject()}static createDimension(t,e,r){return c.startDimension(t),c.addValue(t,e),c.addDenotation(t,r),c.endDimension(t)}}f.Dimension=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsDimensionValue(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsDimensionValue(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}dimType(){let t=this.bb.__offset(this.bb_pos,4);return t?this.bb.readInt8(this.bb_pos+t):s.experimental.fbs.DimensionValueType.UNKNOWN}dimValue(){let t=this.bb.__offset(this.bb_pos,6);return t?this.bb.readInt64(this.bb_pos+t):this.bb.createLong(0,0)}dimParam(t){let e=this.bb.__offset(this.bb_pos,8);return e?this.bb.__string(this.bb_pos+e,t):null}static startDimensionValue(t){t.startObject(3)}static addDimType(t,e){t.addFieldInt8(0,e,s.experimental.fbs.DimensionValueType.UNKNOWN)}static addDimValue(t,e){t.addFieldInt64(1,e,t.createLong(0,0))}static addDimParam(t,e){t.addFieldOffset(2,e,0)}static endDimensionValue(t){return t.endObject()}static createDimensionValue(t,e,r,i){return c.startDimensionValue(t),c.addDimType(t,e),c.addDimValue(t,r),c.addDimParam(t,i),c.endDimensionValue(t)}}f.DimensionValue=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static 
getRootAsTensorTypeAndShape(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsTensorTypeAndShape(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}elemType(){let t=this.bb.__offset(this.bb_pos,4);return t?this.bb.readInt32(this.bb_pos+t):s.experimental.fbs.TensorDataType.UNDEFINED}shape(t){let e=this.bb.__offset(this.bb_pos,6);return e?(t||new s.experimental.fbs.Shape).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}static startTensorTypeAndShape(t){t.startObject(2)}static addElemType(t,e){t.addFieldInt32(0,e,s.experimental.fbs.TensorDataType.UNDEFINED)}static addShape(t,e){t.addFieldOffset(1,e,0)}static endTensorTypeAndShape(t){return t.endObject()}static createTensorTypeAndShape(t,e,r){return c.startTensorTypeAndShape(t),c.addElemType(t,e),c.addShape(t,r),c.endTensorTypeAndShape(t)}}f.TensorTypeAndShape=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsMapType(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsMapType(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}keyType(){let t=this.bb.__offset(this.bb_pos,4);return t?this.bb.readInt32(this.bb_pos+t):s.experimental.fbs.TensorDataType.UNDEFINED}valueType(t){let e=this.bb.__offset(this.bb_pos,6);return e?(t||new s.experimental.fbs.TypeInfo).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}static startMapType(t){t.startObject(2)}static addKeyType(t,e){t.addFieldInt32(0,e,s.experimental.fbs.TensorDataType.UNDEFINED)}static addValueType(t,e){t.addFieldOffset(1,e,0)}static endMapType(t){return t.endObject()}static createMapType(t,e,r){return c.startMapType(t),c.addKeyType(t,e),c.addValueType(t,r),c.endMapType(t)}}f.MapType=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsSequenceType(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsSequenceType(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}elemType(t){let e=this.bb.__offset(this.bb_pos,4);return e?(t||new s.experimental.fbs.TypeInfo).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}static startSequenceType(t){t.startObject(1)}static addElemType(t,e){t.addFieldOffset(0,e,0)}static endSequenceType(t){return t.endObject()}static createSequenceType(t,e){return c.startSequenceType(t),c.addElemType(t,e),c.endSequenceType(t)}}f.SequenceType=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(h.fbs||(h.fbs={})).EdgeEnd=class{constructor(){this.bb=null,this.bb_pos=0}__init(f,c){return this.bb_pos=f,this.bb=c,this}nodeIndex(){return this.bb.readUint32(this.bb_pos)}srcArgIndex(){return this.bb.readInt32(this.bb_pos+4)}dstArgIndex(){return this.bb.readInt32(this.bb_pos+8)}static createEdgeEnd(f,c,o,t){return 
f.prep(4,12),f.writeInt32(t),f.writeInt32(o),f.writeInt32(c),f.offset()}}})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsNodeEdge(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsNodeEdge(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}nodeIndex(){let t=this.bb.__offset(this.bb_pos,4);return t?this.bb.readUint32(this.bb_pos+t):0}inputEdges(t,e){let r=this.bb.__offset(this.bb_pos,6);return r?(e||new s.experimental.fbs.EdgeEnd).__init(this.bb.__vector(this.bb_pos+r)+12*t,this.bb):null}inputEdgesLength(){let t=this.bb.__offset(this.bb_pos,6);return t?this.bb.__vector_len(this.bb_pos+t):0}outputEdges(t,e){let r=this.bb.__offset(this.bb_pos,8);return r?(e||new s.experimental.fbs.EdgeEnd).__init(this.bb.__vector(this.bb_pos+r)+12*t,this.bb):null}outputEdgesLength(){let t=this.bb.__offset(this.bb_pos,8);return t?this.bb.__vector_len(this.bb_pos+t):0}static startNodeEdge(t){t.startObject(3)}static addNodeIndex(t,e){t.addFieldInt32(0,e,0)}static addInputEdges(t,e){t.addFieldOffset(1,e,0)}static startInputEdgesVector(t,e){t.startVector(12,e,4)}static addOutputEdges(t,e){t.addFieldOffset(2,e,0)}static startOutputEdgesVector(t,e){t.startVector(12,e,4)}static endNodeEdge(t){return t.endObject()}static createNodeEdge(t,e,r,i){return c.startNodeEdge(t),c.addNodeIndex(t,e),c.addInputEdges(t,r),c.addOutputEdges(t,i),c.endNodeEdge(t)}}f.NodeEdge=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsNode(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsNode(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}name(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.__string(this.bb_pos+e,t):null}docString(t){let e=this.bb.__offset(this.bb_pos,6);return e?this.bb.__string(this.bb_pos+e,t):null}domain(t){let e=this.bb.__offset(this.bb_pos,8);return e?this.bb.__string(this.bb_pos+e,t):null}sinceVersion(){let t=this.bb.__offset(this.bb_pos,10);return t?this.bb.readInt32(this.bb_pos+t):0}index(){let t=this.bb.__offset(this.bb_pos,12);return t?this.bb.readUint32(this.bb_pos+t):0}opType(t){let e=this.bb.__offset(this.bb_pos,14);return e?this.bb.__string(this.bb_pos+e,t):null}type(){let t=this.bb.__offset(this.bb_pos,16);return t?this.bb.readInt32(this.bb_pos+t):s.experimental.fbs.NodeType.Primitive}executionProviderType(t){let e=this.bb.__offset(this.bb_pos,18);return e?this.bb.__string(this.bb_pos+e,t):null}inputs(t,e){let r=this.bb.__offset(this.bb_pos,20);return r?this.bb.__string(this.bb.__vector(this.bb_pos+r)+4*t,e):null}inputsLength(){let t=this.bb.__offset(this.bb_pos,20);return t?this.bb.__vector_len(this.bb_pos+t):0}outputs(t,e){let r=this.bb.__offset(this.bb_pos,22);return r?this.bb.__string(this.bb.__vector(this.bb_pos+r)+4*t,e):null}outputsLength(){let t=this.bb.__offset(this.bb_pos,22);return t?this.bb.__vector_len(this.bb_pos+t):0}attributes(t,e){let r=this.bb.__offset(this.bb_pos,24);return r?(e||new 
s.experimental.fbs.Attribute).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}attributesLength(){let t=this.bb.__offset(this.bb_pos,24);return t?this.bb.__vector_len(this.bb_pos+t):0}inputArgCounts(t){let e=this.bb.__offset(this.bb_pos,26);return e?this.bb.readInt32(this.bb.__vector(this.bb_pos+e)+4*t):0}inputArgCountsLength(){let t=this.bb.__offset(this.bb_pos,26);return t?this.bb.__vector_len(this.bb_pos+t):0}inputArgCountsArray(){let t=this.bb.__offset(this.bb_pos,26);return t?new Int32Array(this.bb.bytes().buffer,this.bb.bytes().byteOffset+this.bb.__vector(this.bb_pos+t),this.bb.__vector_len(this.bb_pos+t)):null}implicitInputs(t,e){let r=this.bb.__offset(this.bb_pos,28);return r?this.bb.__string(this.bb.__vector(this.bb_pos+r)+4*t,e):null}implicitInputsLength(){let t=this.bb.__offset(this.bb_pos,28);return t?this.bb.__vector_len(this.bb_pos+t):0}static startNode(t){t.startObject(13)}static addName(t,e){t.addFieldOffset(0,e,0)}static addDocString(t,e){t.addFieldOffset(1,e,0)}static addDomain(t,e){t.addFieldOffset(2,e,0)}static addSinceVersion(t,e){t.addFieldInt32(3,e,0)}static addIndex(t,e){t.addFieldInt32(4,e,0)}static addOpType(t,e){t.addFieldOffset(5,e,0)}static addType(t,e){t.addFieldInt32(6,e,s.experimental.fbs.NodeType.Primitive)}static addExecutionProviderType(t,e){t.addFieldOffset(7,e,0)}static addInputs(t,e){t.addFieldOffset(8,e,0)}static createInputsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startInputsVector(t,e){t.startVector(4,e,4)}static addOutputs(t,e){t.addFieldOffset(9,e,0)}static createOutputsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startOutputsVector(t,e){t.startVector(4,e,4)}static addAttributes(t,e){t.addFieldOffset(10,e,0)}static createAttributesVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startAttributesVector(t,e){t.startVector(4,e,4)}static addInputArgCounts(t,e){t.addFieldOffset(11,e,0)}static createInputArgCountsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addInt32(e[r]);return t.endVector()}static startInputArgCountsVector(t,e){t.startVector(4,e,4)}static addImplicitInputs(t,e){t.addFieldOffset(12,e,0)}static createImplicitInputsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startImplicitInputsVector(t,e){t.startVector(4,e,4)}static endNode(t){return t.endObject()}static createNode(t,e,r,i,d,g,m,b,y,w,v,S,A,O){return c.startNode(t),c.addName(t,e),c.addDocString(t,r),c.addDomain(t,i),c.addSinceVersion(t,d),c.addIndex(t,g),c.addOpType(t,m),c.addType(t,b),c.addExecutionProviderType(t,y),c.addInputs(t,w),c.addOutputs(t,v),c.addAttributes(t,S),c.addInputArgCounts(t,A),c.addImplicitInputs(t,O),c.endNode(t)}}f.Node=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsValueInfo(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsValueInfo(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}name(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.__string(this.bb_pos+e,t):null}docString(t){let 
e=this.bb.__offset(this.bb_pos,6);return e?this.bb.__string(this.bb_pos+e,t):null}type(t){let e=this.bb.__offset(this.bb_pos,8);return e?(t||new s.experimental.fbs.TypeInfo).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}static startValueInfo(t){t.startObject(3)}static addName(t,e){t.addFieldOffset(0,e,0)}static addDocString(t,e){t.addFieldOffset(1,e,0)}static addType(t,e){t.addFieldOffset(2,e,0)}static endValueInfo(t){return t.endObject()}static createValueInfo(t,e,r,i){return c.startValueInfo(t),c.addName(t,e),c.addDocString(t,r),c.addType(t,i),c.endValueInfo(t)}}f.ValueInfo=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsTypeInfo(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsTypeInfo(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}denotation(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.__string(this.bb_pos+e,t):null}valueType(){let t=this.bb.__offset(this.bb_pos,6);return t?this.bb.readUint8(this.bb_pos+t):s.experimental.fbs.TypeInfoValue.NONE}value(t){let e=this.bb.__offset(this.bb_pos,8);return e?this.bb.__union(t,this.bb_pos+e):null}static startTypeInfo(t){t.startObject(3)}static addDenotation(t,e){t.addFieldOffset(0,e,0)}static addValueType(t,e){t.addFieldInt8(1,e,s.experimental.fbs.TypeInfoValue.NONE)}static addValue(t,e){t.addFieldOffset(2,e,0)}static endTypeInfo(t){return t.endObject()}static createTypeInfo(t,e,r,i){return c.startTypeInfo(t),c.addDenotation(t,e),c.addValueType(t,r),c.addValue(t,i),c.endTypeInfo(t)}}f.TypeInfo=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsOperatorSetId(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsOperatorSetId(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}domain(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.__string(this.bb_pos+e,t):null}version(){let t=this.bb.__offset(this.bb_pos,6);return t?this.bb.readInt64(this.bb_pos+t):this.bb.createLong(0,0)}static startOperatorSetId(t){t.startObject(2)}static addDomain(t,e){t.addFieldOffset(0,e,0)}static addVersion(t,e){t.addFieldInt64(1,e,t.createLong(0,0))}static endOperatorSetId(t){return t.endObject()}static createOperatorSetId(t,e,r){return c.startOperatorSetId(t),c.addDomain(t,e),c.addVersion(t,r),c.endOperatorSetId(t)}}f.OperatorSetId=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsTensor(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsTensor(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}name(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.__string(this.bb_pos+e,t):null}docString(t){let e=this.bb.__offset(this.bb_pos,6);return 
e?this.bb.__string(this.bb_pos+e,t):null}dims(t){let e=this.bb.__offset(this.bb_pos,8);return e?this.bb.readInt64(this.bb.__vector(this.bb_pos+e)+8*t):this.bb.createLong(0,0)}dimsLength(){let t=this.bb.__offset(this.bb_pos,8);return t?this.bb.__vector_len(this.bb_pos+t):0}dataType(){let t=this.bb.__offset(this.bb_pos,10);return t?this.bb.readInt32(this.bb_pos+t):s.experimental.fbs.TensorDataType.UNDEFINED}rawData(t){let e=this.bb.__offset(this.bb_pos,12);return e?this.bb.readUint8(this.bb.__vector(this.bb_pos+e)+t):0}rawDataLength(){let t=this.bb.__offset(this.bb_pos,12);return t?this.bb.__vector_len(this.bb_pos+t):0}rawDataArray(){let t=this.bb.__offset(this.bb_pos,12);return t?new Uint8Array(this.bb.bytes().buffer,this.bb.bytes().byteOffset+this.bb.__vector(this.bb_pos+t),this.bb.__vector_len(this.bb_pos+t)):null}stringData(t,e){let r=this.bb.__offset(this.bb_pos,14);return r?this.bb.__string(this.bb.__vector(this.bb_pos+r)+4*t,e):null}stringDataLength(){let t=this.bb.__offset(this.bb_pos,14);return t?this.bb.__vector_len(this.bb_pos+t):0}static startTensor(t){t.startObject(6)}static addName(t,e){t.addFieldOffset(0,e,0)}static addDocString(t,e){t.addFieldOffset(1,e,0)}static addDims(t,e){t.addFieldOffset(2,e,0)}static createDimsVector(t,e){t.startVector(8,e.length,8);for(let r=e.length-1;r>=0;r--)t.addInt64(e[r]);return t.endVector()}static startDimsVector(t,e){t.startVector(8,e,8)}static addDataType(t,e){t.addFieldInt32(3,e,s.experimental.fbs.TensorDataType.UNDEFINED)}static addRawData(t,e){t.addFieldOffset(4,e,0)}static createRawDataVector(t,e){t.startVector(1,e.length,1);for(let r=e.length-1;r>=0;r--)t.addInt8(e[r]);return t.endVector()}static startRawDataVector(t,e){t.startVector(1,e,1)}static addStringData(t,e){t.addFieldOffset(5,e,0)}static createStringDataVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startStringDataVector(t,e){t.startVector(4,e,4)}static endTensor(t){return t.endObject()}static createTensor(t,e,r,i,d,g,m){return c.startTensor(t),c.addName(t,e),c.addDocString(t,r),c.addDims(t,i),c.addDataType(t,d),c.addRawData(t,g),c.addStringData(t,m),c.endTensor(t)}}f.Tensor=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsSparseTensor(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsSparseTensor(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}values(t){let e=this.bb.__offset(this.bb_pos,4);return e?(t||new s.experimental.fbs.Tensor).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}indices(t){let e=this.bb.__offset(this.bb_pos,6);return e?(t||new s.experimental.fbs.Tensor).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}dims(t){let e=this.bb.__offset(this.bb_pos,8);return e?this.bb.readInt64(this.bb.__vector(this.bb_pos+e)+8*t):this.bb.createLong(0,0)}dimsLength(){let t=this.bb.__offset(this.bb_pos,8);return t?this.bb.__vector_len(this.bb_pos+t):0}static startSparseTensor(t){t.startObject(3)}static addValues(t,e){t.addFieldOffset(0,e,0)}static addIndices(t,e){t.addFieldOffset(1,e,0)}static addDims(t,e){t.addFieldOffset(2,e,0)}static createDimsVector(t,e){t.startVector(8,e.length,8);for(let r=e.length-1;r>=0;r--)t.addInt64(e[r]);return t.endVector()}static 
startDimsVector(t,e){t.startVector(8,e,8)}static endSparseTensor(t){return t.endObject()}static createSparseTensor(t,e,r,i){return c.startSparseTensor(t),c.addValues(t,e),c.addIndices(t,r),c.addDims(t,i),c.endSparseTensor(t)}}f.SparseTensor=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsAttribute(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsAttribute(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}name(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.__string(this.bb_pos+e,t):null}docString(t){let e=this.bb.__offset(this.bb_pos,6);return e?this.bb.__string(this.bb_pos+e,t):null}type(){let t=this.bb.__offset(this.bb_pos,8);return t?this.bb.readInt32(this.bb_pos+t):s.experimental.fbs.AttributeType.UNDEFINED}f(){let t=this.bb.__offset(this.bb_pos,10);return t?this.bb.readFloat32(this.bb_pos+t):0}i(){let t=this.bb.__offset(this.bb_pos,12);return t?this.bb.readInt64(this.bb_pos+t):this.bb.createLong(0,0)}s(t){let e=this.bb.__offset(this.bb_pos,14);return e?this.bb.__string(this.bb_pos+e,t):null}t(t){let e=this.bb.__offset(this.bb_pos,16);return e?(t||new s.experimental.fbs.Tensor).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}g(t){let e=this.bb.__offset(this.bb_pos,18);return e?(t||new s.experimental.fbs.Graph).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}floats(t){let e=this.bb.__offset(this.bb_pos,20);return e?this.bb.readFloat32(this.bb.__vector(this.bb_pos+e)+4*t):0}floatsLength(){let t=this.bb.__offset(this.bb_pos,20);return t?this.bb.__vector_len(this.bb_pos+t):0}floatsArray(){let t=this.bb.__offset(this.bb_pos,20);return t?new Float32Array(this.bb.bytes().buffer,this.bb.bytes().byteOffset+this.bb.__vector(this.bb_pos+t),this.bb.__vector_len(this.bb_pos+t)):null}ints(t){let e=this.bb.__offset(this.bb_pos,22);return e?this.bb.readInt64(this.bb.__vector(this.bb_pos+e)+8*t):this.bb.createLong(0,0)}intsLength(){let t=this.bb.__offset(this.bb_pos,22);return t?this.bb.__vector_len(this.bb_pos+t):0}strings(t,e){let r=this.bb.__offset(this.bb_pos,24);return r?this.bb.__string(this.bb.__vector(this.bb_pos+r)+4*t,e):null}stringsLength(){let t=this.bb.__offset(this.bb_pos,24);return t?this.bb.__vector_len(this.bb_pos+t):0}tensors(t,e){let r=this.bb.__offset(this.bb_pos,26);return r?(e||new s.experimental.fbs.Tensor).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}tensorsLength(){let t=this.bb.__offset(this.bb_pos,26);return t?this.bb.__vector_len(this.bb_pos+t):0}graphs(t,e){let r=this.bb.__offset(this.bb_pos,28);return r?(e||new s.experimental.fbs.Graph).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}graphsLength(){let t=this.bb.__offset(this.bb_pos,28);return t?this.bb.__vector_len(this.bb_pos+t):0}static startAttribute(t){t.startObject(13)}static addName(t,e){t.addFieldOffset(0,e,0)}static addDocString(t,e){t.addFieldOffset(1,e,0)}static addType(t,e){t.addFieldInt32(2,e,s.experimental.fbs.AttributeType.UNDEFINED)}static addF(t,e){t.addFieldFloat32(3,e,0)}static addI(t,e){t.addFieldInt64(4,e,t.createLong(0,0))}static addS(t,e){t.addFieldOffset(5,e,0)}static addT(t,e){t.addFieldOffset(6,e,0)}static addG(t,e){t.addFieldOffset(7,e,0)}static 
addFloats(t,e){t.addFieldOffset(8,e,0)}static createFloatsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addFloat32(e[r]);return t.endVector()}static startFloatsVector(t,e){t.startVector(4,e,4)}static addInts(t,e){t.addFieldOffset(9,e,0)}static createIntsVector(t,e){t.startVector(8,e.length,8);for(let r=e.length-1;r>=0;r--)t.addInt64(e[r]);return t.endVector()}static startIntsVector(t,e){t.startVector(8,e,8)}static addStrings(t,e){t.addFieldOffset(10,e,0)}static createStringsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startStringsVector(t,e){t.startVector(4,e,4)}static addTensors(t,e){t.addFieldOffset(11,e,0)}static createTensorsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startTensorsVector(t,e){t.startVector(4,e,4)}static addGraphs(t,e){t.addFieldOffset(12,e,0)}static createGraphsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startGraphsVector(t,e){t.startVector(4,e,4)}static endAttribute(t){return t.endObject()}static createAttribute(t,e,r,i,d,g,m,b,y,w,v,S,A,O){return c.startAttribute(t),c.addName(t,e),c.addDocString(t,r),c.addType(t,i),c.addF(t,d),c.addI(t,g),c.addS(t,m),c.addT(t,b),c.addG(t,y),c.addFloats(t,w),c.addInts(t,v),c.addStrings(t,S),c.addTensors(t,A),c.addGraphs(t,O),c.endAttribute(t)}}f.Attribute=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsGraph(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsGraph(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}initializers(t,e){let r=this.bb.__offset(this.bb_pos,4);return r?(e||new s.experimental.fbs.Tensor).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}initializersLength(){let t=this.bb.__offset(this.bb_pos,4);return t?this.bb.__vector_len(this.bb_pos+t):0}nodeArgs(t,e){let r=this.bb.__offset(this.bb_pos,6);return r?(e||new s.experimental.fbs.ValueInfo).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}nodeArgsLength(){let t=this.bb.__offset(this.bb_pos,6);return t?this.bb.__vector_len(this.bb_pos+t):0}nodes(t,e){let r=this.bb.__offset(this.bb_pos,8);return r?(e||new s.experimental.fbs.Node).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}nodesLength(){let t=this.bb.__offset(this.bb_pos,8);return t?this.bb.__vector_len(this.bb_pos+t):0}maxNodeIndex(){let t=this.bb.__offset(this.bb_pos,10);return t?this.bb.readUint32(this.bb_pos+t):0}nodeEdges(t,e){let r=this.bb.__offset(this.bb_pos,12);return r?(e||new s.experimental.fbs.NodeEdge).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}nodeEdgesLength(){let t=this.bb.__offset(this.bb_pos,12);return t?this.bb.__vector_len(this.bb_pos+t):0}inputs(t,e){let r=this.bb.__offset(this.bb_pos,14);return r?this.bb.__string(this.bb.__vector(this.bb_pos+r)+4*t,e):null}inputsLength(){let t=this.bb.__offset(this.bb_pos,14);return t?this.bb.__vector_len(this.bb_pos+t):0}outputs(t,e){let r=this.bb.__offset(this.bb_pos,16);return r?this.bb.__string(this.bb.__vector(this.bb_pos+r)+4*t,e):null}outputsLength(){let 
t=this.bb.__offset(this.bb_pos,16);return t?this.bb.__vector_len(this.bb_pos+t):0}sparseInitializers(t,e){let r=this.bb.__offset(this.bb_pos,18);return r?(e||new s.experimental.fbs.SparseTensor).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}sparseInitializersLength(){let t=this.bb.__offset(this.bb_pos,18);return t?this.bb.__vector_len(this.bb_pos+t):0}static startGraph(t){t.startObject(8)}static addInitializers(t,e){t.addFieldOffset(0,e,0)}static createInitializersVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startInitializersVector(t,e){t.startVector(4,e,4)}static addNodeArgs(t,e){t.addFieldOffset(1,e,0)}static createNodeArgsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startNodeArgsVector(t,e){t.startVector(4,e,4)}static addNodes(t,e){t.addFieldOffset(2,e,0)}static createNodesVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startNodesVector(t,e){t.startVector(4,e,4)}static addMaxNodeIndex(t,e){t.addFieldInt32(3,e,0)}static addNodeEdges(t,e){t.addFieldOffset(4,e,0)}static createNodeEdgesVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startNodeEdgesVector(t,e){t.startVector(4,e,4)}static addInputs(t,e){t.addFieldOffset(5,e,0)}static createInputsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startInputsVector(t,e){t.startVector(4,e,4)}static addOutputs(t,e){t.addFieldOffset(6,e,0)}static createOutputsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startOutputsVector(t,e){t.startVector(4,e,4)}static addSparseInitializers(t,e){t.addFieldOffset(7,e,0)}static createSparseInitializersVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startSparseInitializersVector(t,e){t.startVector(4,e,4)}static endGraph(t){return t.endObject()}static createGraph(t,e,r,i,d,g,m,b,y){return c.startGraph(t),c.addInitializers(t,e),c.addNodeArgs(t,r),c.addNodes(t,i),c.addMaxNodeIndex(t,d),c.addNodeEdges(t,g),c.addInputs(t,m),c.addOutputs(t,b),c.addSparseInitializers(t,y),c.endGraph(t)}}f.Graph=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsModel(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsModel(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}irVersion(){let t=this.bb.__offset(this.bb_pos,4);return t?this.bb.readInt64(this.bb_pos+t):this.bb.createLong(0,0)}opsetImport(t,e){let r=this.bb.__offset(this.bb_pos,6);return r?(e||new s.experimental.fbs.OperatorSetId).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}opsetImportLength(){let t=this.bb.__offset(this.bb_pos,6);return t?this.bb.__vector_len(this.bb_pos+t):0}producerName(t){let e=this.bb.__offset(this.bb_pos,8);return e?this.bb.__string(this.bb_pos+e,t):null}producerVersion(t){let e=this.bb.__offset(this.bb_pos,10);return e?this.bb.__string(this.bb_pos+e,t):null}domain(t){let 
e=this.bb.__offset(this.bb_pos,12);return e?this.bb.__string(this.bb_pos+e,t):null}modelVersion(){let t=this.bb.__offset(this.bb_pos,14);return t?this.bb.readInt64(this.bb_pos+t):this.bb.createLong(0,0)}docString(t){let e=this.bb.__offset(this.bb_pos,16);return e?this.bb.__string(this.bb_pos+e,t):null}graph(t){let e=this.bb.__offset(this.bb_pos,18);return e?(t||new s.experimental.fbs.Graph).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}graphDocString(t){let e=this.bb.__offset(this.bb_pos,20);return e?this.bb.__string(this.bb_pos+e,t):null}static startModel(t){t.startObject(9)}static addIrVersion(t,e){t.addFieldInt64(0,e,t.createLong(0,0))}static addOpsetImport(t,e){t.addFieldOffset(1,e,0)}static createOpsetImportVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startOpsetImportVector(t,e){t.startVector(4,e,4)}static addProducerName(t,e){t.addFieldOffset(2,e,0)}static addProducerVersion(t,e){t.addFieldOffset(3,e,0)}static addDomain(t,e){t.addFieldOffset(4,e,0)}static addModelVersion(t,e){t.addFieldInt64(5,e,t.createLong(0,0))}static addDocString(t,e){t.addFieldOffset(6,e,0)}static addGraph(t,e){t.addFieldOffset(7,e,0)}static addGraphDocString(t,e){t.addFieldOffset(8,e,0)}static endModel(t){return t.endObject()}static createModel(t,e,r,i,d,g,m,b,y,w){return c.startModel(t),c.addIrVersion(t,e),c.addOpsetImport(t,r),c.addProducerName(t,i),c.addProducerVersion(t,d),c.addDomain(t,g),c.addModelVersion(t,m),c.addDocString(t,b),c.addGraph(t,y),c.addGraphDocString(t,w),c.endModel(t)}}f.Model=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsKernelCreateInfos(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsKernelCreateInfos(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}nodeIndices(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.readUint32(this.bb.__vector(this.bb_pos+e)+4*t):0}nodeIndicesLength(){let t=this.bb.__offset(this.bb_pos,4);return t?this.bb.__vector_len(this.bb_pos+t):0}nodeIndicesArray(){let t=this.bb.__offset(this.bb_pos,4);return t?new Uint32Array(this.bb.bytes().buffer,this.bb.bytes().byteOffset+this.bb.__vector(this.bb_pos+t),this.bb.__vector_len(this.bb_pos+t)):null}kernelDefHashes(t){let e=this.bb.__offset(this.bb_pos,6);return e?this.bb.readUint64(this.bb.__vector(this.bb_pos+e)+8*t):this.bb.createLong(0,0)}kernelDefHashesLength(){let t=this.bb.__offset(this.bb_pos,6);return t?this.bb.__vector_len(this.bb_pos+t):0}static startKernelCreateInfos(t){t.startObject(2)}static addNodeIndices(t,e){t.addFieldOffset(0,e,0)}static createNodeIndicesVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addInt32(e[r]);return t.endVector()}static startNodeIndicesVector(t,e){t.startVector(4,e,4)}static addKernelDefHashes(t,e){t.addFieldOffset(1,e,0)}static createKernelDefHashesVector(t,e){t.startVector(8,e.length,8);for(let r=e.length-1;r>=0;r--)t.addInt64(e[r]);return t.endVector()}static startKernelDefHashesVector(t,e){t.startVector(8,e,8)}static endKernelCreateInfos(t){return t.endObject()}static createKernelCreateInfos(t,e,r){return 
c.startKernelCreateInfos(t),c.addNodeIndices(t,e),c.addKernelDefHashes(t,r),c.endKernelCreateInfos(t)}}f.KernelCreateInfos=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsSubGraphSessionState(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsSubGraphSessionState(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}graphId(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.__string(this.bb_pos+e,t):null}sessionState(t){let e=this.bb.__offset(this.bb_pos,6);return e?(t||new s.experimental.fbs.SessionState).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}static startSubGraphSessionState(t){t.startObject(2)}static addGraphId(t,e){t.addFieldOffset(0,e,0)}static addSessionState(t,e){t.addFieldOffset(1,e,0)}static endSubGraphSessionState(t){let e=t.endObject();return t.requiredField(e,4),e}static createSubGraphSessionState(t,e,r){return c.startSubGraphSessionState(t),c.addGraphId(t,e),c.addSessionState(t,r),c.endSubGraphSessionState(t)}}f.SubGraphSessionState=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsSessionState(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsSessionState(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}kernels(t){let e=this.bb.__offset(this.bb_pos,4);return e?(t||new s.experimental.fbs.KernelCreateInfos).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}subGraphSessionStates(t,e){let r=this.bb.__offset(this.bb_pos,6);return r?(e||new s.experimental.fbs.SubGraphSessionState).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}subGraphSessionStatesLength(){let t=this.bb.__offset(this.bb_pos,6);return t?this.bb.__vector_len(this.bb_pos+t):0}static startSessionState(t){t.startObject(2)}static addKernels(t,e){t.addFieldOffset(0,e,0)}static addSubGraphSessionStates(t,e){t.addFieldOffset(1,e,0)}static createSubGraphSessionStatesVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startSubGraphSessionStatesVector(t,e){t.startVector(4,e,4)}static endSessionState(t){return t.endObject()}static createSessionState(t,e,r){return c.startSessionState(t),c.addKernels(t,e),c.addSubGraphSessionStates(t,r),c.endSessionState(t)}}f.SessionState=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class c{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsInferenceSession(t,e){return(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsInferenceSession(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new c).__init(t.readInt32(t.position())+t.position(),t)}static bufferHasIdentifier(t){return t.__has_identifier("ORTM")}ortVersion(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.__string(this.bb_pos+e,t):null}model(t){let 
e=this.bb.__offset(this.bb_pos,6);return e?(t||new s.experimental.fbs.Model).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}sessionState(t){let e=this.bb.__offset(this.bb_pos,8);return e?(t||new s.experimental.fbs.SessionState).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}static startInferenceSession(t){t.startObject(3)}static addOrtVersion(t,e){t.addFieldOffset(0,e,0)}static addModel(t,e){t.addFieldOffset(1,e,0)}static addSessionState(t,e){t.addFieldOffset(2,e,0)}static endInferenceSession(t){return t.endObject()}static finishInferenceSessionBuffer(t,e){t.finish(e,"ORTM")}static finishSizePrefixedInferenceSessionBuffer(t,e){t.finish(e,"ORTM",!0)}static createInferenceSession(t,e,r,i){return c.startInferenceSession(t),c.addOrtVersion(t,e),c.addModel(t,r),c.addSessionState(t,i),c.endInferenceSession(t)}}f.InferenceSession=c})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={}))},7448:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.OnnxjsSessionHandler=void 0;const u=a(1670),l=a(9162);n.OnnxjsSessionHandler=class{constructor(p){this.session=p,this.inputNames=this.session.inputNames,this.outputNames=this.session.outputNames}async dispose(){}async run(p,s,h){const f=new Map;for(const t in p)if(Object.hasOwnProperty.call(p,t)){const e=p[t];f.set(t,new l.Tensor(e.dims,e.type,void 0,void 0,e.data))}const c=await this.session.run(f),o={};return c.forEach((t,e)=>{o[e]=new u.Tensor(t.type,t.data,t.dims)}),o}startProfiling(){this.session.startProfiling()}endProfiling(){this.session.endProfiling()}}},6919:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.Session=void 0;const u=a(7067),l=a(1296),p=a(7091),s=a(1036),h=a(6231),f=a(2644);n.Session=class{constructor(c={}){this._initialized=!1,this.backendHint=c.backendHint,this.profiler=h.Profiler.create(c.profiler),this.context={profiler:this.profiler,graphInputTypes:[],graphInputDims:[]}}get inputNames(){return this._model.graph.getInputNames()}get outputNames(){return this._model.graph.getOutputNames()}startProfiling(){this.profiler.start()}endProfiling(){this.profiler.stop()}async loadModel(c,o,t){await this.profiler.event("session","Session.loadModel",async()=>{const e=await(0,p.resolveBackend)(this.backendHint);if(this.sessionHandler=e.createSessionHandler(this.context),this._model=new f.Model,typeof c=="string"){const r=c.endsWith(".ort");if(typeof fetch>"u"){const i=await(0,l.promisify)(u.readFile)(c);this.initialize(i,r)}else{const i=await fetch(c),d=await i.arrayBuffer();this.initialize(new Uint8Array(d),r)}}else if(ArrayBuffer.isView(c))this.initialize(c);else{const r=new Uint8Array(c,o||0,t||c.byteLength);this.initialize(r)}})}initialize(c,o){if(this._initialized)throw new Error("already initialized");this.profiler.event("session","Session.initialize",()=>{const t=this.sessionHandler.transformGraph?this.sessionHandler:void 0;this._model.load(c,t,o),this.sessionHandler.onGraphInitialized&&this.sessionHandler.onGraphInitialized(this._model.graph),this.initializeOps(this._model.graph),this._executionPlan=new s.ExecutionPlan(this._model.graph,this._ops,this.profiler)}),this._initialized=!0}async run(c){if(!this._initialized)throw new Error("session not initialized yet");return this.profiler.event("session","Session.run",async()=>{const o=this.normalizeAndValidateInputs(c),t=await this._executionPlan.execute(this.sessionHandler,o);return this.createOutput(t)})}normalizeAndValidateInputs(c){const 
o=this._model.graph.getInputNames();if(Array.isArray(c)){if(c.length!==o.length)throw new Error(`incorrect input array length: expected ${o.length} but got ${c.length}`)}else{if(c.size!==o.length)throw new Error(`incorrect input map size: expected ${o.length} but got ${c.size}`);const t=new Array(c.size);let e=0;for(let r=0;rtypeof O=="string")))throw new TypeError("cache should be a string array");A&&(this.cache=new Array(S))}else{if(w!==void 0){const O=e(m);if(!(w instanceof O))throw new TypeError(`cache should be type ${O.name}`)}if(A){const O=new ArrayBuffer(S*function(x){switch(x){case"bool":case"int8":case"uint8":return 1;case"int16":case"uint16":return 2;case"int32":case"uint32":case"float32":return 4;case"float64":return 8;default:throw new Error(`cannot calculate sizeof() on type ${x}`)}}(m));this.cache=function(x,I){return new(e(I))(x)}(O,m)}}}static fromProto(g){if(!g)throw new Error("cannot construct Value from an empty tensor");const m=f.ProtoUtil.tensorDataTypeFromProto(g.dataType),b=f.ProtoUtil.tensorDimsFromProto(g.dims),y=new o(b,m);if(m==="string")g.stringData.forEach((w,v)=>{y.data[v]=(0,f.decodeUtf8String)(w)});else if(g.rawData&&typeof g.rawData.byteLength=="number"&&g.rawData.byteLength>0){const w=y.data,v=new DataView(g.rawData.buffer,g.rawData.byteOffset,g.rawData.byteLength),S=t(g.dataType),A=g.rawData.byteLength/S;if(g.rawData.byteLength%S!=0)throw new Error("invalid buffer length");if(w.length!==A)throw new Error("buffer length mismatch");for(let O=0;O0){const w=y.data,v=new DataView(g.rawDataArray().buffer,g.rawDataArray().byteOffset,g.rawDataLength()),S=t(g.dataType()),A=g.rawDataLength()/S;if(g.rawDataLength()%S!=0)throw new Error("invalid buffer length");if(w.length!==A)throw new Error("buffer length mismatch");for(let O=0;O 1&&I>1)return;A[S-O]=Math.max(x,I)}return A}static index(m,b){const y=new Array(b.length);return c.fillIndex(m,b,y),y}static fillIndex(m,b,y){const w=m.length-b.length;for(let v=0;v=0;Z--)x[Z]=R%S[Z],R=Math.floor(R/S[Z]);G||(c.fillIndex(x,m.dims,I),L=m.get(I)),D||(c.fillIndex(x,b.dims,N),$=b.get(N)),O.set(x,y(L,$))}}return O}}static isValidBroadcast(m,b){const y=m.length,w=b.length;if(y>w)return!1;for(let v=1;v<=y;v++)if(m[y-v]!==1&&m[y-v]!==b[w-v])return!1;return!0}static getBroadcastDims(m,b){const y=m.length,w=[];for(let v=0;v1&&A===1&&w.unshift(S)}return w}}n.BroadcastUtil=c,n.arrayCopyHelper=function(g,m,b,y,w){if(y<0||y>=m.length)throw new Error("sourceIndex out of bounds");if(b<0||b>=g.length)throw new Error("targetIndex out of bounds");if(y+w>m.length)throw new Error("source indices to be copied are outside bounds");if(b+w>g.length)throw new Error("target array is too small to hold result");for(let v=0;vp.default.isLong(b)?b.toNumber():b)}static tensorValueTypeFromProto(m){return{tensorType:o.tensorDataTypeFromProto(m.elemType),shape:{dims:o.tensorDimsFromProto(m.shape.dim.map(b=>b.dimValue))}}}static tensorDimsFromORTFormat(m){const b=[];for(let y=0;ym.length)throw new Error(`invalid dimension of ${b} for sizeFromDimension as Tensor has ${m.length} dimensions.`);return e.getSizeFromDimensionRange(m,b,m.length)}static sizeToDimension(m,b){if(b<0||b>m.length)throw new Error(`invalid dimension of ${b} for sizeToDimension as Tensor has ${m.length} dimensions.`);return e.getSizeFromDimensionRange(m,0,b)}static getSizeFromDimensionRange(m,b,y){let w=1;for(let v=b;v=0;--w)y[w]=y[w+1]*m[w+1];return y}static transpose(m){return m.slice().reverse()}static indicesToOffset(m,b,y){y===void 0&&(y=m.length);let w=0;for(let v=0;v=b)throw new 
Error("unsupported axis for this operation.");return m<0?m+b:m}static normalizeAxes(m,b){return m.map(y=>this.normalizeAxis(y,b))}static incrementIndex(m,b,y){if(b.length===0||m.length===0)throw new Error("Index incrementing unsupported for scalar Tensor");if(y===void 0)y=b.length;else if(y<=0||y>b.length)throw new Error("Incorrect axis to increment on");for(let w=y-1;w>=0&&(m[w]++,!(m[w]=m.length)throw new Error("the dimension with value zero exceeds the dimension size of the input tensor");w[O]=m[O]}else w[O]=b[O];S*=w[O]}}const A=e.size(m);if(v!==-1){if(A%S!=0)throw new Error(`the input tensor cannot be reshaped to the requested shape. Input shape: [${m}] Output shape: [${b}]`);w[v]=A/S}else if(S!==A)throw new Error("reshapedDims and originalDims don't have matching sizes");return w}static sortBasedOnPerm(m,b){return b?b.map(y=>m[y]):m.slice().reverse()}static padShape(m,b){const y=m.length;return m.map((w,v)=>w+b[v]+b[v+y])}static areEqual(m,b){return m.length===b.length&&m.every((y,w)=>y===b[w])}static validateDimsAndCalcSize(m){if(m.length>6)throw new TypeError("Only rank 0 to 6 is supported for tensor shape.");let b=1;for(const y of m){if(!Number.isInteger(y))throw new TypeError(`Invalid shape: ${y} is not an integer`);if(y<0||y>2147483647)throw new TypeError(`Invalid shape: length ${y} is not allowed`);b*=y}return b}static flattenShape(m,b){b<0&&(b+=m.length);const y=m.reduce((v,S)=>v*S,1),w=m.slice(b).reduce((v,S)=>v*S,1);return[y/w,w]}static squeezeShape(m,b){const y=new Array;b=e.normalizeAxes(b,m.length);for(let w=0;w=0;if(v&&m[w]!==1)throw new Error("squeeze an axis of size different than 1");(b.length===0&&m[w]>1||b.length>0&&!v)&&y.push(m[w])}return y}static unsqueezeShape(m,b){const y=new Array(m.length+b.length);y.fill(0);for(let v=0;v=y.length)throw new Error("'axes' has an out of range axis");if(y[S]!==0)throw new Error("'axes' has a duplicate axis");y[S]=1}let w=0;for(let v=0;v=m.length)throw new Error("sourceIndex out of bounds");if(b<0||b>=g.length)throw new Error("targetIndex out of bounds");if(y+w>m.length)throw new Error("source indices to be copied are outside bounds");if(b+w>g.length)throw new Error("target array is too small to hold result");for(let v=0;v=m.length)throw new Error("sourceIndex out of bounds");if(b<0||b>=g.length)throw new Error("targetIndex out of bounds");if(y+w>m.length)throw new Error("source indices to be copied are outside bounds");if(b+w>g.length)throw new Error("target array is too small to hold result");for(let S=0;S=m.length)throw new Error("sourceIndex out of bounds");if(b<0||b>=g.length)throw new Error("targetIndex out of bounds");if(y+w>m.length)throw new Error("source indices to be copied are outside bounds");if(b+w>g.length)throw new Error("target array is too small to hold result");for(let S=0;S=m.length)throw new Error("sourceIndex out of bounds");if(b<0||b>=g.length)throw new Error("targetIndex out of bounds");if(y+w>m.length)throw new Error("source indices to be copied are outside bounds");if(b+w>g.length)throw new Error("target array is too small to hold result");for(let v=0;vb.push($));const A=i.calcReduceShape(S,b,!0),O=e.size(A),x=new h.Tensor(A,m.type),I=e.computeStrides(A),N=e.computeStrides(S),R=new Array(S.length);for(let L=0;L=b.length)return S(m[v]);const x=b[w],I=x>=y.length?1:e.size(y.slice(x+1));for(let N=0;Nv!==0)}}n.ReduceUtil=i;class d{static adjustPoolAttributes(m,b,y,w,v,S){if(!m&&y.length!==b.length-2)throw new Error("length of specified kernel shapes should be 2 less than length of input 
dimensions");if(m)for(let A=0;A=y.length?y.push(b[A+2]):y[A]=b[A+2];for(let A=0;A=y[A]||S[A+y.length]>=y[A])throw new Error("pads should be smaller than kernel")}}static adjustPadsBasedOnAutoPad(m,b,y,w,v,S){if(S){if(v.length!==2*(m.length-2))throw new Error("length of pads should be twice the length of data dimensions");if(b.length!==m.length-2)throw new Error("length of strides should be the length of data dimensions");if(w.length!==m.length-2)throw new Error("length of kernel shapes should be the length of data dimensions");for(let A=0;A{Object.defineProperty(n,"__esModule",{value:!0}),n.iterateExtraOptions=void 0,n.iterateExtraOptions=(a,u,l,p)=>{if(typeof a=="object"&&a!==null){if(l.has(a))throw new Error("Circular reference in options");l.add(a)}Object.entries(a).forEach(([s,h])=>{const f=u?u+s:s;if(typeof h=="object")(0,n.iterateExtraOptions)(h,f+".",l,p);else if(typeof h=="string"||typeof h=="number")p(f,h.toString());else{if(typeof h!="boolean")throw new Error("Can't handle extra config type: "+typeof h);p(f,h?"1":"0")}})}},2157:function(_,n,a){var u,l=this&&this.__createBinding||(Object.create?function(I,N,R,L){L===void 0&&(L=R);var $=Object.getOwnPropertyDescriptor(N,R);$&&!("get"in $?!N.__esModule:$.writable||$.configurable)||($={enumerable:!0,get:function(){return N[R]}}),Object.defineProperty(I,L,$)}:function(I,N,R,L){L===void 0&&(L=R),I[L]=N[R]}),p=this&&this.__setModuleDefault||(Object.create?function(I,N){Object.defineProperty(I,"default",{enumerable:!0,value:N})}:function(I,N){I.default=N}),s=this&&this.__importStar||function(I){if(I&&I.__esModule)return I;var N={};if(I!=null)for(var R in I)R!=="default"&&Object.prototype.hasOwnProperty.call(I,R)&&l(N,I,R);return p(N,I),N};Object.defineProperty(n,"__esModule",{value:!0}),n.endProfiling=n.run=n.releaseSession=n.createSession=n.createSessionFinalize=n.createSessionAllocate=n.initOrt=n.initWasm=void 0;const h=a(1670),f=s(a(349)),c=a(6361),o=()=>!!h.env.wasm.proxy&&typeof document<"u";let t,e,r,i=!1,d=!1,g=!1;const m=[],b=[],y=[],w=[],v=[],S=[],A=()=>{if(i||!d||g||!t)throw new Error("worker not ready")},O=I=>{switch(I.data.type){case"init-wasm":i=!1,I.data.err?(g=!0,e[1](I.data.err)):(d=!0,e[0]());break;case"init-ort":I.data.err?r[1](I.data.err):r[0]();break;case"create_allocate":I.data.err?m.shift()[1](I.data.err):m.shift()[0](I.data.out);break;case"create_finalize":I.data.err?b.shift()[1](I.data.err):b.shift()[0](I.data.out);break;case"create":I.data.err?y.shift()[1](I.data.err):y.shift()[0](I.data.out);break;case"release":I.data.err?w.shift()[1](I.data.err):w.shift()[0]();break;case"run":I.data.err?v.shift()[1](I.data.err):v.shift()[0](I.data.out);break;case"end-profiling":I.data.err?S.shift()[1](I.data.err):S.shift()[0]()}},x=typeof document<"u"?(u=document==null?void 0:document.currentScript)===null||u===void 0?void 0:u.src:void 0;n.initWasm=async()=>{if(o()){if(d)return;if(i)throw new Error("multiple calls to 'initWasm()' detected.");if(g)throw new Error("previous call to 'initWasm()' failed.");return i=!0,h.env.wasm.wasmPaths===void 0&&x&&x.indexOf("blob:")!==0&&(h.env.wasm.wasmPaths=x.substr(0,+x.lastIndexOf("/")+1)),new Promise((I,N)=>{t==null||t.terminate(),t=a(9710).Z(),t.onmessage=O,e=[I,N];const R={type:"init-wasm",in:h.env.wasm};t.postMessage(R)})}return(0,c.initializeWebAssembly)(h.env.wasm)},n.initOrt=async(I,N)=>{if(o())return A(),new Promise((R,L)=>{r=[R,L];const $={type:"init-ort",in:{numThreads:I,loggingLevel:N}};t.postMessage($)});f.initOrt(I,N)},n.createSessionAllocate=async I=>o()?(A(),new 
Promise((N,R)=>{m.push([N,R]);const L={type:"create_allocate",in:{model:I}};t.postMessage(L,[I.buffer])})):f.createSessionAllocate(I),n.createSessionFinalize=async(I,N)=>o()?(A(),new Promise((R,L)=>{b.push([R,L]);const $={type:"create_finalize",in:{modeldata:I,options:N}};t.postMessage($)})):f.createSessionFinalize(I,N),n.createSession=async(I,N)=>o()?(A(),new Promise((R,L)=>{y.push([R,L]);const $={type:"create",in:{model:I,options:N}};t.postMessage($,[I.buffer])})):f.createSession(I,N),n.releaseSession=async I=>{if(o())return A(),new Promise((N,R)=>{w.push([N,R]);const L={type:"release",in:I};t.postMessage(L)});f.releaseSession(I)},n.run=async(I,N,R,L,$)=>o()?(A(),new Promise((G,D)=>{v.push([G,D]);const j={type:"run",in:{sessionId:I,inputIndices:N,inputs:R,outputIndices:L,options:$}};t.postMessage(j,f.extractTransferableBuffers(R))})):f.run(I,N,R,L,$),n.endProfiling=async I=>{if(o())return A(),new Promise((N,R)=>{S.push([N,R]);const L={type:"end-profiling",in:I};t.postMessage(L)});f.endProfiling(I)}},586:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.setRunOptions=void 0;const u=a(7967),l=a(4983),p=a(6361);n.setRunOptions=s=>{const h=(0,p.getInstance)();let f=0;const c=[],o=s||{};try{if((s==null?void 0:s.logSeverityLevel)===void 0)o.logSeverityLevel=2;else if(typeof s.logSeverityLevel!="number"||!Number.isInteger(s.logSeverityLevel)||s.logSeverityLevel<0||s.logSeverityLevel>4)throw new Error(`log serverity level is not valid: ${s.logSeverityLevel}`);if((s==null?void 0:s.logVerbosityLevel)===void 0)o.logVerbosityLevel=0;else if(typeof s.logVerbosityLevel!="number"||!Number.isInteger(s.logVerbosityLevel))throw new Error(`log verbosity level is not valid: ${s.logVerbosityLevel}`);(s==null?void 0:s.terminate)===void 0&&(o.terminate=!1);let t=0;if((s==null?void 0:s.tag)!==void 0&&(t=(0,l.allocWasmString)(s.tag,c)),f=h._OrtCreateRunOptions(o.logSeverityLevel,o.logVerbosityLevel,!!o.terminate,t),f===0)throw new Error("Can't create run options");return(s==null?void 0:s.extra)!==void 0&&(0,u.iterateExtraOptions)(s.extra,"",new WeakSet,(e,r)=>{const i=(0,l.allocWasmString)(e,c),d=(0,l.allocWasmString)(r,c);if(h._OrtAddRunConfigEntry(f,i,d)!==0)throw new Error(`Can't set a run config entry: ${e} - ${r}`)}),[f,c]}catch(t){throw f!==0&&h._OrtReleaseRunOptions(f),c.forEach(h._free),t}}},2306:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.OnnxruntimeWebAssemblySessionHandler=void 0;const u=a(2806),l=a(1670),p=a(2850),s=a(2157);let h;n.OnnxruntimeWebAssemblySessionHandler=class{async createSessionAllocate(f){const c=await fetch(f),o=await c.arrayBuffer();return(0,s.createSessionAllocate)(new Uint8Array(o))}async loadModel(f,c){if(h||(await(0,s.initOrt)(l.env.wasm.numThreads,(o=>{switch(o){case"verbose":return 0;case"info":return 1;case"warning":return 2;case"error":return 3;case"fatal":return 4;default:throw new Error(`unsupported logging level: ${o}`)}})(l.env.logLevel)),h=!0),typeof f=="string")if(typeof fetch>"u"){const o=await(0,p.promisify)(u.readFile)(f);[this.sessionId,this.inputNames,this.outputNames]=await(0,s.createSession)(o,c)}else{const o=await this.createSessionAllocate(f);[this.sessionId,this.inputNames,this.outputNames]=await(0,s.createSessionFinalize)(o,c)}else[this.sessionId,this.inputNames,this.outputNames]=await(0,s.createSession)(f,c)}async dispose(){return(0,s.releaseSession)(this.sessionId)}async run(f,c,o){const t=[],e=[];Object.entries(f).forEach(g=>{const m=g[0],b=g[1],y=this.inputNames.indexOf(m);if(y===-1)throw new Error(`invalid input 
'${m}'`);t.push(b),e.push(y)});const r=[];Object.entries(c).forEach(g=>{const m=g[0],b=this.outputNames.indexOf(m);if(b===-1)throw new Error(`invalid output '${m}'`);r.push(b)});const i=await(0,s.run)(this.sessionId,e,t.map(g=>[g.type,g.dims,g.data]),r,o),d={};for(let g=0;g{Object.defineProperty(n,"__esModule",{value:!0}),n.setSessionOptions=void 0;const u=a(7967),l=a(4983),p=a(6361);n.setSessionOptions=s=>{const h=(0,p.getInstance)();let f=0;const c=[],o=s||{};(t=>{t.extra||(t.extra={}),t.extra.session||(t.extra.session={});const e=t.extra.session;e.use_ort_model_bytes_directly||(e.use_ort_model_bytes_directly="1")})(o);try{(s==null?void 0:s.graphOptimizationLevel)===void 0&&(o.graphOptimizationLevel="all");const t=(i=>{switch(i){case"disabled":return 0;case"basic":return 1;case"extended":return 2;case"all":return 99;default:throw new Error(`unsupported graph optimization level: ${i}`)}})(o.graphOptimizationLevel);(s==null?void 0:s.enableCpuMemArena)===void 0&&(o.enableCpuMemArena=!0),(s==null?void 0:s.enableMemPattern)===void 0&&(o.enableMemPattern=!0),(s==null?void 0:s.executionMode)===void 0&&(o.executionMode="sequential");const e=(i=>{switch(i){case"sequential":return 0;case"parallel":return 1;default:throw new Error(`unsupported execution mode: ${i}`)}})(o.executionMode);let r=0;if((s==null?void 0:s.logId)!==void 0&&(r=(0,l.allocWasmString)(s.logId,c)),(s==null?void 0:s.logSeverityLevel)===void 0)o.logSeverityLevel=2;else if(typeof s.logSeverityLevel!="number"||!Number.isInteger(s.logSeverityLevel)||s.logSeverityLevel<0||s.logSeverityLevel>4)throw new Error(`log serverity level is not valid: ${s.logSeverityLevel}`);if((s==null?void 0:s.logVerbosityLevel)===void 0)o.logVerbosityLevel=0;else if(typeof s.logVerbosityLevel!="number"||!Number.isInteger(s.logVerbosityLevel))throw new Error(`log verbosity level is not valid: ${s.logVerbosityLevel}`);if((s==null?void 0:s.enableProfiling)===void 0&&(o.enableProfiling=!1),f=h._OrtCreateSessionOptions(t,!!o.enableCpuMemArena,!!o.enableMemPattern,e,!!o.enableProfiling,0,r,o.logSeverityLevel,o.logVerbosityLevel),f===0)throw new Error("Can't create session options");return s!=null&&s.executionProviders&&((i,d,g)=>{for(const m of d){let b=typeof m=="string"?m:m.name;switch(b){case"xnnpack":b="XNNPACK";break;case"wasm":case"cpu":continue;default:throw new Error(`not supported EP: ${b}`)}const y=(0,l.allocWasmString)(b,g);if((0,p.getInstance)()._OrtAppendExecutionProvider(i,y)!==0)throw new Error(`Can't append execution provider: ${b}`)}})(f,s.executionProviders,c),(s==null?void 0:s.extra)!==void 0&&(0,u.iterateExtraOptions)(s.extra,"",new WeakSet,(i,d)=>{const g=(0,l.allocWasmString)(i,c),m=(0,l.allocWasmString)(d,c);if(h._OrtAddSessionConfigEntry(f,g,m)!==0)throw new Error(`Can't set a session config entry: ${i} - ${d}`)}),[f,c]}catch(t){throw f!==0&&h._OrtReleaseSessionOptions(f),c.forEach(h._free),t}}},4983:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.allocWasmString=void 0;const u=a(6361);n.allocWasmString=(l,p)=>{const s=(0,u.getInstance)(),h=s.lengthBytesUTF8(l)+1,f=s._malloc(h);return s.stringToUTF8(l,f,h),p.push(f),f}},349:(_,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.extractTransferableBuffers=n.endProfiling=n.run=n.releaseSession=n.createSession=n.createSessionFinalize=n.createSessionAllocate=n.initOrt=void 0;const u=a(586),l=a(4919),p=a(4983),s=a(6361);n.initOrt=(t,e)=>{const r=(0,s.getInstance)()._OrtInit(t,e);if(r!==0)throw new Error(`Can't initialize onnxruntime. 
error code = ${r}`)};const h=new Map;n.createSessionAllocate=t=>{const e=(0,s.getInstance)(),r=e._malloc(t.byteLength);return e.HEAPU8.set(t,r),[r,t.byteLength]},n.createSessionFinalize=(t,e)=>{const r=(0,s.getInstance)();let i=0,d=0,g=[];try{if([d,g]=(0,l.setSessionOptions)(e),i=r._OrtCreateSession(t[0],t[1],d),i===0)throw new Error("Can't create a session")}finally{r._free(t[0]),r._OrtReleaseSessionOptions(d),g.forEach(r._free)}const m=r._OrtGetInputCount(i),b=r._OrtGetOutputCount(i),y=[],w=[],v=[],S=[];for(let A=0;A{const r=(0,n.createSessionAllocate)(t);return(0,n.createSessionFinalize)(r,e)},n.releaseSession=t=>{const e=(0,s.getInstance)(),r=h.get(t);if(!r)throw new Error("invalid session id");const i=r[0],d=r[1],g=r[2];d.forEach(e._OrtFree),g.forEach(e._OrtFree),e._OrtReleaseSession(i),h.delete(t)};const f=t=>{switch(t){case"int8":return 3;case"uint8":return 2;case"bool":return 9;case"int16":return 5;case"uint16":return 4;case"int32":return 6;case"uint32":return 12;case"float32":return 1;case"float64":return 11;case"string":return 8;case"int64":return 7;case"uint64":return 13;default:throw new Error(`unsupported data type: ${t}`)}},c=t=>{switch(t){case 3:return"int8";case 2:return"uint8";case 9:return"bool";case 5:return"int16";case 4:return"uint16";case 6:return"int32";case 12:return"uint32";case 1:return"float32";case 11:return"float64";case 8:return"string";case 7:return"int64";case 13:return"uint64";default:throw new Error(`unsupported data type: ${t}`)}},o=t=>{switch(t){case"float32":return Float32Array;case"uint8":case"bool":return Uint8Array;case"int8":return Int8Array;case"uint16":return Uint16Array;case"int16":return Int16Array;case"int32":return Int32Array;case"float64":return Float64Array;case"uint32":return Uint32Array;case"int64":return BigInt64Array;case"uint64":return BigUint64Array;default:throw new Error(`unsupported type: ${t}`)}};n.run=(t,e,r,i,d)=>{const g=(0,s.getInstance)(),m=h.get(t);if(!m)throw new Error("invalid session id");const b=m[0],y=m[1],w=m[2],v=e.length,S=i.length;let A=0,O=[];const x=[],I=[];try{[A,O]=(0,u.setRunOptions)(d);for(let D=0;Dg.HEAP32[ve++]=ye);const oe=g._OrtCreateTensor(f(j),J,ee,Se,Z.length);if(oe===0)throw new Error("Can't create a tensor");x.push(oe)}finally{g.stackRestore(ue)}}const N=g.stackSave(),R=g.stackAlloc(4*v),L=g.stackAlloc(4*v),$=g.stackAlloc(4*S),G=g.stackAlloc(4*S);try{let D=R/4,j=L/4,Z=$/4,X=G/4;for(let ue=0;ueAe*Be);if(ye=c(Fe),ye==="string"){const Ae=[];let Be=be/4;for(let Ge=0;Ge{const e=(0,s.getInstance)(),r=h.get(t);if(!r)throw new Error("invalid session id");const i=r[0],d=e._OrtEndProfiling(i);if(d===0)throw new Error("Can't get an profile file name");e._OrtFree(d)},n.extractTransferableBuffers=t=>{const e=[];for(const r of t){const i=r[2];!Array.isArray(i)&&i.buffer&&e.push(i.buffer)}return e}},6361:function(_,n,a){var u=this&&this.__createBinding||(Object.create?function(d,g,m,b){b===void 0&&(b=m);var y=Object.getOwnPropertyDescriptor(g,m);y&&!("get"in y?!g.__esModule:y.writable||y.configurable)||(y={enumerable:!0,get:function(){return g[m]}}),Object.defineProperty(d,b,y)}:function(d,g,m,b){b===void 0&&(b=m),d[b]=g[m]}),l=this&&this.__setModuleDefault||(Object.create?function(d,g){Object.defineProperty(d,"default",{enumerable:!0,value:g})}:function(d,g){d.default=g}),p=this&&this.__importStar||function(d){if(d&&d.__esModule)return d;var g={};if(d!=null)for(var m in d)m!=="default"&&Object.prototype.hasOwnProperty.call(d,m)&&u(g,d,m);return l(g,d),g},s=this&&this.__importDefault||function(d){return 
d&&d.__esModule?d:{default:d}};Object.defineProperty(n,"__esModule",{value:!0}),n.dispose=n.getInstance=n.initializeWebAssembly=void 0;const h=p(a(6449)),f=s(a(932)),c=a(3474);let o,t=!1,e=!1,r=!1;const i=(d,g)=>g?d?"ort-wasm-simd-threaded.wasm":"ort-wasm-threaded.wasm":d?"ort-wasm-simd.wasm":"ort-wasm.wasm";n.initializeWebAssembly=async d=>{if(t)return Promise.resolve();if(e)throw new Error("multiple calls to 'initializeWebAssembly()' detected.");if(r)throw new Error("previous call to 'initializeWebAssembly()' failed.");e=!0;const g=d.initTimeout,m=d.numThreads,b=d.simd,y=m>1&&(()=>{try{return typeof SharedArrayBuffer<"u"&&(typeof MessageChannel<"u"&&new MessageChannel().port1.postMessage(new SharedArrayBuffer(1)),WebAssembly.validate(new Uint8Array([0,97,115,109,1,0,0,0,1,4,1,96,0,0,3,2,1,0,5,4,1,3,1,1,10,11,1,9,0,65,0,254,16,2,0,26,11])))}catch{return!1}})(),w=b&&(()=>{try{return WebAssembly.validate(new Uint8Array([0,97,115,109,1,0,0,0,1,4,1,96,0,0,3,2,1,0,10,30,1,28,0,65,0,253,15,253,12,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,253,186,1,26,11]))}catch{return!1}})(),v=typeof d.wasmPaths=="string"?d.wasmPaths:void 0,S=i(!1,y),A=i(w,y),O=typeof d.wasmPaths=="object"?d.wasmPaths[A]:void 0;let x=!1;const I=[];if(g>0&&I.push(new Promise(N=>{setTimeout(()=>{x=!0,N()},g)})),I.push(new Promise((N,R)=>{const L=y?c:f.default,$={locateFile:(G,D)=>y&&G.endsWith(".worker.js")&&typeof Blob<"u"?URL.createObjectURL(new Blob([a(4154)],{type:"text/javascript"})):G===S?O??(v??D)+A:D+G};if(y)if(typeof Blob>"u")$.mainScriptUrlOrBlob=h.join("/","ort-wasm-threaded.js");else{const G=`var ortWasmThreaded=(function(){var _scriptDir;return ${L.toString()}})();`;$.mainScriptUrlOrBlob=new Blob([G],{type:"text/javascript"})}L($).then(G=>{e=!1,t=!0,o=G,N()},G=>{e=!1,r=!0,R(G)})})),await Promise.race(I),x)throw new Error(`WebAssembly backend initializing failed due to timeout: ${g}ms`)},n.getInstance=()=>{if(t&&o)return o;throw new Error("WebAssembly is not initialized yet.")},n.dispose=()=>{var d;!t||e||r||(e=!0,(d=o.PThread)===null||d===void 0||d.terminateAllThreads(),o=void 0,e=!1,t=!1,r=!0)}},9710:(_,n,a)=>{a.d(n,{Z:()=>p});var u=a(477),l=a.n(u);function p(){return l()('/*!\n* ONNX Runtime Web v1.14.0\n* Copyright (c) Microsoft Corporation. 
All rights reserved.\n* Licensed under the MIT License.\n*/\n(()=>{var t={474:(t,e,n)=>{var _scriptDir,r=(_scriptDir=(_scriptDir="undefined"!=typeof document&&document.currentScript?document.currentScript.src:void 0)||"/index.js",function(t){function e(){return j.buffer!=D&&N(j.buffer),P}function r(){return j.buffer!=D&&N(j.buffer),U}function a(){return j.buffer!=D&&N(j.buffer),F}function i(){return j.buffer!=D&&N(j.buffer),I}function o(){return j.buffer!=D&&N(j.buffer),W}var u,c,s;t=t||{},u||(u=void 0!==t?t:{}),u.ready=new Promise((function(t,e){c=t,s=e}));var l,f,p,h,d,y,b=Object.assign({},u),m="./this.program",g=(t,e)=>{throw e},v="object"==typeof window,w="function"==typeof importScripts,_="object"==typeof process&&"object"==typeof process.versions&&"string"==typeof process.versions.node,O=u.ENVIRONMENT_IS_PTHREAD||!1,A="";function S(t){return u.locateFile?u.locateFile(t,A):A+t}if(_){let e;A=w?n(908).dirname(A)+"/":"//",y=()=>{d||(h=n(384),d=n(908))},l=function(t,e){return y(),t=d.normalize(t),h.readFileSync(t,e?void 0:"utf8")},p=t=>((t=l(t,!0)).buffer||(t=new Uint8Array(t)),t),f=(t,e,n)=>{y(),t=d.normalize(t),h.readFile(t,(function(t,r){t?n(t):e(r.buffer)}))},1{if(Q())throw process.exitCode=t,e;e instanceof ct||x("exiting due to exception: "+e),process.exit(t)},u.inspect=function(){return"[Emscripten Module object]"};try{e=n(925)}catch(t){throw console.error(\'The "worker_threads" module is not supported in this node.js build - perhaps a newer version is needed?\'),t}n.g.Worker=e.Worker}else(v||w)&&(w?A=self.location.href:"undefined"!=typeof document&&document.currentScript&&(A=document.currentScript.src),_scriptDir&&(A=_scriptDir),A=0!==A.indexOf("blob:")?A.substr(0,A.replace(/[?#].*/,"").lastIndexOf("/")+1):"",_||(l=t=>{var e=new XMLHttpRequest;return e.open("GET",t,!1),e.send(null),e.responseText},w&&(p=t=>{var e=new XMLHttpRequest;return e.open("GET",t,!1),e.responseType="arraybuffer",e.send(null),new Uint8Array(e.response)}),f=(t,e,n)=>{var r=new XMLHttpRequest;r.open("GET",t,!0),r.responseType="arraybuffer",r.onload=()=>{200==r.status||0==r.status&&r.response?e(r.response):n()},r.onerror=n,r.send(null)}));_&&"undefined"==typeof performance&&(n.g.performance=n(953).performance);var T=console.log.bind(console),E=console.warn.bind(console);_&&(y(),T=t=>h.writeSync(1,t+"\\n"),E=t=>h.writeSync(2,t+"\\n"));var M,C=u.print||T,x=u.printErr||E;Object.assign(u,b),b=null,u.thisProgram&&(m=u.thisProgram),u.quit&&(g=u.quit),u.wasmBinary&&(M=u.wasmBinary);var R=u.noExitRuntime||!1;"object"!=typeof WebAssembly&&at("no native wasm support detected");var j,k,D,P,U,F,I,W,H=!1,L="undefined"!=typeof TextDecoder?new TextDecoder("utf8"):void 0;function z(t,e,n){var r=(e>>>=0)+n;for(n=e;t[n]&&!(n>=r);)++n;if(16(a=224==(240&a)?(15&a)<<12|i<<6|o:(7&a)<<18|i<<12|o<<6|63&t[e++])?r+=String.fromCharCode(a):(a-=65536,r+=String.fromCharCode(55296|a>>10,56320|1023&a))}}else r+=String.fromCharCode(a)}return r}function Y(t,e){return(t>>>=0)?z(r(),t,e):""}function B(t,e,n,r){if(!(0>>=0;r=n+r-1;for(var i=0;i=o&&(o=65536+((1023&o)<<10)|1023&t.charCodeAt(++i)),127>=o){if(n>=r)break;e[n++>>>0]=o}else{if(2047>=o){if(n+1>=r)break;e[n++>>>0]=192|o>>6}else{if(65535>=o){if(n+2>=r)break;e[n++>>>0]=224|o>>12}else{if(n+3>=r)break;e[n++>>>0]=240|o>>18,e[n++>>>0]=128|o>>12&63}e[n++>>>0]=128|o>>6&63}e[n++>>>0]=128|63&o}}return e[n>>>0]=0,n-a}function G(t){for(var e=0,n=0;n=r?e++:2047>=r?e+=2:55296<=r&&57343>=r?(e+=4,++n):e+=3}return e}function N(t){D=t,u.HEAP8=P=new Int8Array(t),u.HEAP16=new Int16Array(t),u.HEAP32=F=new 
Int32Array(t),u.HEAPU8=U=new Uint8Array(t),u.HEAPU16=new Uint16Array(t),u.HEAPU32=I=new Uint32Array(t),u.HEAPF32=new Float32Array(t),u.HEAPF64=W=new Float64Array(t)}O&&(D=u.buffer);var V=u.INITIAL_MEMORY||16777216;if(O)j=u.wasmMemory,D=u.buffer;else if(u.wasmMemory)j=u.wasmMemory;else if(!((j=new WebAssembly.Memory({initial:V/65536,maximum:65536,shared:!0})).buffer instanceof SharedArrayBuffer))throw x("requested a shared WebAssembly.Memory but the returned buffer is not a SharedArrayBuffer, indicating that while the browser has SharedArrayBuffer it does not have WebAssembly threads support - you may need to set a flag"),_&&console.log("(on node you may need: --experimental-wasm-threads --experimental-wasm-bulk-memory and also use a recent version)"),Error("bad memory");j&&(D=j.buffer),V=D.byteLength,N(D);var $,q=[],X=[],J=[],Z=[];function Q(){return R||!1}function K(){var t=u.preRun.shift();q.unshift(t)}var tt,et=0,nt=null,rt=null;function at(t){throw O?postMessage({cmd:"onAbort",arg:t}):u.onAbort&&u.onAbort(t),x(t="Aborted("+t+")"),H=!0,t=new WebAssembly.RuntimeError(t+". Build with -sASSERTIONS for more info."),s(t),t}function it(){return tt.startsWith("data:application/octet-stream;base64,")}function ot(){var t=tt;try{if(t==tt&&M)return new Uint8Array(M);if(p)return p(t);throw"both async and sync fetching of the wasm failed"}catch(t){at(t)}}tt="ort-wasm-threaded.wasm",it()||(tt=S(tt));var ut={};function ct(t){this.name="ExitStatus",this.message="Program terminated with exit("+t+")",this.status=t}function st(t){(t=ht.Vb[t])||at(),ht.mc(t)}function lt(t){var e=ht.Cc();if(!e)return 6;ht.ac.push(e),ht.Vb[t.Ub]=e,e.Ub=t.Ub;var n={cmd:"run",start_routine:t.Ic,arg:t.zc,pthread_ptr:t.Ub};return e.$b=()=>{n.time=performance.now(),e.postMessage(n,t.Nc)},e.loaded&&(e.$b(),delete e.$b),0}function ft(t){if(O)return $t(1,1,t);Q()||(ht.oc(),u.onExit&&u.onExit(t),H=!0),g(t,new ct(t))}function pt(t,e){if(!e&&O)throw bt(t),"unwind";Q()||O||(me(),dt(J),be(0),re[1].length&&ae(1,10),re[2].length&&ae(2,10),ht.oc()),ft(t)}var ht={Yb:[],ac:[],qc:[],Vb:{},fc:function(){O&&ht.Ec()},Pc:function(){},Ec:function(){ht.receiveObjectTransfer=ht.Gc,ht.threadInitTLS=ht.pc,ht.setExitStatus=ht.nc,R=!1},nc:function(){},oc:function(){for(var t of Object.values(ht.Vb))ht.mc(t);for(t of ht.Yb)t.terminate();ht.Yb=[]},mc:function(t){var e=t.Ub;delete ht.Vb[e],ht.Yb.push(t),ht.ac.splice(ht.ac.indexOf(t),1),t.Ub=0,Oe(e)},Gc:function(){},pc:function(){ht.qc.forEach((t=>t()))},Fc:function(t,e){t.onmessage=n=>{var r=(n=n.data).cmd;if(t.Ub&&(ht.Bc=t.Ub),n.targetThread&&n.targetThread!=he()){var a=ht.Vb[n.Qc];a?a.postMessage(n,n.transferList):x(\'Internal error! Worker sent a message "\'+r+\'" to target pthread \'+n.targetThread+", but that thread no longer exists!")}else"processProxyingQueue"===r?zt(n.queue):"spawnThread"===r?lt(n):"cleanupThread"===r?st(n.thread):"killThread"===r?(n=n.thread,r=ht.Vb[n],delete ht.Vb[n],r.terminate(),Oe(n),ht.ac.splice(ht.ac.indexOf(r),1),r.Ub=0):"cancelThread"===r?ht.Vb[n.thread].postMessage({cmd:"cancel"}):"loaded"===r?(t.loaded=!0,e&&e(t),t.$b&&(t.$b(),delete t.$b)):"print"===r?C("Thread "+n.threadId+": "+n.text):"printErr"===r?x("Thread "+n.threadId+": "+n.text):"alert"===r?alert("Thread "+n.threadId+": "+n.text):"setimmediate"===n.target?t.postMessage(n):"onAbort"===r?u.onAbort&&u.onAbort(n.arg):r&&x("worker sent an unknown command "+r);ht.Bc=void 0},t.onerror=t=>{throw x("worker sent an error! 
"+t.filename+":"+t.lineno+": "+t.message),t},_&&(t.on("message",(function(e){t.onmessage({data:e})})),t.on("error",(function(e){t.onerror(e)})),t.on("detachedExit",(function(){}))),t.postMessage({cmd:"load",urlOrBlob:u.mainScriptUrlOrBlob||_scriptDir,wasmMemory:j,wasmModule:k})},yc:function(){var t=S("ort-wasm-threaded.worker.js");ht.Yb.push(new Worker(t))},Cc:function(){return 0==ht.Yb.length&&(ht.yc(),ht.Fc(ht.Yb[0])),ht.Yb.pop()}};function dt(t){for(;0>2>>>0];t=a()[t+48>>2>>>0],Te(e,e-t),Me(e)};var mt=[];function gt(t){var e=mt[t];return e||(t>=mt.length&&(mt.length=t+1),mt[t]=e=$.get(t)),e}u.invokeEntryPoint=function(t,e){t=gt(t)(e),Q()?ht.nc(t):Ae(t)};var vt,wt,_t=[],Ot=0,At=0;function St(t){this.Zb=t,this.Sb=t-24,this.xc=function(t){i()[this.Sb+4>>2>>>0]=t},this.bc=function(){return i()[this.Sb+4>>2>>>0]},this.wc=function(t){i()[this.Sb+8>>2>>>0]=t},this.Dc=function(){return i()[this.Sb+8>>2>>>0]},this.rc=function(){a()[this.Sb>>2>>>0]=0},this.hc=function(t){t=t?1:0,e()[this.Sb+12>>0>>>0]=t},this.uc=function(){return 0!=e()[this.Sb+12>>0>>>0]},this.ic=function(t){t=t?1:0,e()[this.Sb+13>>0>>>0]=t},this.kc=function(){return 0!=e()[this.Sb+13>>0>>>0]},this.fc=function(t,e){this.cc(0),this.xc(t),this.wc(e),this.rc(),this.hc(!1),this.ic(!1)},this.sc=function(){Atomics.add(a(),this.Sb>>2,1)},this.Hc=function(){return 1===Atomics.sub(a(),this.Sb>>2,1)},this.cc=function(t){i()[this.Sb+16>>2>>>0]=t},this.tc=function(){return i()[this.Sb+16>>2>>>0]},this.vc=function(){if(Re(this.bc()))return i()[this.Zb>>2>>>0];var t=this.tc();return 0!==t?t:this.Zb}}function Tt(t){return ye(new St(t).Sb)}function Et(t,e,n,r){return O?$t(3,1,t,e,n,r):Mt(t,e,n,r)}function Mt(t,e,n,r){if("undefined"==typeof SharedArrayBuffer)return x("Current environment does not support SharedArrayBuffer, pthreads are not available!"),6;var a=[];return O&&0===a.length?Et(t,e,n,r):(t={Ic:n,Ub:t,zc:r,Nc:a},O?(t.Oc="spawnThread",postMessage(t,a),0):lt(t))}function Ct(t,e,n){return O?$t(4,1,t,e,n):0}function xt(t,e){if(O)return $t(5,1,t,e)}function Rt(t,e){if(O)return $t(6,1,t,e)}function jt(t,e,n){if(O)return $t(7,1,t,e,n)}function kt(t,e,n){return O?$t(8,1,t,e,n):0}function Dt(t,e){if(O)return $t(9,1,t,e)}function Pt(t,e,n){if(O)return $t(10,1,t,e,n)}function Ut(t,e,n,r){if(O)return $t(11,1,t,e,n,r)}function Ft(t,e,n,r){if(O)return $t(12,1,t,e,n,r)}function It(t,e,n,r){if(O)return $t(13,1,t,e,n,r)}function Wt(t){if(O)return $t(14,1,t)}function Ht(t,e){if(O)return $t(15,1,t,e)}function Lt(t,e,n){if(O)return $t(16,1,t,e,n)}function zt(t){Atomics.store(a(),t>>2,1),he()&&_e(t),Atomics.compareExchange(a(),t>>2,1,0)}function Yt(t){return i()[t>>>2]+4294967296*a()[t+4>>>2]}function Bt(t,e,n,r,a,i){return O?$t(17,1,t,e,n,r,a,i):-52}function Gt(t,e,n,r,a,i){if(O)return $t(18,1,t,e,n,r,a,i)}function Nt(t){var n=G(t)+1,r=de(n);return r&&B(t,e(),r,n),r}function Vt(t,e,n){function r(t){return(t=t.toTimeString().match(/\\(([A-Za-z ]+)\\)$/))?t[1]:"GMT"}if(O)return $t(19,1,t,e,n);var o=(new Date).getFullYear(),u=new Date(o,0,1),c=new Date(o,6,1);o=u.getTimezoneOffset();var s=c.getTimezoneOffset(),l=Math.max(o,s);a()[t>>2>>>0]=60*l,a()[e>>2>>>0]=Number(o!=s),t=r(u),e=r(c),t=Nt(t),e=Nt(e),s>2>>>0]=t,i()[n+4>>2>>>0]=e):(i()[n>>2>>>0]=e,i()[n+4>>2>>>0]=t)}function $t(t,e){var n=arguments.length-2,r=arguments;return yt((()=>{for(var a=Ce(8*n),i=a>>3,u=0;u>>0]=c}return we(t,n,a,e)}))}u.executeNotifiedProxyingQueue=zt,wt=_?()=>{var t=process.hrtime();return 
1e3*t[0]+t[1]/1e6}:O?()=>performance.now()-u.__performance_now_clock_drift:()=>performance.now();var qt,Xt=[],Jt={};function Zt(){if(!qt){var t,e={USER:"web_user",LOGNAME:"web_user",PATH:"/",PWD:"/",HOME:"/home/web_user",LANG:("object"==typeof navigator&&navigator.languages&&navigator.languages[0]||"C").replace("-","_")+".UTF-8",_:m||"./this.program"};for(t in Jt)void 0===Jt[t]?delete e[t]:e[t]=Jt[t];var n=[];for(t in e)n.push(t+"="+e[t]);qt=n}return qt}function Qt(t,n){if(O)return $t(20,1,t,n);var r=0;return Zt().forEach((function(a,o){var u=n+r;for(o=i()[t+4*o>>2>>>0]=u,u=0;u>0>>>0]=a.charCodeAt(u);e()[o>>0>>>0]=0,r+=a.length+1})),0}function Kt(t,e){if(O)return $t(21,1,t,e);var n=Zt();i()[t>>2>>>0]=n.length;var r=0;return n.forEach((function(t){r+=t.length+1})),i()[e>>2>>>0]=r,0}function te(t){return O?$t(22,1,t):52}function ee(t,e,n,r){return O?$t(23,1,t,e,n,r):52}function ne(t,e,n,r,a){return O?$t(24,1,t,e,n,r,a):70}var re=[null,[],[]];function ae(t,e){var n=re[t];0===e||10===e?((1===t?C:x)(z(n,0)),n.length=0):n.push(e)}function ie(t,e,n,a){if(O)return $t(25,1,t,e,n,a);for(var o=0,u=0;u>2>>>0],s=i()[e+4>>2>>>0];e+=8;for(var l=0;l>>0]);o+=s}return i()[a>>2>>>0]=o,0}var oe=0;function ue(t){return 0==t%4&&(0!=t%100||0==t%400)}var ce=[31,29,31,30,31,30,31,31,30,31,30,31],se=[31,28,31,30,31,30,31,31,30,31,30,31];function le(t,n,r,i){function o(t,e,n){for(t="number"==typeof t?t.toString():t||"";t.lengtht?-1:0r-t.getDate())){t.setDate(t.getDate()+e);break}e-=r-t.getDate()+1,t.setDate(1),11>n?t.setMonth(n+1):(t.setMonth(0),t.setFullYear(t.getFullYear()+1))}return n=new Date(t.getFullYear()+1,0,4),e=s(new Date(t.getFullYear(),0,4)),n=s(n),0>=c(e,t)?0>=c(n,t)?t.getFullYear()+1:t.getFullYear():t.getFullYear()-1}var f=a()[i+40>>2>>>0];for(var p in i={Lc:a()[i>>2>>>0],Kc:a()[i+4>>2>>>0],dc:a()[i+8>>2>>>0],jc:a()[i+12>>2>>>0],ec:a()[i+16>>2>>>0],Xb:a()[i+20>>2>>>0],Tb:a()[i+24>>2>>>0],Wb:a()[i+28>>2>>>0],Rc:a()[i+32>>2>>>0],Jc:a()[i+36>>2>>>0],Mc:f?Y(f):""},r=Y(r),f={"%c":"%a %b %d %H:%M:%S %Y","%D":"%m/%d/%y","%F":"%Y-%m-%d","%h":"%b","%r":"%I:%M:%S %p","%R":"%H:%M","%T":"%H:%M:%S","%x":"%m/%d/%y","%X":"%H:%M:%S","%Ec":"%c","%EC":"%C","%Ex":"%m/%d/%y","%EX":"%H:%M:%S","%Ey":"%y","%EY":"%Y","%Od":"%d","%Oe":"%e","%OH":"%H","%OI":"%I","%Om":"%m","%OM":"%M","%OS":"%S","%Ou":"%u","%OU":"%U","%OV":"%V","%Ow":"%w","%OW":"%W","%Oy":"%y"})r=r.replace(new RegExp(p,"g"),f[p]);var h="Sunday Monday Tuesday Wednesday Thursday Friday Saturday".split(" "),d="January February March April May June July August September October November December".split(" ");for(p in f={"%a":function(t){return h[t.Tb].substring(0,3)},"%A":function(t){return h[t.Tb]},"%b":function(t){return d[t.ec].substring(0,3)},"%B":function(t){return d[t.ec]},"%C":function(t){return u((t.Xb+1900)/100|0,2)},"%d":function(t){return u(t.jc,2)},"%e":function(t){return o(t.jc,2," ")},"%g":function(t){return l(t).toString().substring(2)},"%G":function(t){return l(t)},"%H":function(t){return u(t.dc,2)},"%I":function(t){return 0==(t=t.dc)?t=12:12t.dc?"AM":"PM"},"%S":function(t){return u(t.Lc,2)},"%t":function(){return"\\t"},"%u":function(t){return t.Tb||7},"%U":function(t){return u(Math.floor((t.Wb+7-t.Tb)/7),2)},"%V":function(t){var e=Math.floor((t.Wb+7-(t.Tb+6)%7)/7);if(2>=(t.Tb+371-t.Wb-2)%7&&e++,e)53==e&&(4==(n=(t.Tb+371-t.Wb)%7)||3==n&&ue(t.Xb)||(e=1));else{e=52;var n=(t.Tb+7-t.Wb-1)%7;(4==n||5==n&&ue(t.Xb%400-1))&&e++}return u(e,2)},"%w":function(t){return t.Tb},"%W":function(t){return 
u(Math.floor((t.Wb+7-(t.Tb+6)%7)/7),2)},"%y":function(t){return(t.Xb+1900).toString().substring(2)},"%Y":function(t){return t.Xb+1900},"%z":function(t){var e=0<=(t=t.Jc);return t=Math.abs(t)/60,(e?"+":"-")+String("0000"+(t/60*100+t%60)).slice(-4)},"%Z":function(t){return t.Mc},"%%":function(){return"%"}},r=r.replace(/%%/g,"\\0\\0"),f)r.includes(p)&&(r=r.replace(new RegExp(p,"g"),f[p](i)));return p=function(t){var e=Array(G(t)+1);return B(t,e,0,e.length),e}(r=r.replace(/\\0\\0/g,"%")),p.length>n?0:(function(t,n){e().set(t,n>>>0)}(p,t),p.length-1)}ht.fc();var fe=[null,ft,bt,Et,Ct,xt,Rt,jt,kt,Dt,Pt,Ut,Ft,It,Wt,Ht,Lt,Bt,Gt,Vt,Qt,Kt,te,ee,ne,ie],pe={b:function(t){return de(t+24)+24},n:function(t){return(t=new St(t)).uc()||(t.hc(!0),Ot--),t.ic(!1),_t.push(t),t.sc(),t.vc()},ma:function(t){throw x("Unexpected exception thrown, this is not properly supported - aborting"),H=!0,t},x:function(){Se(0);var t=_t.pop();if(t.Hc()&&!t.kc()){var e=t.Dc();e&>(e)(t.Zb),Tt(t.Zb)}At=0},e:function(){var t=At;if(!t)return oe=0;var e=new St(t);e.cc(t);var n=e.bc();if(!n)return oe=0,t;for(var r=Array.prototype.slice.call(arguments),a=0;azt(r)));else if(O)postMessage({targetThread:t,cmd:"processProxyingQueue",queue:r});else{if(!(t=ht.Vb[t]))return;t.postMessage({cmd:"processProxyingQueue",queue:r})}return 1},Ea:function(){return-1},Pa:function(t,e){t=new Date(1e3*Yt(t)),a()[e>>2>>>0]=t.getUTCSeconds(),a()[e+4>>2>>>0]=t.getUTCMinutes(),a()[e+8>>2>>>0]=t.getUTCHours(),a()[e+12>>2>>>0]=t.getUTCDate(),a()[e+16>>2>>>0]=t.getUTCMonth(),a()[e+20>>2>>>0]=t.getUTCFullYear()-1900,a()[e+24>>2>>>0]=t.getUTCDay(),t=(t.getTime()-Date.UTC(t.getUTCFullYear(),0,1,0,0,0,0))/864e5|0,a()[e+28>>2>>>0]=t},Qa:function(t,e){t=new Date(1e3*Yt(t)),a()[e>>2>>>0]=t.getSeconds(),a()[e+4>>2>>>0]=t.getMinutes(),a()[e+8>>2>>>0]=t.getHours(),a()[e+12>>2>>>0]=t.getDate(),a()[e+16>>2>>>0]=t.getMonth(),a()[e+20>>2>>>0]=t.getFullYear()-1900,a()[e+24>>2>>>0]=t.getDay();var n=new Date(t.getFullYear(),0,1),r=(t.getTime()-n.getTime())/864e5|0;a()[e+28>>2>>>0]=r,a()[e+36>>2>>>0]=-60*t.getTimezoneOffset(),r=new Date(t.getFullYear(),6,1).getTimezoneOffset(),t=0|(r!=(n=n.getTimezoneOffset())&&t.getTimezoneOffset()==Math.min(n,r)),a()[e+32>>2>>>0]=t},Ra:function(t){var e=new Date(a()[t+20>>2>>>0]+1900,a()[t+16>>2>>>0],a()[t+12>>2>>>0],a()[t+8>>2>>>0],a()[t+4>>2>>>0],a()[t>>2>>>0],0),n=a()[t+32>>2>>>0],r=e.getTimezoneOffset(),i=new Date(e.getFullYear(),0,1),o=new Date(e.getFullYear(),6,1).getTimezoneOffset(),u=i.getTimezoneOffset(),c=Math.min(u,o);return 0>n?a()[t+32>>2>>>0]=Number(o!=u&&c==r):0>2>>>0]=e.getDay(),n=(e.getTime()-i.getTime())/864e5|0,a()[t+28>>2>>>0]=n,a()[t>>2>>>0]=e.getSeconds(),a()[t+4>>2>>>0]=e.getMinutes(),a()[t+8>>2>>>0]=e.getHours(),a()[t+12>>2>>>0]=e.getDate(),a()[t+16>>2>>>0]=e.getMonth(),e.getTime()/1e3|0},Aa:Bt,Ba:Gt,Sa:function t(e,n,r){t.Ac||(t.Ac=!0,Vt(e,n,r))},y:function(){at("")},U:function(){if(!_&&!w){var t="Blocking on the main thread is very dangerous, see https://emscripten.org/docs/porting/pthreads.html#blocking-on-the-main-browser-thread";vt||(vt={}),vt[t]||(vt[t]=1,_&&(t="warning: "+t),x(t))}},ra:function(){return 4294901760},B:wt,Ia:function(t,e,n){r().copyWithin(t>>>0,e>>>0,e+n>>>0)},F:function(){return _?n(993).cpus().length:navigator.hardwareConcurrency},Da:function(t,e,n){Xt.length=e,n>>=3;for(var r=0;r>>0];return(0>t?ut[-t-1]:fe[t]).apply(null,Xt)},qa:function(t){var e=r().length;if((t>>>=0)<=e||4294901760=n;n*=2){var a=e*(1+.2/n);a=Math.min(a,t+100663296);var 
i=Math;a=Math.max(t,a),i=i.min.call(i,4294901760,a+(65536-a%65536)%65536);t:{try{j.grow(i-D.byteLength+65535>>>16),N(j.buffer);var o=1;break t}catch(t){}o=void 0}if(o)return!0}return!1},Na:function(){throw"unwind"},Ga:Qt,Ha:Kt,J:pt,I:te,S:ee,ga:ne,R:ie,d:function(){return oe},na:function t(r,a){t.lc||(t.lc=function(){if("object"==typeof crypto&&"function"==typeof crypto.getRandomValues){var t=new Uint8Array(1);return()=>(crypto.getRandomValues(t),t[0])}if(_)try{var e=n(Object(function(){var t=new Error("Cannot find module \'crypto\'");throw t.code="MODULE_NOT_FOUND",t}()));return()=>e.randomBytes(1)[0]}catch(t){}return()=>at("randomDevice")}());for(var i=0;i>0>>>0]=t.lc();return 0},ia:function(t,e,n){var r=Ee();try{return gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},ja:function(t,e,n){var r=Ee();try{return gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},K:function(t){var e=Ee();try{return gt(t)()}catch(t){if(Me(e),t!==t+0)throw t;Se(1,0)}},f:function(t,e){var n=Ee();try{return gt(t)(e)}catch(t){if(Me(n),t!==t+0)throw t;Se(1,0)}},P:function(t,e,n){var r=Ee();try{return gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},Q:function(t,e,n){var r=Ee();try{return gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},k:function(t,e,n){var r=Ee();try{return gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},p:function(t,e,n,r){var a=Ee();try{return gt(t)(e,n,r)}catch(t){if(Me(a),t!==t+0)throw t;Se(1,0)}},q:function(t,e,n,r,a){var i=Ee();try{return gt(t)(e,n,r,a)}catch(t){if(Me(i),t!==t+0)throw t;Se(1,0)}},N:function(t,e,n,r,a,i){var o=Ee();try{return gt(t)(e,n,r,a,i)}catch(t){if(Me(o),t!==t+0)throw t;Se(1,0)}},s:function(t,e,n,r,a,i){var o=Ee();try{return gt(t)(e,n,r,a,i)}catch(t){if(Me(o),t!==t+0)throw t;Se(1,0)}},w:function(t,e,n,r,a,i,o){var u=Ee();try{return gt(t)(e,n,r,a,i,o)}catch(t){if(Me(u),t!==t+0)throw t;Se(1,0)}},L:function(t,e,n,r,a,i,o,u){var c=Ee();try{return gt(t)(e,n,r,a,i,o,u)}catch(t){if(Me(c),t!==t+0)throw t;Se(1,0)}},E:function(t,e,n,r,a,i,o,u,c,s,l,f){var p=Ee();try{return gt(t)(e,n,r,a,i,o,u,c,s,l,f)}catch(t){if(Me(p),t!==t+0)throw t;Se(1,0)}},aa:function(t,e,n,r,a,i,o,u){var c=Ee();try{return He(t,e,n,r,a,i,o,u)}catch(t){if(Me(c),t!==t+0)throw t;Se(1,0)}},_:function(t,e,n,r,a,i,o){var u=Ee();try{return ke(t,e,n,r,a,i,o)}catch(t){if(Me(u),t!==t+0)throw t;Se(1,0)}},Z:function(t,e,n,r,a){var i=Ee();try{return Le(t,e,n,r,a)}catch(t){if(Me(i),t!==t+0)throw t;Se(1,0)}},ca:function(t,e,n,r){var a=Ee();try{return Ie(t,e,n,r)}catch(t){if(Me(a),t!==t+0)throw t;Se(1,0)}},$:function(t){var e=Ee();try{return je(t)}catch(t){if(Me(e),t!==t+0)throw t;Se(1,0)}},ba:function(t,e){var n=Ee();try{return We(t,e)}catch(t){if(Me(n),t!==t+0)throw t;Se(1,0)}},Y:function(t,e,n){var r=Ee();try{return De(t,e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},g:function(t){var e=Ee();try{gt(t)()}catch(t){if(Me(e),t!==t+0)throw t;Se(1,0)}},r:function(t,e){var n=Ee();try{gt(t)(e)}catch(t){if(Me(n),t!==t+0)throw t;Se(1,0)}},i:function(t,e,n){var r=Ee();try{gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},ha:function(t,e,n,r){var a=Ee();try{gt(t)(e,n,r)}catch(t){if(Me(a),t!==t+0)throw t;Se(1,0)}},m:function(t,e,n,r){var a=Ee();try{gt(t)(e,n,r)}catch(t){if(Me(a),t!==t+0)throw t;Se(1,0)}},v:function(t,e,n,r,a){var i=Ee();try{gt(t)(e,n,r,a)}catch(t){if(Me(i),t!==t+0)throw t;Se(1,0)}},u:function(t,e,n,r,a,i){var o=Ee();try{gt(t)(e,n,r,a,i)}catch(t){if(Me(o),t!==t+0)throw t;Se(1,0)}},O:function(t,e,n,r,a,i,o){var u=Ee();try{gt(t)(e,n,r,a,i,o)}catch(t){if(Me(u),t!==t+0)throw 
t;Se(1,0)}},A:function(t,e,n,r,a,i,o,u){var c=Ee();try{gt(t)(e,n,r,a,i,o,u)}catch(t){if(Me(c),t!==t+0)throw t;Se(1,0)}},ka:function(t,e,n,r,a,i,o,u,c){var s=Ee();try{gt(t)(e,n,r,a,i,o,u,c)}catch(t){if(Me(s),t!==t+0)throw t;Se(1,0)}},C:function(t,e,n,r,a,i,o,u,c,s,l){var f=Ee();try{gt(t)(e,n,r,a,i,o,u,c,s,l)}catch(t){if(Me(f),t!==t+0)throw t;Se(1,0)}},D:function(t,e,n,r,a,i,o,u,c,s,l,f,p,h,d,y){var b=Ee();try{gt(t)(e,n,r,a,i,o,u,c,s,l,f,p,h,d,y)}catch(t){if(Me(b),t!==t+0)throw t;Se(1,0)}},fa:function(t,e,n,r,a,i,o,u){var c=Ee();try{Pe(t,e,n,r,a,i,o,u)}catch(t){if(Me(c),t!==t+0)throw t;Se(1,0)}},da:function(t,e,n,r,a,i,o,u,c,s,l,f){var p=Ee();try{Fe(t,e,n,r,a,i,o,u,c,s,l,f)}catch(t){if(Me(p),t!==t+0)throw t;Se(1,0)}},ea:function(t,e,n,r,a,i){var o=Ee();try{Ue(t,e,n,r,a,i)}catch(t){if(Me(o),t!==t+0)throw t;Se(1,0)}},o:function(t){return t},a:j||u.wasmMemory,G:function(t){oe=t},la:le,z:function(t,e,n,r){return le(t,e,n,r)}};!function(){function t(t,e){u.asm=t.exports,ht.qc.push(u.asm.sb),$=u.asm.ub,X.unshift(u.asm.Va),k=e,O||(et--,u.monitorRunDependencies&&u.monitorRunDependencies(et),0==et&&(null!==nt&&(clearInterval(nt),nt=null),rt&&(t=rt,rt=null,t())))}function e(e){t(e.instance,e.module)}function n(t){return function(){if(!M&&(v||w)){if("function"==typeof fetch&&!tt.startsWith("file://"))return fetch(tt,{credentials:"same-origin"}).then((function(t){if(!t.ok)throw"failed to load wasm binary file at \'"+tt+"\'";return t.arrayBuffer()})).catch((function(){return ot()}));if(f)return new Promise((function(t,e){f(tt,(function(e){t(new Uint8Array(e))}),e)}))}return Promise.resolve().then((function(){return ot()}))}().then((function(t){return WebAssembly.instantiate(t,r)})).then((function(t){return t})).then(t,(function(t){x("failed to asynchronously prepare wasm: "+t),at(t)}))}var r={a:pe};if(O||(et++,u.monitorRunDependencies&&u.monitorRunDependencies(et)),u.instantiateWasm)try{return u.instantiateWasm(r,t)}catch(t){return x("Module.instantiateWasm callback failed with error: "+t),!1}(M||"function"!=typeof WebAssembly.instantiateStreaming||it()||tt.startsWith("file://")||_||"function"!=typeof fetch?n(e):fetch(tt,{credentials:"same-origin"}).then((function(t){return WebAssembly.instantiateStreaming(t,r).then(e,(function(t){return x("wasm streaming compile failed: "+t),x("falling back to ArrayBuffer 
instantiation"),n(e)}))}))).catch(s)}(),u.___wasm_call_ctors=function(){return(u.___wasm_call_ctors=u.asm.Va).apply(null,arguments)},u._OrtInit=function(){return(u._OrtInit=u.asm.Wa).apply(null,arguments)},u._OrtCreateSessionOptions=function(){return(u._OrtCreateSessionOptions=u.asm.Xa).apply(null,arguments)},u._OrtAppendExecutionProvider=function(){return(u._OrtAppendExecutionProvider=u.asm.Ya).apply(null,arguments)},u._OrtAddSessionConfigEntry=function(){return(u._OrtAddSessionConfigEntry=u.asm.Za).apply(null,arguments)},u._OrtReleaseSessionOptions=function(){return(u._OrtReleaseSessionOptions=u.asm._a).apply(null,arguments)},u._OrtCreateSession=function(){return(u._OrtCreateSession=u.asm.$a).apply(null,arguments)},u._OrtReleaseSession=function(){return(u._OrtReleaseSession=u.asm.ab).apply(null,arguments)},u._OrtGetInputCount=function(){return(u._OrtGetInputCount=u.asm.bb).apply(null,arguments)},u._OrtGetOutputCount=function(){return(u._OrtGetOutputCount=u.asm.cb).apply(null,arguments)},u._OrtGetInputName=function(){return(u._OrtGetInputName=u.asm.db).apply(null,arguments)},u._OrtGetOutputName=function(){return(u._OrtGetOutputName=u.asm.eb).apply(null,arguments)},u._OrtFree=function(){return(u._OrtFree=u.asm.fb).apply(null,arguments)},u._OrtCreateTensor=function(){return(u._OrtCreateTensor=u.asm.gb).apply(null,arguments)},u._OrtGetTensorData=function(){return(u._OrtGetTensorData=u.asm.hb).apply(null,arguments)},u._OrtReleaseTensor=function(){return(u._OrtReleaseTensor=u.asm.ib).apply(null,arguments)},u._OrtCreateRunOptions=function(){return(u._OrtCreateRunOptions=u.asm.jb).apply(null,arguments)},u._OrtAddRunConfigEntry=function(){return(u._OrtAddRunConfigEntry=u.asm.kb).apply(null,arguments)},u._OrtReleaseRunOptions=function(){return(u._OrtReleaseRunOptions=u.asm.lb).apply(null,arguments)},u._OrtRun=function(){return(u._OrtRun=u.asm.mb).apply(null,arguments)},u._OrtEndProfiling=function(){return(u._OrtEndProfiling=u.asm.nb).apply(null,arguments)};var he=u._pthread_self=function(){return(he=u._pthread_self=u.asm.ob).apply(null,arguments)},de=u._malloc=function(){return(de=u._malloc=u.asm.pb).apply(null,arguments)},ye=u._free=function(){return(ye=u._free=u.asm.qb).apply(null,arguments)},be=u._fflush=function(){return(be=u._fflush=u.asm.rb).apply(null,arguments)};u.__emscripten_tls_init=function(){return(u.__emscripten_tls_init=u.asm.sb).apply(null,arguments)};var me=u.___funcs_on_exit=function(){return(me=u.___funcs_on_exit=u.asm.tb).apply(null,arguments)},ge=u.__emscripten_thread_init=function(){return(ge=u.__emscripten_thread_init=u.asm.vb).apply(null,arguments)};u.__emscripten_thread_crashed=function(){return(u.__emscripten_thread_crashed=u.asm.wb).apply(null,arguments)};var 
ve,we=u._emscripten_run_in_main_runtime_thread_js=function(){return(we=u._emscripten_run_in_main_runtime_thread_js=u.asm.xb).apply(null,arguments)},_e=u.__emscripten_proxy_execute_task_queue=function(){return(_e=u.__emscripten_proxy_execute_task_queue=u.asm.yb).apply(null,arguments)},Oe=u.__emscripten_thread_free_data=function(){return(Oe=u.__emscripten_thread_free_data=u.asm.zb).apply(null,arguments)},Ae=u.__emscripten_thread_exit=function(){return(Ae=u.__emscripten_thread_exit=u.asm.Ab).apply(null,arguments)},Se=u._setThrew=function(){return(Se=u._setThrew=u.asm.Bb).apply(null,arguments)},Te=u._emscripten_stack_set_limits=function(){return(Te=u._emscripten_stack_set_limits=u.asm.Cb).apply(null,arguments)},Ee=u.stackSave=function(){return(Ee=u.stackSave=u.asm.Db).apply(null,arguments)},Me=u.stackRestore=function(){return(Me=u.stackRestore=u.asm.Eb).apply(null,arguments)},Ce=u.stackAlloc=function(){return(Ce=u.stackAlloc=u.asm.Fb).apply(null,arguments)},xe=u.___cxa_can_catch=function(){return(xe=u.___cxa_can_catch=u.asm.Gb).apply(null,arguments)},Re=u.___cxa_is_pointer_type=function(){return(Re=u.___cxa_is_pointer_type=u.asm.Hb).apply(null,arguments)},je=u.dynCall_j=function(){return(je=u.dynCall_j=u.asm.Ib).apply(null,arguments)},ke=u.dynCall_iiiiij=function(){return(ke=u.dynCall_iiiiij=u.asm.Jb).apply(null,arguments)},De=u.dynCall_jii=function(){return(De=u.dynCall_jii=u.asm.Kb).apply(null,arguments)},Pe=u.dynCall_viiiiij=function(){return(Pe=u.dynCall_viiiiij=u.asm.Lb).apply(null,arguments)},Ue=u.dynCall_vjji=function(){return(Ue=u.dynCall_vjji=u.asm.Mb).apply(null,arguments)},Fe=u.dynCall_viiijjjii=function(){return(Fe=u.dynCall_viiijjjii=u.asm.Nb).apply(null,arguments)},Ie=u.dynCall_iij=function(){return(Ie=u.dynCall_iij=u.asm.Ob).apply(null,arguments)},We=u.dynCall_ji=function(){return(We=u.dynCall_ji=u.asm.Pb).apply(null,arguments)},He=u.dynCall_iiiiiij=function(){return(He=u.dynCall_iiiiiij=u.asm.Qb).apply(null,arguments)},Le=u.dynCall_iiij=function(){return(Le=u.dynCall_iiij=u.asm.Rb).apply(null,arguments)};function ze(){function t(){if(!ve&&(ve=!0,u.calledRun=!0,!H)&&(O||dt(X),c(u),u.onRuntimeInitialized&&u.onRuntimeInitialized(),!O)){if(u.postRun)for("function"==typeof u.postRun&&(u.postRun=[u.postRun]);u.postRun.length;){var t=u.postRun.shift();Z.unshift(t)}dt(Z)}}if(!(0{var _scriptDir,r=(_scriptDir=(_scriptDir="undefined"!=typeof document&&document.currentScript?document.currentScript.src:void 0)||"/index.js",function(t){var e,r,a;t=t||{},e||(e=void 0!==t?t:{}),e.ready=new Promise((function(t,e){r=t,a=e}));var i,o,u,c,s,l,f=Object.assign({},e),p="./this.program",h=(t,e)=>{throw e},d="object"==typeof window,y="function"==typeof importScripts,b="object"==typeof process&&"object"==typeof process.versions&&"string"==typeof process.versions.node,m="";b?(m=y?n(908).dirname(m)+"/":"//",l=()=>{s||(c=n(384),s=n(908))},i=function(t,e){return l(),t=s.normalize(t),c.readFileSync(t,e?void 0:"utf8")},u=t=>((t=i(t,!0)).buffer||(t=new Uint8Array(t)),t),o=(t,e,n)=>{l(),t=s.normalize(t),c.readFile(t,(function(t,r){t?n(t):e(r.buffer)}))},1{if(_||0{var e=new XMLHttpRequest;return e.open("GET",t,!1),e.send(null),e.responseText},y&&(u=t=>{var e=new XMLHttpRequest;return e.open("GET",t,!1),e.responseType="arraybuffer",e.send(null),new Uint8Array(e.response)}),o=(t,e,n)=>{var r=new XMLHttpRequest;r.open("GET",t,!0),r.responseType="arraybuffer",r.onload=()=>{200==r.status||0==r.status&&r.response?e(r.response):n()},r.onerror=n,r.send(null)});var 
g,v=e.print||console.log.bind(console),w=e.printErr||console.warn.bind(console);Object.assign(e,f),f=null,e.thisProgram&&(p=e.thisProgram),e.quit&&(h=e.quit),e.wasmBinary&&(g=e.wasmBinary);var _=e.noExitRuntime||!1;"object"!=typeof WebAssembly&&V("no native wasm support detected");var O,A,S,T,E,M,C=!1,x="undefined"!=typeof TextDecoder?new TextDecoder("utf8"):void 0;function R(t,e,n){var r=(e>>>=0)+n;for(n=e;t[n]&&!(n>=r);)++n;if(16(a=224==(240&a)?(15&a)<<12|i<<6|o:(7&a)<<18|i<<12|o<<6|63&t[e++])?r+=String.fromCharCode(a):(a-=65536,r+=String.fromCharCode(55296|a>>10,56320|1023&a))}}else r+=String.fromCharCode(a)}return r}function j(t,e){return(t>>>=0)?R(T,t,e):""}function k(t,e,n,r){if(!(0>>=0;r=n+r-1;for(var i=0;i=o&&(o=65536+((1023&o)<<10)|1023&t.charCodeAt(++i)),127>=o){if(n>=r)break;e[n++>>>0]=o}else{if(2047>=o){if(n+1>=r)break;e[n++>>>0]=192|o>>6}else{if(65535>=o){if(n+2>=r)break;e[n++>>>0]=224|o>>12}else{if(n+3>=r)break;e[n++>>>0]=240|o>>18,e[n++>>>0]=128|o>>12&63}e[n++>>>0]=128|o>>6&63}e[n++>>>0]=128|63&o}}return e[n>>>0]=0,n-a}function D(t){for(var e=0,n=0;n=r?e++:2047>=r?e+=2:55296<=r&&57343>=r?(e+=4,++n):e+=3}return e}function P(){var t=O.buffer;A=t,e.HEAP8=S=new Int8Array(t),e.HEAP16=new Int16Array(t),e.HEAP32=E=new Int32Array(t),e.HEAPU8=T=new Uint8Array(t),e.HEAPU16=new Uint16Array(t),e.HEAPU32=M=new Uint32Array(t),e.HEAPF32=new Float32Array(t),e.HEAPF64=new Float64Array(t)}var U,F=[],I=[],W=[],H=[],L=0;function z(){var t=e.preRun.shift();F.unshift(t)}var Y,B=0,G=null,N=null;function V(t){throw e.onAbort&&e.onAbort(t),w(t="Aborted("+t+")"),C=!0,t=new WebAssembly.RuntimeError(t+". Build with -sASSERTIONS for more info."),a(t),t}function $(){return Y.startsWith("data:application/octet-stream;base64,")}if(Y="ort-wasm.wasm",!$()){var q=Y;Y=e.locateFile?e.locateFile(q,m):m+q}function X(){var t=Y;try{if(t==Y&&g)return new Uint8Array(g);if(u)return u(t);throw"both async and sync fetching of the wasm failed"}catch(t){V(t)}}function J(t){this.name="ExitStatus",this.message="Program terminated with exit("+t+")",this.status=t}function Z(t){for(;0>2>>>0]=t},this.Eb=function(){return M[this.zb+4>>2>>>0]},this.Sb=function(t){M[this.zb+8>>2>>>0]=t},this.Wb=function(){return M[this.zb+8>>2>>>0]},this.Tb=function(){E[this.zb>>2>>>0]=0},this.Ib=function(t){S[this.zb+12>>0>>>0]=t?1:0},this.Pb=function(){return 0!=S[this.zb+12>>0>>>0]},this.Jb=function(t){S[this.zb+13>>0>>>0]=t?1:0},this.Lb=function(){return 0!=S[this.zb+13>>0>>>0]},this.Rb=function(t,e){this.Fb(0),this.Ub(t),this.Sb(e),this.Tb(),this.Ib(!1),this.Jb(!1)},this.Nb=function(){E[this.zb>>2>>>0]+=1},this.Xb=function(){var t=E[this.zb>>2>>>0];return E[this.zb>>2>>>0]=t-1,1===t},this.Fb=function(t){M[this.zb+16>>2>>>0]=t},this.Ob=function(){return M[this.zb+16>>2>>>0]},this.Qb=function(){if(Mt(this.Eb()))return M[this.Db>>2>>>0];var t=this.Ob();return 0!==t?t:this.Db}}function nt(t){return vt(new et(t).zb)}var rt=[];function at(t){var e=rt[t];return e||(t>=rt.length&&(rt.length=t+1),rt[t]=e=U.get(t)),e}function it(t){var e=D(t)+1,n=gt(e);return n&&k(t,S,n,e),n}var ot={};function ut(){if(!ct){var t,e={USER:"web_user",LOGNAME:"web_user",PATH:"/",PWD:"/",HOME:"/home/web_user",LANG:("object"==typeof navigator&&navigator.languages&&navigator.languages[0]||"C").replace("-","_")+".UTF-8",_:p||"./this.program"};for(t in ot)void 0===ot[t]?delete e[t]:e[t]=ot[t];var n=[];for(t in e)n.push(t+"="+e[t]);ct=n}return ct}var ct,st=[null,[],[]];function lt(t,e){var n=st[t];0===e||10===e?((1===t?v:w)(R(n,0)),n.length=0):n.push(e)}var ft=0;function 
pt(t){return 0==t%4&&(0!=t%100||0==t%400)}var ht=[31,29,31,30,31,30,31,31,30,31,30,31],dt=[31,28,31,30,31,30,31,31,30,31,30,31];function yt(t,e,n,r){function a(t,e,n){for(t="number"==typeof t?t.toString():t||"";t.length