parquet-converter commited on
Commit
6aa2b53
·
1 Parent(s): 4f92e8e

Update parquet files (step 55 of 476)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/B Ampr Automation Studio 4 Download Crack.md +0 -22
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Datem Summit Evolution Crack Para How to Get the Latest Version of the 3D Stereo Software.md +0 -121
  3. spaces/1gistliPinn/ChatGPT4/Examples/Autodesk Revit 2018 Crack WORK Keygen XForce Free Download.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/Fireflies Movie English Subtitles Download !!LINK!! Torrent.md +0 -22
  5. spaces/1phancelerku/anime-remove-background/Download Free and Unlimited Android Mods with APKMODEL.md +0 -75
  6. spaces/1phancelerku/anime-remove-background/Download Nada Dering WA Tiktok Suara Google BTS Chagiya dan Lainnya.md +0 -76
  7. spaces/ADOPLE/Multi-Doc-Virtual-Chatbot/app.py +0 -202
  8. spaces/AIConsultant/MusicGen/audiocraft/grids/compression/encodec_base_24khz.py +0 -28
  9. spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/build_vocab_ltp.py +0 -150
  10. spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/open_clap/openai.py +0 -129
  11. spaces/AIGText/GlyphControl/ldm/modules/image_degradation/bsrgan.py +0 -730
  12. spaces/AIlexDev/Einfach.Hintergrund/app.py +0 -154
  13. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb32-120e_deepfashion2_sling_256x192.py +0 -172
  14. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/click/Factory.d.ts +0 -7
  15. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dynamictext/Factory.d.ts +0 -5
  16. spaces/Ajaymekala/gradiolangchainChatBotOpenAI-1/app.py +0 -34
  17. spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/docs/speed_benchmark.md +0 -93
  18. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/ddim_inverse.md +0 -21
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/check_copies.py +0 -213
  20. spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/hungarian_assigner.py +0 -145
  21. spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_40k_pascal_context.py +0 -10
  22. spaces/AnnonSubmission/xai-cl/README.md +0 -12
  23. spaces/Annotation-AI/fast-segment-everything-with-image-prompt/app.py +0 -17
  24. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/ball_query.py +0 -55
  25. spaces/Ariharasudhan/YoloV5/models/common.py +0 -860
  26. spaces/Arnx/MusicGenXvAKN/Makefile +0 -21
  27. spaces/Augustya/ai-subject-answer-generator/app.py +0 -7
  28. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/analysis.py +0 -188
  29. spaces/Benson/text-generation/Examples/Bloons Td 6 Apk Download Android.md +0 -49
  30. spaces/Benson/text-generation/Examples/Creality Ender 3 S1 Pro Cura Perfil Descargar.md +0 -84
  31. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/__main__.py +0 -17
  32. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/resolvelib/reporters.py +0 -43
  33. spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py +0 -36
  34. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/py38compat.py +0 -8
  35. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/extension.py +0 -148
  36. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/grid-feats-vqa/train_net.py +0 -128
  37. spaces/CVPR/DualStyleGAN/README.md +0 -13
  38. spaces/CVPR/LIVE/thrust/thrust/detail/complex/csinhf.h +0 -142
  39. spaces/CVPR/LIVE/thrust/thrust/detail/preprocessor.h +0 -1182
  40. spaces/CVPR/LIVE/thrust/thrust/detail/use_default.h +0 -27
  41. spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/execution_policy.h +0 -107
  42. spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/transform_scan.h +0 -23
  43. spaces/CVPR/regionclip-demo/detectron2/modeling/test_time_augmentation.py +0 -307
  44. spaces/CaliforniaHealthCollaborative/Emoji2KaktovicEncryptKey/EMOJILOGIC.md +0 -11
  45. spaces/Chomkwoy/Nilkessye/syllable_model.py +0 -55
  46. spaces/Cicooo/vits-uma-genshin-honkai/text/symbols.py +0 -39
  47. spaces/CikeyQI/Yunzai/Yunzai/plugins/other/version.js +0 -27
  48. spaces/CikeyQI/meme-api/meme_generator/memes/cover_face/__init__.py +0 -19
  49. spaces/Cong723/gpt-academic-public/check_proxy.py +0 -151
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/module-447425fe.js +0 -9
spaces/1acneusushi/gradio-2dmoleculeeditor/data/B Ampr Automation Studio 4 Download Crack.md DELETED
@@ -1,22 +0,0 @@
1
-
2
- <h1>How to Download and Install B&amp;R Automation Studio 4</h1>
3
- <p>B&amp;R Automation Studio 4 is a software tool that allows you to design, program, test and debug automation systems. It supports a wide range of hardware platforms, such as PLCs, industrial PCs, servo drives, HMIs and more. With B&amp;R Automation Studio 4, you can create modular and reusable software components, use graphical editors for logic and motion control, simulate your system before deployment, and benefit from integrated diagnostics and troubleshooting features.</p>
4
- <h2>b amp;r automation studio 4 download crack</h2><br /><p><b><b>Download</b> ===== <a href="https://byltly.com/2uKz3Z">https://byltly.com/2uKz3Z</a></b></p><br /><br />
5
- <p>If you want to download and install B&amp;R Automation Studio 4 on your computer, you need to follow these steps:</p>
6
- <ol>
7
- <li>Go to the official website of B&amp;R Industrial Automation at <a href="https://www.br-automation.com/">https://www.br-automation.com/</a> and click on the "Downloads" tab.</li>
8
- <li>Under the "Software" section, find the link for "B&amp;R Automation Studio 4" and click on it.</li>
9
- <li>You will be redirected to a page where you can choose the version and language of B&amp;R Automation Studio 4 that you want to download. You can also check the system requirements and the release notes for each version.</li>
10
- <li>After selecting your preferences, click on the "Download" button and save the file to your computer.</li>
11
- <li>Once the download is complete, run the file and follow the instructions on the screen to install B&amp;R Automation Studio 4 on your computer.</li>
12
- <li>You may need to restart your computer after the installation is finished.</li>
13
- <li>To launch B&amp;R Automation Studio 4, go to the Start menu and look for the B&amp;R folder. Then, click on the "B&amp;R Automation Studio 4" icon.</li>
14
- </ol>
15
- <p>Congratulations! You have successfully downloaded and installed B&amp;R Automation Studio 4 on your computer. You can now start creating your own automation projects with this powerful software tool.</p>
16
-
17
- <p>B&amp;R Automation Studio 4 is based on the IEC 61131-3 standard, which defines five programming languages for automation systems: Ladder Diagram (LD), Function Block Diagram (FBD), Structured Text (ST), Instruction List (IL) and Sequential Function Chart (SFC). You can use any of these languages or combine them to create your software components. You can also use C/C++ or ANSI C for more complex tasks.</p>
18
- <p>B&amp;R Automation Studio 4 also provides graphical editors for motion control, such as Motion Chart and CAM Editor. These editors allow you to define the motion profiles and trajectories of your servo axes, as well as synchronize them with other axes or events. You can also use the integrated PLCopen motion function blocks to implement standard motion functions, such as homing, positioning, gearing and camming.</p>
19
- <p></p>
20
- <p>B&amp;R Automation Studio 4 enables you to simulate your system before deploying it to the hardware. You can use the Simulation Runtime feature to run your software components on your computer and test their functionality and performance. You can also use the Simulation View feature to visualize the behavior of your system in a 3D environment. You can import CAD models of your machine or plant and connect them to your software components. This way, you can verify the kinematics and dynamics of your system and detect any errors or collisions.</p> ddb901b051<br />
21
- <br />
22
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Datem Summit Evolution Crack Para How to Get the Latest Version of the 3D Stereo Software.md DELETED
@@ -1,121 +0,0 @@
1
-
2
- <h1>How to Crack DAT/EM Summit Evolution for Free</h1>
3
- <p>DAT/EM Summit Evolution is a powerful software that allows you to discover and capture 3D information from stereo data. The software includes CAD and GIS interfaces, 3D stereo vector superimposition, automated feature editing, contour generation, and many more tools. It is used by professionals in various fields such as mapping, surveying, engineering, geology, forestry, archaeology, etc.</p>
4
- <p>However, DAT/EM Summit Evolution is not a cheap software. Depending on the product level and the modules you need, it can cost you thousands of dollars. That's why some people may want to crack it and use it for free. Cracking is the process of modifying or bypassing the protection mechanisms of a software to make it work without a license or a dongle.</p>
5
- <h2>datem summit evolution crack para</h2><br /><p><b><b>DOWNLOAD</b> &#9658;&#9658;&#9658;&#9658;&#9658; <a href="https://byltly.com/2uKvQF">https://byltly.com/2uKvQF</a></b></p><br /><br />
6
- <p>But cracking DAT/EM Summit Evolution is not an easy task. It requires advanced skills in reverse engineering, programming, debugging, etc. It also involves many risks and challenges such as legal issues, malware infections, compatibility problems, functionality limitations, etc. On the other hand, using a cracked version of DAT/EM Summit Evolution can also have some benefits such as saving money, testing the software before buying it, accessing features that are not available in your product level, etc.</p>
7
- <p>In this article, we will show you how to find and download a crack for DAT/EM Summit Evolution, how to use a cracked version of the software, and what are the pros and cons of doing so. We will also provide some alternatives and recommendations for legal and ethical use of the software. Please note that this article is for educational purposes only and we do not condone or encourage piracy or illegal use of any software.</p>
8
- <h2>How to Find and Download a Crack for Summit Evolution</h2>
9
- <p>The first step to crack DAT/EM Summit Evolution is to find and download a crack for it. A crack is usually a file or a program that modifies or replaces some parts of the original software to make it work without a license or a dongle. There are many websites that offer cracks for various software online, but not all of them are trustworthy or reliable.</p>
10
- <p>Some websites may try to scam you by asking you to pay money or provide personal information before downloading a crack. Some websites may infect your computer with malware or viruses that can harm your system or steal your data. Some websites may provide fake or outdated cracks that do not work or cause errors.</p>
11
- <p>Therefore, you need to be careful and cautious when looking for cracks online. Here are some tips on how to avoid scams and malware when searching for cracks:</p>
12
- <ul>
13
- <li>Use a reputable search engine such as Google or Bing to find cracks.</li>
14
- <li>Use keywords such as "DAT/EM Summit Evolution crack", "DAT/EM Summit Evolution dongle emulator", "DAT/EM Summit Evolution keygen", etc.</li>
15
- <li>Check the domain name, URL, and design of the website. Avoid websites that have suspicious or unfamiliar domain names or URLs such as .ru, .cn, .tk, .biz, etc. Avoid websites that have poor design or layout such as broken links, pop-ups, ads, etc.</li>
16
- <li>Read the comments, reviews, ratings, feedbacks, etc. of other users who have downloaded or used the crack. Avoid websites that have negative or no comments at all.</li>
17
- <li>Scan the crack file or program with an antivirus or anti-malware software before downloading or opening it. Avoid files or programs that have suspicious extensions such as .exe, .bat, .com, .scr, etc.</li>
18
- <li>Backup your important data before installing or running a crack on your computer.</li>
19
- </ul>
20
- <p>One example of a website that claims to provide a crack for DAT/EM Summit Evolution is Brain Studio (https://www.brstudio.com/wf/news/summit-evolution-dongle-emulator.html). According to this website, they offer a Sentinel SuperPro/UltraPro Dongle Emulator that can emulate the dongle protection of DAT/EM Summit Evolution v6.3 - v8.0. They also claim that their emulator can include all possible modules of the software.</p>
21
- <p>We cannot verify the authenticity or safety of this website or their crack. Therefore, we advise you to use it at your own risk and discretion. If you decide to download their crack, you need to follow their instructions on how to install and run it on your computer.</p>
22
- <h2>How to Use a Cracked Version of Summit Evolution</h2>
23
- <p>The second step to crack DAT/EM Summit Evolution is to use a cracked version of the software. A cracked version of DAT/EM Summit Evolution is a modified version of the original software that works without a license or a dongle. Depending on the type and quality of the crack you have downloaded, you may be able to access different features and modules of the software.</p>
24
- <p>datem summit evolution dongle emulator<br />
25
- datem summit evolution stereo data capture<br />
26
- datem summit evolution professional edition<br />
27
- datem summit evolution orthorectification tools<br />
28
- datem summit evolution 3d vector superimposition<br />
29
- datem summit evolution contour generation features<br />
30
- datem summit evolution v8.0 x64 bit download<br />
31
- datem summit evolution v7.6 patch update<br />
32
- datem summit evolution v7.4 sentinel superpro<br />
33
- datem summit evolution v6.3 user manual<br />
34
- datem summit evolution lite edition free trial<br />
35
- datem summit evolution mobile edition for field work<br />
36
- datem summit evolution uas edition for drone imagery<br />
37
- datem summit evolution point cloud application<br />
38
- datem summit evolution sample data elevation model<br />
39
- datem summit evolution propack bundle offer<br />
40
- datem summit evolution cad and gis interfaces<br />
41
- datem summit evolution automated feature editing<br />
42
- datem summit evolution terrain visualization options<br />
43
- datem summit evolution model generator tutorial<br />
44
- datem summit evolution stereo viewer operation guide<br />
45
- datem summit evolution capture interface for autocad<br />
46
- datem summit evolution superimposition for microstation<br />
47
- datem summit evolution arcgis integration tips<br />
48
- datem summit evolution global mapper compatibility<br />
49
- datem summit evolution 3d information discovery<br />
50
- datem summit evolution feature collection level<br />
51
- datem summit evolution orientation measurement module<br />
52
- datem summit evolution feature verification process<br />
53
- datem summit evolution release notes and brochures<br />
54
- datem summit evolution help and troubleshooting support<br />
55
- datem summit evolution drivers and manuals download<br />
56
- datem summit evolution license activation code<br />
57
- datem summit evolution system requirements and specifications<br />
58
- datem summit evolution customer reviews and testimonials<br />
59
- datem summit evolution product comparison and pricing<br />
60
- datem summit evolution training and certification courses<br />
61
- datem summit evolution online demo and webinar registration<br />
62
- datem summit evolution case studies and success stories<br />
63
- datem summit evolution news and events updates</p>
64
- <p>DAT/EM Summit Evolution is available in five product levels: Professional, Feature Collection, Lite, Mobile, and UAS. Each product level has different capabilities and functionalities depending on your needs and preferences.</p>
65
- <table>
66
- <tr><th>Product Level</th><th>Description</th></tr>
67
- <tr><td>Professional</td><td>The most comprehensive product level that includes orientation measurement, orthorectification, terrain visualization, contour generation, point translation, DTM collection, and more.</td></tr>
68
- <tr><td>Feature Collection</td><td>A product level that focuses on feature collection from stereo data using CAD and GIS interfaces. It does not include orientation measurement, orthorectification, or terrain visualization.</td></tr>
69
- <tr><td>Lite</td><td>A product level that provides 3D stereo viewing capabilities for resource specialists, GIS technicians, and QA professionals. It does not include feature collection tools.</td></tr>
70
- <tr><td>Mobile</td><td>A product level that optimizes 3D stereo viewing capabilities for field applications using laptops or tablets. It also works on desktop computers.</td></tr>
71
- <tr><td>UAS</td><td>A product level that specializes in 3D viewing and simple 3D digitizing from UAS orthophotos. It does not include orientation measurement, orthorectification, or terrain visualization.</td></tr>
72
- </table>
73
- <p>If you have downloaded a crack that can include all possible modules of DAT/EM Summit Evolution, you may be able to use any product level you want. However, if you have downloaded a crack that only works for a specific product level, you may be limited by its features and functions.</p>
74
- <p>To use a cracked version of DAT/EM Summit Evolution, you need to follow these steps:</p>
75
- <ol>
76
- <li>Launch the crack file or program on your computer. This may require administrator privileges or password depending on your system settings.</li>
77
- <li>Select the product level and modules you want to use from the crack interface. This may vary depending on the type and quality of the crack you have downloaded.</li>
78
- <li>Launch DAT/EM Summit Evolution from your desktop shortcut or start menu. The software should start without asking for a license or dongle verification.</li>
79
- <li>Access and manipulate stereo data from various sources such as aerial photos, satellite images, lidar data, etc. You can use various tools such as Capture™ interface, DAT/EM SuperImposition™, Summit Model Generator™, etc. to digitize features directly into AutoCAD®, MicroStation®, ArcGIS®, or Global Mapper®.</li>
80
- <p>Summit Evolution Feature Collection is a product level that focuses on feature collection from stereo data using CAD and GIS interfaces. It does not include orientation measurement, orthorectification, or terrain visualization.</p>
81
- <p>Summit Evolution Lite is a product level that provides 3D stereo viewing capabilities for resource specialists, GIS technicians, and QA professionals. It does not include feature collection tools.</p>
82
- <p>Summit Evolution Mobile is a product level that optimizes 3D stereo viewing capabilities for field applications using laptops or tablets. It also works on desktop computers.</p>
83
- <p>Summit Evolution UAS is a product level that specializes in 3D viewing and simple 3D digitizing from UAS orthophotos. It does not include orientation measurement, orthorectification, or terrain visualization.</p>
84
- <li><b>How does Summit Evolution compare to other stereo photogrammetry software?</b></li>
85
- <p>Summit Evolution is one of the leading stereo photogrammetry software in the market. It has many advantages over other software such as:</p>
86
- <ul>
87
- <li>It supports a wide range of stereo data sources such as aerial photos, satellite images, lidar data, etc.</li>
88
- <li>It integrates seamlessly with popular CAD and GIS applications such as AutoCAD®, MicroStation®, ArcGIS®, or Global Mapper®.</li>
89
- <li>It offers various tools for 3D stereo vector superimposition, automated feature editing, contour generation, and more.</li>
90
- <li>It has a user-friendly interface and a customizable keypad that enhance the workflow and productivity.</li>
91
- <li>It has a high-quality technical support team that provides assistance and guidance to the users.</li>
92
- </ul>
93
- <p>However, Summit Evolution also has some disadvantages compared to other software such as:</p>
94
- <ul>
95
- <li>It is expensive and requires a license or a dongle to run.</li>
96
- <li>It may not be compatible with some operating systems or hardware configurations.</li>
97
- <li>It may have some bugs or errors that affect its performance or functionality.</li>
98
- </ul>
99
- <li><b>What are the system requirements for running Summit Evolution?</b></li>
100
- <p>The system requirements for running Summit Evolution vary depending on the product level and modules you use. However, the minimum system requirements for running any product level of Summit Evolution are:</p>
101
- <ul>
102
- <li>A Windows 10 operating system (64-bit).</li>
103
- <li>A quad-core processor with a speed of 2.5 GHz or higher.</li>
104
- <li>A RAM memory of 8 GB or higher.</li>
105
- <li>A graphics card with a dedicated memory of 2 GB or higher.</li>
106
- <li>A monitor with a resolution of 1920 x 1080 pixels or higher.</li>
107
- <li>A mouse with a scroll wheel and at least three buttons.</li>
108
- <li>A DAT/EM Keypad (optional but recommended).</li>
109
- </ul>
110
- <li><b>How can I get technical support for Summit Evolution?</b></li>
111
- <p>If you have any questions or issues with Summit Evolution, you can contact the technical support team of DAT/EM Systems International by:</p>
112
- <ul>
113
- <li>Emailing them at [email protected]</li>
114
- <li>Calling them at +1 (907) 522-3681</li>
115
- <li>Filling out an online form at https://www.datem.com/support/</li>
116
- </ul>
117
- <li><b>Where can I learn more about Summit Evolution and its applications?</b></li>
118
- <p>If you want to learn more about Summit Evolution and its applications, you can visit the official website of DAT/EM Systems International at https://www.datem.com/. There you can find more information about the software features, product levels, modules, pricing, etc. You can also download the official documentation, tutorials, webinars, etc. that can help you understand and use the software better.</p>
119
- </p> 0a6ba089eb<br />
120
- <br />
121
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Autodesk Revit 2018 Crack WORK Keygen XForce Free Download.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Autodesk Revit 2018 Crack Keygen XForce Free Download</h2><br /><p><b><b>Download Zip</b> &gt;&gt;&gt;&gt;&gt; <a href="https://imgfil.com/2uxYIB">https://imgfil.com/2uxYIB</a></b></p><br /><br />
2
- <br />
3
- 3cee63e6c2<br />
4
- <br />
5
- <br />
6
- <p></p>
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Fireflies Movie English Subtitles Download !!LINK!! Torrent.md DELETED
@@ -1,22 +0,0 @@
1
-
2
- <h1>How to Watch Fireflies Movie with English Subtitles Online</h1>
3
- <p>Fireflies is a 2022 animated film directed by Hayao Miyazaki and produced by Studio Ghibli. It tells the story of a young boy who befriends a mysterious girl who can communicate with fireflies. The film has received critical acclaim and has been nominated for several awards, including the Academy Award for Best Animated Feature.</p>
4
- <h2>Fireflies Movie English Subtitles Download Torrent</h2><br /><p><b><b>Download Zip</b> &#9734;&#9734;&#9734; <a href="https://imgfil.com/2uy1ve">https://imgfil.com/2uy1ve</a></b></p><br /><br />
5
- <p>If you want to watch Fireflies movie with English subtitles online, you have a few options. One of them is to download the torrent file from a reliable source and use a torrent client to stream or download the movie. However, this method may be illegal in some countries and may expose you to malware or viruses. Therefore, we do not recommend this option.</p>
6
- <p>A safer and more legal way to watch Fireflies movie with English subtitles online is to use a streaming service that offers the film. Some of the streaming services that have Fireflies movie with English subtitles are:</p>
7
- <ul>
8
- <li>Netflix: Netflix is a popular streaming platform that has a large library of movies and shows, including many Studio Ghibli films. You can watch Fireflies movie with English subtitles on Netflix with a subscription plan that starts from $8.99 per month.</li>
9
- <li>Hulu: Hulu is another streaming service that has a variety of content, including anime and animation. You can watch Fireflies movie with English subtitles on Hulu with a subscription plan that starts from $5.99 per month.</li>
10
- <li>Amazon Prime Video: Amazon Prime Video is a streaming service that is part of the Amazon Prime membership. You can watch Fireflies movie with English subtitles on Amazon Prime Video with a Prime membership that costs $12.99 per month or $119 per year.</li>
11
- </ul>
12
- <p>These are some of the best ways to watch Fireflies movie with English subtitles online. We hope you enjoy this beautiful and touching film.</p>
13
- <p></p>
14
-
15
- <p>If you are looking for a more in-depth analysis of Fireflies movie, you may want to read some of the reviews that have been written by critics and fans. One of the reviews that we found helpful is from The Hollywood Reporter, which praises the film's visuals and themes. According to the review[^1^], Fireflies does a good job of rendering port locations that are vast and unfriendly by day and depopulated and ghostly by night, both moods being entirely appropriate. The review also notes that the film explores the themes of exile, identity, and belonging with sensitivity and nuance.</p>
16
- <p>Fireflies movie is a masterpiece of animation that will touch your heart and make you think. Whether you watch it online or in a theater, you will not regret spending your time on this film. We hope you enjoy Fireflies movie with English subtitles as much as we did.</p>
17
-
18
- <p>Fireflies movie also boasts an impressive cast of voice actors who bring the characters to life. The film features the voices of Ryan Reynolds, Willem Dafoe, Emily Watson, Carrie-Anne Moss, Julia Roberts, Ioan Gruffudd and Kate Mara[^1^]. They deliver emotional and nuanced performances that capture the personalities and struggles of their roles.</p>
19
- <p>Another aspect of Fireflies movie that deserves praise is the music. The film features a beautiful and haunting score composed by Joe Hisaishi, who has collaborated with Hayao Miyazaki on many of his previous films. The music enhances the mood and atmosphere of the film, creating a sense of wonder and melancholy. The film also features a song by Yoko Ono, who wrote it specifically for Fireflies movie.</p>
20
- <p>Fireflies movie is a rare gem of animation that will stay with you long after you watch it. It is a film that celebrates the power of imagination, friendship and love in the face of adversity. It is a film that challenges you to think about the meaning of life and the value of human connection. It is a film that will make you laugh, cry and smile.</p> d5da3c52bf<br />
21
- <br />
22
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Free and Unlimited Android Mods with APKMODEL.md DELETED
@@ -1,75 +0,0 @@
1
-
2
- <h1>APKMODEL: The Ultimate Source for Modded Games and Apps for Android</h1>
3
- <p>If you are an Android user who loves playing games and using apps on your device, you might have heard of apkmodel. But what is apkmodel and why should you use it? In this article, we will answer these questions and show you how apkmodel can enhance your gaming and app experience.</p>
4
- <h2>What is APKMODEL?</h2>
5
- <h3>APKMODEL is a website that offers modded games and apps for Android devices.</h3>
6
- <p>Modded games and apps are modified versions of the original ones that have extra features, unlocked content, unlimited resources, or other enhancements. For example, you can play a modded version of Subway Surfers with unlimited coins and keys, or a modded version of Spotify with premium features for free.</p>
7
- <h2>apkmodel</h2><br /><p><b><b>Download</b> === <a href="https://jinyurl.com/2uNKDW">https://jinyurl.com/2uNKDW</a></b></p><br /><br />
8
- <h3>Modded games and apps are not available on the official Google Play Store, but you can download them from apkmodel.</h3>
9
- <p>Apkmodel is a website that hosts thousands of modded games and apps from various categories and genres, such as action, adventure, arcade, puzzle, simulation, sports, music, photography, social media, and more. You can find popular titles like Minecraft, Clash of Clans, Candy Crush Saga, TikTok, Instagram, Netflix, and many others on apkmodel.</p>
10
- <h2>Why use APKMODEL?</h2>
11
- <h3>APKMODEL has many benefits for Android users who want to enjoy their favorite games and apps without any limitations or restrictions.</h3>
12
- <h4>APKMODEL provides a large collection of modded games and apps from various categories and genres.</h4>
13
- <p>Whether you are looking for a game to kill some time, an app to enhance your productivity, or a tool to customize your device, you can find it on apkmodel. You can also discover new games and apps that you might not have heard of before.</p>
14
- <h4>APKMODEL updates its content regularly and ensures that the mods are safe, tested, and working.</h4>
15
- <p>Apkmodel keeps up with the latest trends and releases in the gaming and app industry and adds new mods every day. You can also request mods that are not available on the website and they will try to provide them as soon as possible. Moreover, apkmodel checks all the mods for viruses, malware, and compatibility issues before uploading them to the website.</p>
16
- <h4>APKMODEL has a user-friendly interface and easy download process.</h4>
17
- <p>Apkmodel has a simple and intuitive design that makes it easy to navigate and find what you are looking for. You can also use the search bar or filter by category to narrow down your options. To download a modded game or app, you just need to click on the download button and wait for the file to be downloaded to your device. You don't need to sign up, log in, or provide any personal information.</p>
18
- <p>apkmodel modded games<br />
19
- apkmodel android apps<br />
20
- apkmodel free download<br />
21
- apkmodel latest version<br />
22
- apkmodel premium apk<br />
23
- apkmodel mod menu<br />
24
- apkmodel unlimited money<br />
25
- apkmodel pro apk<br />
26
- apkmodel hacked games<br />
27
- apkmodel cracked apps<br />
28
- apkmodel online games<br />
29
- apkmodel offline games<br />
30
- apkmodel action games<br />
31
- apkmodel adventure games<br />
32
- apkmodel arcade games<br />
33
- apkmodel casual games<br />
34
- apkmodel puzzle games<br />
35
- apkmodel racing games<br />
36
- apkmodel role playing games<br />
37
- apkmodel simulation games<br />
38
- apkmodel sports games<br />
39
- apkmodel strategy games<br />
40
- apkmodel social apps<br />
41
- apkmodel entertainment apps<br />
42
- apkmodel productivity apps<br />
43
- apkmodel photography apps<br />
44
- apkmodel video apps<br />
45
- apkmodel music apps<br />
46
- apkmodel education apps<br />
47
- apkmodel health apps<br />
48
- apkmodel lifestyle apps<br />
49
- apkmodel shopping apps<br />
50
- apkmodel travel apps<br />
51
- apkmodel news apps<br />
52
- apkmodel books apps<br />
53
- apkmodel communication apps<br />
54
- apkmodel finance apps<br />
55
- apkmodel personalization apps<br />
56
- apkmodel tools apps<br />
57
- apkmodel weather apps</p>
58
- <h4>APKMODEL respects the privacy and security of its users and does not require any registration or personal information.</h4>
59
- <p>Apkmodel does not collect, store, or share any data from its users. You can use the website anonymously and safely without worrying about your privacy or security. Apkmodel also does not host any ads or pop-ups that might annoy you or harm your device.</p>
60
- <h2>How to use APKMODEL?</h2>
61
- <h3>Using APKMODEL is simple and straightforward. Here are the steps to follow:</h3>
62
- <h4>Step 1: Visit the APKMODEL website and browse through the categories or use the search bar to find the game or app you want.</h4>
63
- <p>Apkmodel has a well-organized and easy-to-use website that allows you to find your desired modded game or app in no time. You can explore the different categories, such as action, arcade, casual, strategy, role-playing, etc., or use the search bar to type in the name of the game or app you are looking for.</p>
64
- <h4>Step 2: Click on the download button and wait for the file to be downloaded to your device.</h4>
65
- <p>Once you have found the modded game or app you want, you can click on the download button and choose the version you prefer. Some mods may have different versions with different features or compatibility options. You can also read the description, features, installation guide, and user reviews of the mod before downloading it. The download process is fast and easy, and you don't need to go through any surveys or verification steps.</p>
66
- <h4>Step 3: Install the modded game or app by enabling the unknown sources option in your settings.</h4>
67
- <p>After downloading the modded game or app, you need to install it on your device. To do that, you need to enable the unknown sources option in your settings. This option allows you to install apps from sources other than the Google Play Store. To enable it, go to Settings > Security > Unknown Sources and toggle it on. Then, locate the downloaded file in your file manager and tap on it to install it.</p>
68
- <h4>Step 4: Enjoy your modded game or app with all the features and benefits.</h4>
69
- <p>Now you are ready to enjoy your modded game or app with all the features and benefits that it offers. You can play unlimited levels, unlock premium content, get unlimited resources, remove ads, and more. You can also update your modded game or app whenever a new version is available on apkmodel.</p>
70
- <h2>Conclusion</h2>
71
- <h3>APKMODEL is a great source for modded games and apps for Android users who want to have more fun and convenience with their devices.</h3>
72
- <p>Apkmodel is a website that provides thousands of modded games and apps for Android devices that have extra features, unlocked content, unlimited resources, or other enhancements. Apkmodel has many benefits for Android users, such as a large collection of mods from various categories and genres, regular updates, safe and tested mods, user-friendly interface, easy download process, privacy and security protection, and no ads or pop-ups. Using apkmodel is simple and straightforward; you just need to visit the website, find the modded game or app you want, download it, install it, and enjoy it. Apkmodel is the ultimate source for modded games and apps for Android users who want to have more fun and convenience with their devices.</p>
73
- FAQs Q: Is apkmodel legal? A: Apkmodel is legal as long as you use it for personal and educational purposes only. However, some modded games and apps may violate the terms and conditions of the original developers or publishers. Therefore, we advise you to use apkmodel at your own risk and discretion. Q: Is apkmodel safe? A: Apkmodel is safe as long as you download mods from its official website only. Apkmodel checks all the mods for viruses, malware, and compatibility issues before uploading them to the website. However, some mods may require additional permissions or access to your device's functions or data. Therefore, we advise you to read the description, features, installation guide, and user reviews of the mod before downloading it. Q: How can I request a mod that is not available on apkmodel? A: Apkmodel welcomes requests from its users for mods that are not available on its website. You can request a mod by filling out a form on its website or by contacting its support team via email or social media. Q: How can I update my modded game or app? A: Ap kmodel updates its mods regularly and notifies its users whenever a new version is available. You can update your modded game or app by downloading the latest version from apkmodel and installing it over the previous one. You can also check the update history and changelog of the mod on its website. Q: How can I uninstall my modded game or app? A: You can uninstall your modded game or app by following the same steps as you would for any other app on your device. Go to Settings > Apps > Select the modded game or app > Uninstall. You can also delete the downloaded file from your file manager. Q: How can I contact apkmodel or give feedback? A: Apkmodel values the opinions and suggestions of its users and welcomes any feedback or questions. You can contact apkmodel or give feedback by using the contact form on its website or by emailing them at [email protected]. You can also follow them on Facebook, Twitter, Instagram, and YouTube for the latest news and updates.</p> 401be4b1e0<br />
74
- <br />
75
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Nada Dering WA Tiktok Suara Google BTS Chagiya dan Lainnya.md DELETED
@@ -1,76 +0,0 @@
1
-
2
- <h1>How to Download and Use TikTok Sounds as WhatsApp Notifications</h1>
3
- <p>TikTok is a popular social media app that allows users to create and share short videos with various effects and sounds. WhatsApp is a widely used messaging app that lets users send text, voice, image, video, and audio messages. If you are a fan of both apps, you might want to use some of the catchy or funny sounds from TikTok as your WhatsApp notifications. This way, you can spice up your chats and calls with your friends and family.</p>
4
- <p>In this article, we will show you how to download and use TikTok sounds as WhatsApp notifications in a few simple steps. You will need a smartphone, an internet connection, a TikTok downloader website, and of course, both TikTok and WhatsApp apps installed on your phone.</p>
5
- <h2>download notifikasi wa tiktok</h2><br /><p><b><b>DOWNLOAD</b> ->>> <a href="https://jinyurl.com/2uNMAd">https://jinyurl.com/2uNMAd</a></b></p><br /><br />
6
- <h2>How to Download TikTok Sounds</h2>
7
- <h3>Find and Copy the Link of the TikTok Video</h3>
8
- <p>The first step is to find a TikTok video that has a sound that you like and want to use as your WhatsApp notification. You can browse through different categories, hashtags, or trends on TikTok, or search for specific keywords or users. Once you find a video that you like, tap on the share icon at the bottom right corner of the screen. Then, tap on Copy link to copy the link of the video to your clipboard.</p>
9
- <h3>Paste the Link into a TikTok Downloader Website</h3>
10
- <p>The next step is to use a TikTok downloader website to download the video as an MP3 file. There are many websites that offer this service for free, such as <a href="(^1^)">TiktokDownloader</a>, <a href="(^2^)">MusicallyDown</a>, or <a href="(^3^)">SnapTik</a>. All you have to do is paste the link of the video that you copied into the input box on these websites and click on Download. Then, choose Download MP3 from the options that appear.</p>
11
- <h3>Save the MP3 File to Your Phone</h3>
12
- <p>The final step is to save the downloaded MP3 file to your phone's storage. Depending on your browser settings, you might be asked where you want to save the file or it might be saved automatically in your Downloads folder. You can also rename the file if you want.</p>
13
- <h2>How to Use TikTok Sounds as WhatsApp Notifications</h2>
14
- <h3>Move the MP3 File to the Ringtones Folder</h3>
15
- <p>Before you can use the TikTok sound as your WhatsApp notification, you need to move it to the Ringtones folder on your phone so that it can be used as a notification sound. To do this, you can use a file manager app on your phone, such as <a href="">Files by Google</a>, <a href="">ES File Explorer</a>, or <a href="">File Manager</a>. Open the app and locate the MP3 file that you downloaded. Then, long-press on the file and select Move or Cut. Navigate to the Ringtones folder on your phone, which is usually under Internal storage > Ringtones. Then, tap on Paste or Move here to move the file to the Ringtones folder.</p>
16
- <h3>Open WhatsApp and Go to Settings</h3>
17
- <p>Now that you have moved the TikTok sound to the Ringtones folder, you can use it as your WhatsApp notification. To do this, open WhatsApp and tap on the three dots icon at the top right corner of the screen. Then, tap on Settings from the menu that appears. This will open the Settings menu of WhatsApp.</p>
18
- <p>Download nada dering wa tiktok viral<br />
19
- Cara download sound tiktok ke wa jadi nada dering lucu<br />
20
- Download notifikasi wa chagiya tiktok viral lucu dan imut<br />
21
- Download kumpulan nada dering wa pendek dari tiktok<br />
22
- Download nada dering wa bts dari lagu-lagu tiktok<br />
23
- Download nada dering wa suara google dari tiktok<br />
24
- Download nada dering wa doraemon baling-baling bambu dari tiktok<br />
25
- Download nada dering wa ayam dj lucu jawa dari tiktok<br />
26
- Download nada dering wa minion beatbox dari tiktok<br />
27
- Download nada dering wa lel funny dari tiktok<br />
28
- Download nada dering wa bahasa sunda dari tiktok<br />
29
- Download nada dering wa bahasa jawa dari tiktok<br />
30
- Download nada dering wa hihi hahah dari tiktok<br />
31
- Download nada dering wa intro dari tiktok<br />
32
- Download nada dering wa suara air jatuh dari tiktok<br />
33
- Download nada dering wa ketuk pintu dari tiktok<br />
34
- Download nada dering wa lucu super mario dari tiktok<br />
35
- Download nada dering wa lucu orang batuk dari tiktok<br />
36
- Download nada dering wa sahur suara google dari tiktok<br />
37
- Download nada dering wa nani ohayo yang viral di tiktok<br />
38
- Download nada dering wa dynamite bts yang viral di tiktok<br />
39
- Download nada dering wa morning call bts yang viral di tiktok<br />
40
- Download nada dering wa jungkook bts yang viral di tiktok<br />
41
- Download nada dering wa v bts yang viral di tiktok<br />
42
- Download nada dering wa jimin bts yang viral di tiktok<br />
43
- Download nada dering wa rm bts yang viral di tiktok<br />
44
- Download nada dering wa jin bts yang viral di tiktok<br />
45
- Download nada dering wa suga bts yang viral di tiktok<br />
46
- Download nada dering wa j-hope bts yang viral di tiktok<br />
47
- Download nada dering wa korea imut yang viral di tiktok<br />
48
- Download nada dering wa mobile legends yang viral di tiktok<br />
49
- Download nada dering wa harvest moon yang viral di tiktok<br />
50
- Download nada dering wa kata sayang yang viral di tiktok<br />
51
- Download nada dering wa 1 detik yang viral di tiktok<br />
52
- Cara membuat notifikasi wa pakai suara sendiri dari tiktok<br />
53
- Cara mengganti notifikasi wa dengan mp3 dari tiktok<br />
54
- Cara download notifikasi wa di jalantikus dari tiktok<br />
55
- Aplikasi download notifikasi wa terbaik dari tiktok<br />
56
- Kumpulan ringtone wa terbaik lainnya dari tiktok<br />
57
- Tips memilih notifikasi wa yang sesuai dengan kepribadian dari tiktok</p>
58
- <h3>Choose the Notification Sound that You Want to Change</h3>
59
- <p>In the Settings menu, tap on Notifications to access the notification settings of WhatsApp. Here, you can choose between message, call, or group notifications and customize them according to your preferences. For example, if you want to change the notification sound for messages, tap on Notification tone under Message notifications. This will open a list of available notification tones on your phone.</p>
60
- <h3>Select the TikTok Sound from the List</h3>
61
- <p>In the list of notification tones, scroll down until you find the TikTok sound that you downloaded and moved to the Ringtones folder. It should have the same name as the MP3 file that you saved. Tap on it to select it as your notification tone for messages. You can also preview the sound by tapping on the play icon next to it. Once you are satisfied with your choice, tap on OK to save it.</p>
62
- <h2>Conclusion</h2>
63
- <p>Congratulations! You have successfully downloaded and used a TikTok sound as your WhatsApp notification. You can repeat the same steps for any other TikTok sound that you like and use it for different types of notifications on WhatsApp. You can also share your TikTok sounds with your friends and family by sending them the MP3 files or the links of the videos. This way, you can have fun and express yourself with TikTok sounds on WhatsApp.</p>
64
- <h2>FAQs</h2>
65
- <h4>Q: Can I use TikTok sounds as my phone's ringtone?</h4>
66
- <p>A: Yes, you can use TikTok sounds as your phone's ringtone by following the same steps as above, but instead of choosing Notification tone, choose Phone ringtone in the Settings menu of WhatsApp.</p>
67
- <h4>Q: Can I use TikTok sounds as my alarm sound?</h4>
68
- <p>A: Yes, you can use TikTok sounds as your alarm sound by following the same steps as above, but instead of moving the MP3 file to the Ringtones folder, move it to the Alarms folder on your phone.</p>
69
- <h4>Q: How can I delete a TikTok sound from my phone?</h4>
70
- <p>A: If you want to delete a TikTok sound from your phone, you can use a file manager app to locate and delete the MP3 file from your phone's storage. You can also go to the Settings menu of WhatsApp and choose Reset notification settings to restore the default notification sounds.</p>
71
- <h4>Q: How can I edit a TikTok sound before using it as my WhatsApp notification?</h4>
72
- <p>A: If you want to edit a TikTok sound before using it as your WhatsApp notification, you can use an audio editor app on your phone, such as <a href="">MP3 Cutter and Ringtone Maker</a>, <a href="">Ringtone Maker</a>, or <a href="">Audio MP3 Cutter Mix Converter and Ringtone Maker</a>. These apps allow you to trim, cut, merge, mix, or add effects to your audio files.</p>
73
- <h4>Q: How can I find more TikTok sounds that I like?</h4>
74
- <p>A: If you want to find more TikTok sounds that you like, you can explore different categories, hashtags, or trends on TikTok, or search for specific keywords or users. You can also follow your favorite creators or celebrities on TikTok and see what sounds they use in their videos.</p> 401be4b1e0<br />
75
- <br />
76
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ADOPLE/Multi-Doc-Virtual-Chatbot/app.py DELETED
@@ -1,202 +0,0 @@
1
- from pydantic import NoneStr
2
- import os
3
- from langchain.chains.question_answering import load_qa_chain
4
- from langchain.document_loaders import UnstructuredFileLoader
5
- from langchain.embeddings.openai import OpenAIEmbeddings
6
- from langchain.llms import OpenAI
7
- from langchain.text_splitter import CharacterTextSplitter
8
- from langchain.vectorstores import FAISS
9
- from langchain.vectorstores import Chroma
10
- from langchain.chains import ConversationalRetrievalChain
11
- import gradio as gr
12
- import openai
13
- from langchain import PromptTemplate, OpenAI, LLMChain
14
- import validators
15
- import requests
16
- import mimetypes
17
- import tempfile
18
-
19
- class Chatbot:
20
- def __init__(self):
21
- openai.api_key = os.getenv("OPENAI_API_KEY")
22
- def get_empty_state(self):
23
-
24
- """ Create empty Knowledge base"""
25
-
26
- return {"knowledge_base": None}
27
-
28
- def create_knowledge_base(self,docs):
29
-
30
- """Create a knowledge base from the given documents.
31
- Args:
32
- docs (List[str]): List of documents.
33
- Returns:
34
- FAISS: Knowledge base built from the documents.
35
- """
36
-
37
- # Initialize a CharacterTextSplitter to split the documents into chunks
38
- # Each chunk has a maximum length of 500 characters
39
- # There is no overlap between the chunks
40
- text_splitter = CharacterTextSplitter(
41
- separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len
42
- )
43
-
44
- # Split the documents into chunks using the text_splitter
45
- chunks = text_splitter.split_documents(docs)
46
-
47
- # Initialize an OpenAIEmbeddings model to compute embeddings of the chunks
48
- embeddings = OpenAIEmbeddings()
49
-
50
- # Build a knowledge base using Chroma from the chunks and their embeddings
51
- knowledge_base = Chroma.from_documents(chunks, embeddings)
52
-
53
- # Return the resulting knowledge base
54
- return knowledge_base
55
-
56
-
57
- def upload_file(self,file_paths):
58
- """Upload a file and create a knowledge base from its contents.
59
- Args:
60
- file_paths : The files to uploaded.
61
- Returns:
62
- tuple: A tuple containing the file name and the knowledge base.
63
- """
64
-
65
- file_paths = [i.name for i in file_paths]
66
- print(file_paths)
67
-
68
-
69
- loaders = [UnstructuredFileLoader(file_obj, strategy="fast") for file_obj in file_paths]
70
-
71
- # Load the contents of the file using the loader
72
- docs = []
73
- for loader in loaders:
74
- docs.extend(loader.load())
75
-
76
- # Create a knowledge base from the loaded documents using the create_knowledge_base() method
77
- knowledge_base = self.create_knowledge_base(docs)
78
-
79
-
80
- # Return a tuple containing the file name and the knowledge base
81
- return file_paths, {"knowledge_base": knowledge_base}
82
-
83
- def add_text(self,history, text):
84
- history = history + [(text, None)]
85
- print("History for Add text : ",history)
86
- return history, gr.update(value="", interactive=False)
87
-
88
-
89
-
90
- def upload_multiple_urls(self,urls):
91
- urlss = [url.strip() for url in urls.split(',')]
92
- all_docs = []
93
- file_paths = []
94
- for url in urlss:
95
- if validators.url(url):
96
- headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',}
97
- r = requests.get(url,headers=headers)
98
- if r.status_code != 200:
99
- raise ValueError("Check the url of your file; returned status code %s" % r.status_code)
100
- content_type = r.headers.get("content-type")
101
- file_extension = mimetypes.guess_extension(content_type)
102
- temp_file = tempfile.NamedTemporaryFile(suffix=file_extension, delete=False)
103
- temp_file.write(r.content)
104
- file_path = temp_file.name
105
- file_paths.append(file_path)
106
-
107
- loaders = [UnstructuredFileLoader(file_obj, strategy="fast") for file_obj in file_paths]
108
-
109
- # Load the contents of the file using the loader
110
- docs = []
111
- for loader in loaders:
112
- docs.extend(loader.load())
113
-
114
- # Create a knowledge base from the loaded documents using the create_knowledge_base() method
115
- knowledge_base = self.create_knowledge_base(docs)
116
-
117
- return file_paths,{"knowledge_base":knowledge_base}
118
-
119
- def answer_question(self, question,history,state):
120
- """Answer a question based on the current knowledge base.
121
- Args:
122
- state (dict): The current state containing the knowledge base.
123
- Returns:
124
- str: The answer to the question.
125
- """
126
-
127
- # Retrieve the knowledge base from the state dictionary
128
- knowledge_base = state["knowledge_base"]
129
- retriever = knowledge_base.as_retriever()
130
- qa = ConversationalRetrievalChain.from_llm(
131
- llm=OpenAI(temperature=0.1),
132
- retriever=retriever,
133
- return_source_documents=False)
134
- # Set the question for which we want to find the answer
135
- res = []
136
- question = history[-1][0]
137
- for human, ai in history[:-1]:
138
- pair = (human, ai)
139
- res.append(pair)
140
-
141
- chat_history = []
142
-
143
- query = question
144
- result = qa({"question": query, "chat_history": chat_history})
145
- # Perform a similarity search on the knowledge base to retrieve relevant documents
146
- response = result["answer"]
147
- # Return the response as the answer to the question
148
- history[-1][1] = response
149
- print("History for QA : ",history)
150
- return history
151
-
152
-
153
- def clear_function(self,state):
154
- state.clear()
155
- # state = gr.State(self.get_empty_state())
156
-
157
- def gradio_interface(self):
158
-
159
- """Create the Gradio interface for the Chemical Identifier."""
160
-
161
- with gr.Blocks(css="style.css",theme='karthikeyan-adople/hudsonhayes-gray') as demo:
162
- gr.HTML("""<center class="darkblue" style='background-color:rgb(0,1,36); text-align:center;padding:25px;'>
163
- <center>
164
- <h1 class ="center" style="color:#fff">
165
- ADOPLE AI
166
- </h1>
167
- </center>
168
- <be>
169
- <h1 style="color:#fff">
170
- Virtual Assistant Chatbot
171
- </h1>
172
- </center>""")
173
- state = gr.State(self.get_empty_state())
174
- with gr.Column(elem_id="col-container"):
175
- with gr.Accordion("Upload Files", open = False):
176
- with gr.Row(elem_id="row-flex"):
177
- with gr.Row(elem_id="row-flex"):
178
- with gr.Column(scale=1,):
179
- file_url = gr.Textbox(label='file url :',show_label=True, placeholder="")
180
- with gr.Row(elem_id="row-flex"):
181
- with gr.Column(scale=1):
182
- file_output = gr.File()
183
- with gr.Column(scale=1):
184
- upload_button = gr.UploadButton("Browse File", file_types=[".txt", ".pdf", ".doc", ".docx"],file_count = "multiple")
185
- with gr.Row():
186
- chatbot = gr.Chatbot([], elem_id="chatbot")
187
- with gr.Row():
188
- txt = gr.Textbox(label = "Question",show_label=True,placeholder="Enter text and press Enter")
189
- with gr.Row():
190
- clear_btn = gr.Button(value="Clear")
191
-
192
- txt_msg = txt.submit(self.add_text, [chatbot, txt], [chatbot, txt], queue=False).then(self.answer_question, [txt, chatbot, state], chatbot)
193
- txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)
194
- file_url.submit(self.upload_multiple_urls, file_url, [file_output, state])
195
- clear_btn.click(self.clear_function,[state],[])
196
- clear_btn.click(lambda: None, None, chatbot, queue=False)
197
- upload_button.upload(self.upload_file, upload_button, [file_output,state])
198
- demo.queue().launch(debug=True)
199
-
200
- if __name__=="__main__":
201
- chatbot = Chatbot()
202
- chatbot.gradio_interface()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIConsultant/MusicGen/audiocraft/grids/compression/encodec_base_24khz.py DELETED
@@ -1,28 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- """
8
- Grid search file, simply list all the exp you want in `explorer`.
9
- Any new exp added there will be scheduled.
10
- You can cancel and experiment by commenting its line.
11
-
12
- This grid shows how to train a base causal EnCodec model at 24 kHz.
13
- """
14
-
15
- from ._explorers import CompressionExplorer
16
- from ...environment import AudioCraftEnvironment
17
-
18
-
19
- @CompressionExplorer
20
- def explorer(launcher):
21
- partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
22
- launcher.slurm_(gpus=8, partition=partitions)
23
- # base causal EnCodec trained on monophonic audio sampled at 24 kHz
24
- launcher.bind_(solver='compression/encodec_base_24khz')
25
- # replace this by the desired dataset
26
- launcher.bind_(dset='audio/example')
27
- # launch xp
28
- launcher()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/build_vocab_ltp.py DELETED
@@ -1,150 +0,0 @@
1
- import json
2
- from tqdm import tqdm
3
- import logging
4
- import pickle
5
- from collections import Counter
6
- import re
7
- import fire
8
-
9
- class Vocabulary(object):
10
- """Simple vocabulary wrapper."""
11
- def __init__(self):
12
- self.word2idx = {}
13
- self.idx2word = {}
14
- self.idx = 0
15
-
16
- def add_word(self, word):
17
- if not word in self.word2idx:
18
- self.word2idx[word] = self.idx
19
- self.idx2word[self.idx] = word
20
- self.idx += 1
21
-
22
- def __call__(self, word):
23
- if not word in self.word2idx:
24
- return self.word2idx["<unk>"]
25
- return self.word2idx[word]
26
-
27
- def __len__(self):
28
- return len(self.word2idx)
29
-
30
- def build_vocab(input_json: str,
31
- output_json: str,
32
- threshold: int,
33
- keep_punctuation: bool,
34
- character_level: bool = False,
35
- zh: bool = True ):
36
- """Build vocabulary from csv file with a given threshold to drop all counts < threshold
37
-
38
- Args:
39
- input_json(string): Preprossessed json file. Structure like this:
40
- {
41
- 'audios': [
42
- {
43
- 'audio_id': 'xxx',
44
- 'captions': [
45
- {
46
- 'caption': 'xxx',
47
- 'cap_id': 'xxx'
48
- }
49
- ]
50
- },
51
- ...
52
- ]
53
- }
54
- threshold (int): Threshold to drop all words with counts < threshold
55
- keep_punctuation (bool): Includes or excludes punctuation.
56
-
57
- Returns:
58
- vocab (Vocab): Object with the processed vocabulary
59
- """
60
- data = json.load(open(input_json, "r"))["audios"]
61
- counter = Counter()
62
- pretokenized = "tokens" in data[0]["captions"][0]
63
-
64
- if zh:
65
- from ltp import LTP
66
- from zhon.hanzi import punctuation
67
- if not pretokenized:
68
- parser = LTP("base")
69
- for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
70
- for cap_idx in range(len(data[audio_idx]["captions"])):
71
- if pretokenized:
72
- tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split()
73
- else:
74
- caption = data[audio_idx]["captions"][cap_idx]["caption"]
75
- if character_level:
76
- tokens = list(caption)
77
- else:
78
- tokens, _ = parser.seg([caption])
79
- tokens = tokens[0]
80
- # Remove all punctuations
81
- if not keep_punctuation:
82
- tokens = [token for token in tokens if token not in punctuation]
83
- data[audio_idx]["captions"][cap_idx]["tokens"] = " ".join(tokens)
84
- counter.update(tokens)
85
- else:
86
- if pretokenized:
87
- for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
88
- for cap_idx in range(len(data[audio_idx]["captions"])):
89
- tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split()
90
- counter.update(tokens)
91
- else:
92
- from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
93
- captions = {}
94
- for audio_idx in range(len(data)):
95
- audio_id = data[audio_idx]["audio_id"]
96
- captions[audio_id] = []
97
- for cap_idx in range(len(data[audio_idx]["captions"])):
98
- caption = data[audio_idx]["captions"][cap_idx]["caption"]
99
- captions[audio_id].append({
100
- "audio_id": audio_id,
101
- "id": cap_idx,
102
- "caption": caption
103
- })
104
- tokenizer = PTBTokenizer()
105
- captions = tokenizer.tokenize(captions)
106
- for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
107
- audio_id = data[audio_idx]["audio_id"]
108
- for cap_idx in range(len(data[audio_idx]["captions"])):
109
- tokens = captions[audio_id][cap_idx]
110
- data[audio_idx]["captions"][cap_idx]["tokens"] = tokens
111
- counter.update(tokens.split(" "))
112
-
113
- if not pretokenized:
114
- if output_json is None:
115
- output_json = input_json
116
- json.dump({ "audios": data }, open(output_json, "w"), indent=4, ensure_ascii=not zh)
117
- words = [word for word, cnt in counter.items() if cnt >= threshold]
118
-
119
- # Create a vocab wrapper and add some special tokens.
120
- vocab = Vocabulary()
121
- vocab.add_word("<pad>")
122
- vocab.add_word("<start>")
123
- vocab.add_word("<end>")
124
- vocab.add_word("<unk>")
125
-
126
- # Add the words to the vocabulary.
127
- for word in words:
128
- vocab.add_word(word)
129
- return vocab
130
-
131
- def process(input_json: str,
132
- output_file: str,
133
- output_json: str = None,
134
- threshold: int = 1,
135
- keep_punctuation: bool = False,
136
- character_level: bool = False,
137
- zh: bool = True):
138
- logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
139
- logging.basicConfig(level=logging.INFO, format=logfmt)
140
- logging.info("Build Vocab")
141
- vocabulary = build_vocab(
142
- input_json=input_json, output_json=output_json, threshold=threshold,
143
- keep_punctuation=keep_punctuation, character_level=character_level, zh=zh)
144
- pickle.dump(vocabulary, open(output_file, "wb"))
145
- logging.info("Total vocabulary size: {}".format(len(vocabulary)))
146
- logging.info("Saved vocab to '{}'".format(output_file))
147
-
148
-
149
- if __name__ == '__main__':
150
- fire.Fire(process)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/open_clap/openai.py DELETED
@@ -1,129 +0,0 @@
1
- """ OpenAI pretrained model functions
2
-
3
- Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
4
- """
5
-
6
- import os
7
- import warnings
8
- from typing import Union, List
9
-
10
- import torch
11
-
12
- from .model import build_model_from_openai_state_dict
13
- from .pretrained import get_pretrained_url, list_pretrained_tag_models, download_pretrained
14
-
15
- __all__ = ["list_openai_models", "load_openai_model"]
16
-
17
-
18
- def list_openai_models() -> List[str]:
19
- """Returns the names of available CLIP models"""
20
- return list_pretrained_tag_models('openai')
21
-
22
-
23
- def load_openai_model(
24
- name: str,
25
- model_cfg,
26
- device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
27
- jit=True,
28
- cache_dir=os.path.expanduser("~/.cache/clip"),
29
- enable_fusion: bool = False,
30
- fusion_type: str = 'None'
31
- ):
32
- """Load a CLIP model, preserve its text pretrained part, and set in the CLAP model
33
-
34
- Parameters
35
- ----------
36
- name : str
37
- A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
38
- device : Union[str, torch.device]
39
- The device to put the loaded model
40
- jit : bool
41
- Whether to load the optimized JIT model (default) or more hackable non-JIT model.
42
-
43
- Returns
44
- -------
45
- model : torch.nn.Module
46
- The CLAP model
47
- preprocess : Callable[[PIL.Image], torch.Tensor]
48
- A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
49
- """
50
- if get_pretrained_url(name, 'openai'):
51
- model_path = download_pretrained(get_pretrained_url(name, 'openai'), root=cache_dir)
52
- elif os.path.isfile(name):
53
- model_path = name
54
- else:
55
- raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")
56
-
57
- try:
58
- # loading JIT archive
59
- model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
60
- state_dict = None
61
- except RuntimeError:
62
- # loading saved state dict
63
- if jit:
64
- warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
65
- jit = False
66
- state_dict = torch.load(model_path, map_location="cpu")
67
-
68
- if not jit:
69
- try:
70
- model = build_model_from_openai_state_dict(state_dict or model.state_dict(), model_cfg, enable_fusion, fusion_type).to(device)
71
- except KeyError:
72
- sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
73
- model = build_model_from_openai_state_dict(sd, model_cfg, enable_fusion, fusion_type).to(device)
74
-
75
- if str(device) == "cpu":
76
- model.float()
77
- return model
78
-
79
- # patch the device names
80
- device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
81
- device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
82
-
83
- def patch_device(module):
84
- try:
85
- graphs = [module.graph] if hasattr(module, "graph") else []
86
- except RuntimeError:
87
- graphs = []
88
-
89
- if hasattr(module, "forward1"):
90
- graphs.append(module.forward1.graph)
91
-
92
- for graph in graphs:
93
- for node in graph.findAllNodes("prim::Constant"):
94
- if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
95
- node.copyAttributes(device_node)
96
-
97
- model.apply(patch_device)
98
- patch_device(model.encode_audio)
99
- patch_device(model.encode_text)
100
-
101
- # patch dtype to float32 on CPU
102
- if str(device) == "cpu":
103
- float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
104
- float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
105
- float_node = float_input.node()
106
-
107
- def patch_float(module):
108
- try:
109
- graphs = [module.graph] if hasattr(module, "graph") else []
110
- except RuntimeError:
111
- graphs = []
112
-
113
- if hasattr(module, "forward1"):
114
- graphs.append(module.forward1.graph)
115
-
116
- for graph in graphs:
117
- for node in graph.findAllNodes("aten::to"):
118
- inputs = list(node.inputs())
119
- for i in [1, 2]: # dtype can be the second or third argument to aten::to()
120
- if inputs[i].node()["value"] == 5:
121
- inputs[i].node().copyAttributes(float_node)
122
-
123
- model.apply(patch_float)
124
- patch_float(model.encode_audio)
125
- patch_float(model.encode_text)
126
- model.float()
127
-
128
- model.audio_branch.audio_length = model.audio_cfg.audio_length
129
- return model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGText/GlyphControl/ldm/modules/image_degradation/bsrgan.py DELETED
@@ -1,730 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- """
3
- # --------------------------------------------
4
- # Super-Resolution
5
- # --------------------------------------------
6
- #
7
- # Kai Zhang ([email protected])
8
- # https://github.com/cszn
9
- # From 2019/03--2021/08
10
- # --------------------------------------------
11
- """
12
-
13
- import numpy as np
14
- import cv2
15
- import torch
16
-
17
- from functools import partial
18
- import random
19
- from scipy import ndimage
20
- import scipy
21
- import scipy.stats as ss
22
- from scipy.interpolate import interp2d
23
- from scipy.linalg import orth
24
- import albumentations
25
-
26
- import ldm.modules.image_degradation.utils_image as util
27
-
28
-
29
- def modcrop_np(img, sf):
30
- '''
31
- Args:
32
- img: numpy image, WxH or WxHxC
33
- sf: scale factor
34
- Return:
35
- cropped image
36
- '''
37
- w, h = img.shape[:2]
38
- im = np.copy(img)
39
- return im[:w - w % sf, :h - h % sf, ...]
40
-
41
-
42
- """
43
- # --------------------------------------------
44
- # anisotropic Gaussian kernels
45
- # --------------------------------------------
46
- """
47
-
48
-
49
- def analytic_kernel(k):
50
- """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
51
- k_size = k.shape[0]
52
- # Calculate the big kernels size
53
- big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
54
- # Loop over the small kernel to fill the big one
55
- for r in range(k_size):
56
- for c in range(k_size):
57
- big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
58
- # Crop the edges of the big kernel to ignore very small values and increase run time of SR
59
- crop = k_size // 2
60
- cropped_big_k = big_k[crop:-crop, crop:-crop]
61
- # Normalize to 1
62
- return cropped_big_k / cropped_big_k.sum()
63
-
64
-
65
- def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
66
- """ generate an anisotropic Gaussian kernel
67
- Args:
68
- ksize : e.g., 15, kernel size
69
- theta : [0, pi], rotation angle range
70
- l1 : [0.1,50], scaling of eigenvalues
71
- l2 : [0.1,l1], scaling of eigenvalues
72
- If l1 = l2, will get an isotropic Gaussian kernel.
73
- Returns:
74
- k : kernel
75
- """
76
-
77
- v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
78
- V = np.array([[v[0], v[1]], [v[1], -v[0]]])
79
- D = np.array([[l1, 0], [0, l2]])
80
- Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
81
- k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
82
-
83
- return k
84
-
85
-
86
- def gm_blur_kernel(mean, cov, size=15):
87
- center = size / 2.0 + 0.5
88
- k = np.zeros([size, size])
89
- for y in range(size):
90
- for x in range(size):
91
- cy = y - center + 1
92
- cx = x - center + 1
93
- k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
94
-
95
- k = k / np.sum(k)
96
- return k
97
-
98
-
99
- def shift_pixel(x, sf, upper_left=True):
100
- """shift pixel for super-resolution with different scale factors
101
- Args:
102
- x: WxHxC or WxH
103
- sf: scale factor
104
- upper_left: shift direction
105
- """
106
- h, w = x.shape[:2]
107
- shift = (sf - 1) * 0.5
108
- xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
109
- if upper_left:
110
- x1 = xv + shift
111
- y1 = yv + shift
112
- else:
113
- x1 = xv - shift
114
- y1 = yv - shift
115
-
116
- x1 = np.clip(x1, 0, w - 1)
117
- y1 = np.clip(y1, 0, h - 1)
118
-
119
- if x.ndim == 2:
120
- x = interp2d(xv, yv, x)(x1, y1)
121
- if x.ndim == 3:
122
- for i in range(x.shape[-1]):
123
- x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
124
-
125
- return x
126
-
127
-
128
- def blur(x, k):
129
- '''
130
- x: image, NxcxHxW
131
- k: kernel, Nx1xhxw
132
- '''
133
- n, c = x.shape[:2]
134
- p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
135
- x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
136
- k = k.repeat(1, c, 1, 1)
137
- k = k.view(-1, 1, k.shape[2], k.shape[3])
138
- x = x.view(1, -1, x.shape[2], x.shape[3])
139
- x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
140
- x = x.view(n, c, x.shape[2], x.shape[3])
141
-
142
- return x
143
-
144
-
145
- def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
146
- """"
147
- # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
148
- # Kai Zhang
149
- # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
150
- # max_var = 2.5 * sf
151
- """
152
- # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
153
- lambda_1 = min_var + np.random.rand() * (max_var - min_var)
154
- lambda_2 = min_var + np.random.rand() * (max_var - min_var)
155
- theta = np.random.rand() * np.pi # random theta
156
- noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
157
-
158
- # Set COV matrix using Lambdas and Theta
159
- LAMBDA = np.diag([lambda_1, lambda_2])
160
- Q = np.array([[np.cos(theta), -np.sin(theta)],
161
- [np.sin(theta), np.cos(theta)]])
162
- SIGMA = Q @ LAMBDA @ Q.T
163
- INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
164
-
165
- # Set expectation position (shifting kernel for aligned image)
166
- MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2)
167
- MU = MU[None, None, :, None]
168
-
169
- # Create meshgrid for Gaussian
170
- [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
171
- Z = np.stack([X, Y], 2)[:, :, :, None]
172
-
173
- # Calcualte Gaussian for every pixel of the kernel
174
- ZZ = Z - MU
175
- ZZ_t = ZZ.transpose(0, 1, 3, 2)
176
- raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
177
-
178
- # shift the kernel so it will be centered
179
- # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
180
-
181
- # Normalize the kernel and return
182
- # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
183
- kernel = raw_kernel / np.sum(raw_kernel)
184
- return kernel
185
-
186
-
187
- def fspecial_gaussian(hsize, sigma):
188
- hsize = [hsize, hsize]
189
- siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
190
- std = sigma
191
- [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
192
- arg = -(x * x + y * y) / (2 * std * std)
193
- h = np.exp(arg)
194
- h[h < scipy.finfo(float).eps * h.max()] = 0
195
- sumh = h.sum()
196
- if sumh != 0:
197
- h = h / sumh
198
- return h
199
-
200
-
201
- def fspecial_laplacian(alpha):
202
- alpha = max([0, min([alpha, 1])])
203
- h1 = alpha / (alpha + 1)
204
- h2 = (1 - alpha) / (alpha + 1)
205
- h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
206
- h = np.array(h)
207
- return h
208
-
209
-
210
- def fspecial(filter_type, *args, **kwargs):
211
- '''
212
- python code from:
213
- https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
214
- '''
215
- if filter_type == 'gaussian':
216
- return fspecial_gaussian(*args, **kwargs)
217
- if filter_type == 'laplacian':
218
- return fspecial_laplacian(*args, **kwargs)
219
-
220
-
221
- """
222
- # --------------------------------------------
223
- # degradation models
224
- # --------------------------------------------
225
- """
226
-
227
-
228
- def bicubic_degradation(x, sf=3):
229
- '''
230
- Args:
231
- x: HxWxC image, [0, 1]
232
- sf: down-scale factor
233
- Return:
234
- bicubicly downsampled LR image
235
- '''
236
- x = util.imresize_np(x, scale=1 / sf)
237
- return x
238
-
239
-
240
- def srmd_degradation(x, k, sf=3):
241
- ''' blur + bicubic downsampling
242
- Args:
243
- x: HxWxC image, [0, 1]
244
- k: hxw, double
245
- sf: down-scale factor
246
- Return:
247
- downsampled LR image
248
- Reference:
249
- @inproceedings{zhang2018learning,
250
- title={Learning a single convolutional super-resolution network for multiple degradations},
251
- author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
252
- booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
253
- pages={3262--3271},
254
- year={2018}
255
- }
256
- '''
257
- x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror'
258
- x = bicubic_degradation(x, sf=sf)
259
- return x
260
-
261
-
262
- def dpsr_degradation(x, k, sf=3):
263
- ''' bicubic downsampling + blur
264
- Args:
265
- x: HxWxC image, [0, 1]
266
- k: hxw, double
267
- sf: down-scale factor
268
- Return:
269
- downsampled LR image
270
- Reference:
271
- @inproceedings{zhang2019deep,
272
- title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
273
- author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
274
- booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
275
- pages={1671--1681},
276
- year={2019}
277
- }
278
- '''
279
- x = bicubic_degradation(x, sf=sf)
280
- x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
281
- return x
282
-
283
-
284
- def classical_degradation(x, k, sf=3):
285
- ''' blur + downsampling
286
- Args:
287
- x: HxWxC image, [0, 1]/[0, 255]
288
- k: hxw, double
289
- sf: down-scale factor
290
- Return:
291
- downsampled LR image
292
- '''
293
- x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
294
- # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
295
- st = 0
296
- return x[st::sf, st::sf, ...]
297
-
298
-
299
- def add_sharpening(img, weight=0.5, radius=50, threshold=10):
300
- """USM sharpening. borrowed from real-ESRGAN
301
- Input image: I; Blurry image: B.
302
- 1. K = I + weight * (I - B)
303
- 2. Mask = 1 if abs(I - B) > threshold, else: 0
304
- 3. Blur mask:
305
- 4. Out = Mask * K + (1 - Mask) * I
306
- Args:
307
- img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
308
- weight (float): Sharp weight. Default: 1.
309
- radius (float): Kernel size of Gaussian blur. Default: 50.
310
- threshold (int):
311
- """
312
- if radius % 2 == 0:
313
- radius += 1
314
- blur = cv2.GaussianBlur(img, (radius, radius), 0)
315
- residual = img - blur
316
- mask = np.abs(residual) * 255 > threshold
317
- mask = mask.astype('float32')
318
- soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
319
-
320
- K = img + weight * residual
321
- K = np.clip(K, 0, 1)
322
- return soft_mask * K + (1 - soft_mask) * img
323
-
324
-
325
- def add_blur(img, sf=4):
326
- wd2 = 4.0 + sf
327
- wd = 2.0 + 0.2 * sf
328
- if random.random() < 0.5:
329
- l1 = wd2 * random.random()
330
- l2 = wd2 * random.random()
331
- k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
332
- else:
333
- k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random())
334
- img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
335
-
336
- return img
337
-
338
-
339
- def add_resize(img, sf=4):
340
- rnum = np.random.rand()
341
- if rnum > 0.8: # up
342
- sf1 = random.uniform(1, 2)
343
- elif rnum < 0.7: # down
344
- sf1 = random.uniform(0.5 / sf, 1)
345
- else:
346
- sf1 = 1.0
347
- img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
348
- img = np.clip(img, 0.0, 1.0)
349
-
350
- return img
351
-
352
-
353
- # def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
354
- # noise_level = random.randint(noise_level1, noise_level2)
355
- # rnum = np.random.rand()
356
- # if rnum > 0.6: # add color Gaussian noise
357
- # img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
358
- # elif rnum < 0.4: # add grayscale Gaussian noise
359
- # img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
360
- # else: # add noise
361
- # L = noise_level2 / 255.
362
- # D = np.diag(np.random.rand(3))
363
- # U = orth(np.random.rand(3, 3))
364
- # conv = np.dot(np.dot(np.transpose(U), D), U)
365
- # img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
366
- # img = np.clip(img, 0.0, 1.0)
367
- # return img
368
-
369
- def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
370
- noise_level = random.randint(noise_level1, noise_level2)
371
- rnum = np.random.rand()
372
- if rnum > 0.6: # add color Gaussian noise
373
- img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
374
- elif rnum < 0.4: # add grayscale Gaussian noise
375
- img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
376
- else: # add noise
377
- L = noise_level2 / 255.
378
- D = np.diag(np.random.rand(3))
379
- U = orth(np.random.rand(3, 3))
380
- conv = np.dot(np.dot(np.transpose(U), D), U)
381
- img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
382
- img = np.clip(img, 0.0, 1.0)
383
- return img
384
-
385
-
386
- def add_speckle_noise(img, noise_level1=2, noise_level2=25):
387
- noise_level = random.randint(noise_level1, noise_level2)
388
- img = np.clip(img, 0.0, 1.0)
389
- rnum = random.random()
390
- if rnum > 0.6:
391
- img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
392
- elif rnum < 0.4:
393
- img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
394
- else:
395
- L = noise_level2 / 255.
396
- D = np.diag(np.random.rand(3))
397
- U = orth(np.random.rand(3, 3))
398
- conv = np.dot(np.dot(np.transpose(U), D), U)
399
- img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
400
- img = np.clip(img, 0.0, 1.0)
401
- return img
402
-
403
-
404
- def add_Poisson_noise(img):
405
- img = np.clip((img * 255.0).round(), 0, 255) / 255.
406
- vals = 10 ** (2 * random.random() + 2.0) # [2, 4]
407
- if random.random() < 0.5:
408
- img = np.random.poisson(img * vals).astype(np.float32) / vals
409
- else:
410
- img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
411
- img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
412
- noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
413
- img += noise_gray[:, :, np.newaxis]
414
- img = np.clip(img, 0.0, 1.0)
415
- return img
416
-
417
-
418
- def add_JPEG_noise(img):
419
- quality_factor = random.randint(30, 95)
420
- img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
421
- result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
422
- img = cv2.imdecode(encimg, 1)
423
- img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
424
- return img
425
-
426
-
427
- def random_crop(lq, hq, sf=4, lq_patchsize=64):
428
- h, w = lq.shape[:2]
429
- rnd_h = random.randint(0, h - lq_patchsize)
430
- rnd_w = random.randint(0, w - lq_patchsize)
431
- lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
432
-
433
- rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
434
- hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
435
- return lq, hq
436
-
437
-
438
- def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
439
- """
440
- This is the degradation model of BSRGAN from the paper
441
- "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
442
- ----------
443
- img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf)
444
- sf: scale factor
445
- isp_model: camera ISP model
446
- Returns
447
- -------
448
- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
449
- hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
450
- """
451
- isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
452
- sf_ori = sf
453
-
454
- h1, w1 = img.shape[:2]
455
- img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
456
- h, w = img.shape[:2]
457
-
458
- if h < lq_patchsize * sf or w < lq_patchsize * sf:
459
- raise ValueError(f'img size ({h1}X{w1}) is too small!')
460
-
461
- hq = img.copy()
462
-
463
- if sf == 4 and random.random() < scale2_prob: # downsample1
464
- if np.random.rand() < 0.5:
465
- img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
466
- interpolation=random.choice([1, 2, 3]))
467
- else:
468
- img = util.imresize_np(img, 1 / 2, True)
469
- img = np.clip(img, 0.0, 1.0)
470
- sf = 2
471
-
472
- shuffle_order = random.sample(range(7), 7)
473
- idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
474
- if idx1 > idx2: # keep downsample3 last
475
- shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
476
-
477
- for i in shuffle_order:
478
-
479
- if i == 0:
480
- img = add_blur(img, sf=sf)
481
-
482
- elif i == 1:
483
- img = add_blur(img, sf=sf)
484
-
485
- elif i == 2:
486
- a, b = img.shape[1], img.shape[0]
487
- # downsample2
488
- if random.random() < 0.75:
489
- sf1 = random.uniform(1, 2 * sf)
490
- img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
491
- interpolation=random.choice([1, 2, 3]))
492
- else:
493
- k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
494
- k_shifted = shift_pixel(k, sf)
495
- k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
496
- img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
497
- img = img[0::sf, 0::sf, ...] # nearest downsampling
498
- img = np.clip(img, 0.0, 1.0)
499
-
500
- elif i == 3:
501
- # downsample3
502
- img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
503
- img = np.clip(img, 0.0, 1.0)
504
-
505
- elif i == 4:
506
- # add Gaussian noise
507
- img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
508
-
509
- elif i == 5:
510
- # add JPEG noise
511
- if random.random() < jpeg_prob:
512
- img = add_JPEG_noise(img)
513
-
514
- elif i == 6:
515
- # add processed camera sensor noise
516
- if random.random() < isp_prob and isp_model is not None:
517
- with torch.no_grad():
518
- img, hq = isp_model.forward(img.copy(), hq)
519
-
520
- # add final JPEG compression noise
521
- img = add_JPEG_noise(img)
522
-
523
- # random crop
524
- img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
525
-
526
- return img, hq
527
-
528
-
529
- # todo no isp_model?
530
- def degradation_bsrgan_variant(image, sf=4, isp_model=None):
531
- """
532
- This is the degradation model of BSRGAN from the paper
533
- "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
534
- ----------
535
- sf: scale factor
536
- isp_model: camera ISP model
537
- Returns
538
- -------
539
- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
540
- hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
541
- """
542
- image = util.uint2single(image)
543
- isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
544
- sf_ori = sf
545
-
546
- h1, w1 = image.shape[:2]
547
- image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
548
- h, w = image.shape[:2]
549
-
550
- hq = image.copy()
551
-
552
- if sf == 4 and random.random() < scale2_prob: # downsample1
553
- if np.random.rand() < 0.5:
554
- image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
555
- interpolation=random.choice([1, 2, 3]))
556
- else:
557
- image = util.imresize_np(image, 1 / 2, True)
558
- image = np.clip(image, 0.0, 1.0)
559
- sf = 2
560
-
561
- shuffle_order = random.sample(range(7), 7)
562
- idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
563
- if idx1 > idx2: # keep downsample3 last
564
- shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
565
-
566
- for i in shuffle_order:
567
-
568
- if i == 0:
569
- image = add_blur(image, sf=sf)
570
-
571
- elif i == 1:
572
- image = add_blur(image, sf=sf)
573
-
574
- elif i == 2:
575
- a, b = image.shape[1], image.shape[0]
576
- # downsample2
577
- if random.random() < 0.75:
578
- sf1 = random.uniform(1, 2 * sf)
579
- image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
580
- interpolation=random.choice([1, 2, 3]))
581
- else:
582
- k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
583
- k_shifted = shift_pixel(k, sf)
584
- k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
585
- image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
586
- image = image[0::sf, 0::sf, ...] # nearest downsampling
587
- image = np.clip(image, 0.0, 1.0)
588
-
589
- elif i == 3:
590
- # downsample3
591
- image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
592
- image = np.clip(image, 0.0, 1.0)
593
-
594
- elif i == 4:
595
- # add Gaussian noise
596
- image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25)
597
-
598
- elif i == 5:
599
- # add JPEG noise
600
- if random.random() < jpeg_prob:
601
- image = add_JPEG_noise(image)
602
-
603
- # elif i == 6:
604
- # # add processed camera sensor noise
605
- # if random.random() < isp_prob and isp_model is not None:
606
- # with torch.no_grad():
607
- # img, hq = isp_model.forward(img.copy(), hq)
608
-
609
- # add final JPEG compression noise
610
- image = add_JPEG_noise(image)
611
- image = util.single2uint(image)
612
- example = {"image":image}
613
- return example
614
-
615
-
616
- # TODO incase there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc...
617
- def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None):
618
- """
619
- This is an extended degradation model by combining
620
- the degradation models of BSRGAN and Real-ESRGAN
621
- ----------
622
- img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf)
623
- sf: scale factor
624
- use_shuffle: the degradation shuffle
625
- use_sharp: sharpening the img
626
- Returns
627
- -------
628
- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
629
- hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
630
- """
631
-
632
- h1, w1 = img.shape[:2]
633
- img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
634
- h, w = img.shape[:2]
635
-
636
- if h < lq_patchsize * sf or w < lq_patchsize * sf:
637
- raise ValueError(f'img size ({h1}X{w1}) is too small!')
638
-
639
- if use_sharp:
640
- img = add_sharpening(img)
641
- hq = img.copy()
642
-
643
- if random.random() < shuffle_prob:
644
- shuffle_order = random.sample(range(13), 13)
645
- else:
646
- shuffle_order = list(range(13))
647
- # local shuffle for noise, JPEG is always the last one
648
- shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6)))
649
- shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13)))
650
-
651
- poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1
652
-
653
- for i in shuffle_order:
654
- if i == 0:
655
- img = add_blur(img, sf=sf)
656
- elif i == 1:
657
- img = add_resize(img, sf=sf)
658
- elif i == 2:
659
- img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
660
- elif i == 3:
661
- if random.random() < poisson_prob:
662
- img = add_Poisson_noise(img)
663
- elif i == 4:
664
- if random.random() < speckle_prob:
665
- img = add_speckle_noise(img)
666
- elif i == 5:
667
- if random.random() < isp_prob and isp_model is not None:
668
- with torch.no_grad():
669
- img, hq = isp_model.forward(img.copy(), hq)
670
- elif i == 6:
671
- img = add_JPEG_noise(img)
672
- elif i == 7:
673
- img = add_blur(img, sf=sf)
674
- elif i == 8:
675
- img = add_resize(img, sf=sf)
676
- elif i == 9:
677
- img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
678
- elif i == 10:
679
- if random.random() < poisson_prob:
680
- img = add_Poisson_noise(img)
681
- elif i == 11:
682
- if random.random() < speckle_prob:
683
- img = add_speckle_noise(img)
684
- elif i == 12:
685
- if random.random() < isp_prob and isp_model is not None:
686
- with torch.no_grad():
687
- img, hq = isp_model.forward(img.copy(), hq)
688
- else:
689
- print('check the shuffle!')
690
-
691
- # resize to desired size
692
- img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])),
693
- interpolation=random.choice([1, 2, 3]))
694
-
695
- # add final JPEG compression noise
696
- img = add_JPEG_noise(img)
697
-
698
- # random crop
699
- img, hq = random_crop(img, hq, sf, lq_patchsize)
700
-
701
- return img, hq
702
-
703
-
704
- if __name__ == '__main__':
705
- print("hey")
706
- img = util.imread_uint('utils/test.png', 3)
707
- print(img)
708
- img = util.uint2single(img)
709
- print(img)
710
- img = img[:448, :448]
711
- h = img.shape[0] // 4
712
- print("resizing to", h)
713
- sf = 4
714
- deg_fn = partial(degradation_bsrgan_variant, sf=sf)
715
- for i in range(20):
716
- print(i)
717
- img_lq = deg_fn(img)
718
- print(img_lq)
719
- img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img)["image"]
720
- print(img_lq.shape)
721
- print("bicubic", img_lq_bicubic.shape)
722
- print(img_hq.shape)
723
- lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
724
- interpolation=0)
725
- lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
726
- interpolation=0)
727
- img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
728
- util.imsave(img_concat, str(i) + '.png')
729
-
730
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIlexDev/Einfach.Hintergrund/app.py DELETED
@@ -1,154 +0,0 @@
1
- import cv2
2
- import gradio as gr
3
- import os
4
- from PIL import Image
5
- import numpy as np
6
- import torch
7
- from torch.autograd import Variable
8
- from torchvision import transforms
9
- import torch.nn.functional as F
10
- import gdown
11
- import matplotlib.pyplot as plt
12
- import warnings
13
- warnings.filterwarnings("ignore")
14
-
15
- os.system("git clone https://github.com/xuebinqin/DIS")
16
- os.system("mv DIS/IS-Net/* .")
17
-
18
- # project imports
19
- from data_loader_cache import normalize, im_reader, im_preprocess
20
- from models import *
21
-
22
- #Helpers
23
- device = 'cuda' if torch.cuda.is_available() else 'cpu'
24
-
25
- # Download official weights
26
- if not os.path.exists("saved_models"):
27
- os.mkdir("saved_models")
28
- MODEL_PATH_URL = "https://drive.google.com/uc?id=1KyMpRjewZdyYfxHPYcd-ZbanIXtin0Sn"
29
- gdown.download(MODEL_PATH_URL, "saved_models/isnet.pth", use_cookies=False)
30
-
31
- class GOSNormalize(object):
32
- '''
33
- Normalize the Image using torch.transforms
34
- '''
35
- def __init__(self, mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225]):
36
- self.mean = mean
37
- self.std = std
38
-
39
- def __call__(self,image):
40
- image = normalize(image,self.mean,self.std)
41
- return image
42
-
43
-
44
- transform = transforms.Compose([GOSNormalize([0.5,0.5,0.5],[1.0,1.0,1.0])])
45
-
46
- def load_image(im_path, hypar):
47
- im = im_reader(im_path)
48
- im, im_shp = im_preprocess(im, hypar["cache_size"])
49
- im = torch.divide(im,255.0)
50
- shape = torch.from_numpy(np.array(im_shp))
51
- return transform(im).unsqueeze(0), shape.unsqueeze(0) # make a batch of image, shape
52
-
53
-
54
- def build_model(hypar,device):
55
- net = hypar["model"]#GOSNETINC(3,1)
56
-
57
- # convert to half precision
58
- if(hypar["model_digit"]=="half"):
59
- net.half()
60
- for layer in net.modules():
61
- if isinstance(layer, nn.BatchNorm2d):
62
- layer.float()
63
-
64
- net.to(device)
65
-
66
- if(hypar["restore_model"]!=""):
67
- net.load_state_dict(torch.load(hypar["model_path"]+"/"+hypar["restore_model"], map_location=device))
68
- net.to(device)
69
- net.eval()
70
- return net
71
-
72
-
73
- def predict(net, inputs_val, shapes_val, hypar, device):
74
- '''
75
- Given an Image, predict the mask
76
- '''
77
- net.eval()
78
-
79
- if(hypar["model_digit"]=="full"):
80
- inputs_val = inputs_val.type(torch.FloatTensor)
81
- else:
82
- inputs_val = inputs_val.type(torch.HalfTensor)
83
-
84
-
85
- inputs_val_v = Variable(inputs_val, requires_grad=False).to(device) # wrap inputs in Variable
86
-
87
- ds_val = net(inputs_val_v)[0] # list of 6 results
88
-
89
- pred_val = ds_val[0][0,:,:,:] # B x 1 x H x W # we want the first one which is the most accurate prediction
90
-
91
- ## recover the prediction spatial size to the orignal image size
92
- pred_val = torch.squeeze(F.upsample(torch.unsqueeze(pred_val,0),(shapes_val[0][0],shapes_val[0][1]),mode='bilinear'))
93
-
94
- ma = torch.max(pred_val)
95
- mi = torch.min(pred_val)
96
- pred_val = (pred_val-mi)/(ma-mi) # max = 1
97
-
98
- if device == 'cuda': torch.cuda.empty_cache()
99
- return (pred_val.detach().cpu().numpy()*255).astype(np.uint8) # it is the mask we need
100
-
101
- # Set Parameters
102
- hypar = {} # paramters for inferencing
103
-
104
-
105
- hypar["model_path"] ="./saved_models" ## load trained weights from this path
106
- hypar["restore_model"] = "isnet.pth" ## name of the to-be-loaded weights
107
- hypar["interm_sup"] = False ## indicate if activate intermediate feature supervision
108
-
109
- ## choose floating point accuracy --
110
- hypar["model_digit"] = "full" ## indicates "half" or "full" accuracy of float number
111
- hypar["seed"] = 0
112
-
113
- hypar["cache_size"] = [1024, 1024] ## cached input spatial resolution, can be configured into different size
114
-
115
- ## data augmentation parameters ---
116
- hypar["input_size"] = [1024, 1024] ## mdoel input spatial size, usually use the same value hypar["cache_size"], which means we don't further resize the images
117
- hypar["crop_size"] = [1024, 1024] ## random crop size from the input, it is usually set as smaller than hypar["cache_size"], e.g., [920,920] for data augmentation
118
-
119
- hypar["model"] = ISNetDIS()
120
-
121
- # Build Model
122
- net = build_model(hypar, device)
123
-
124
-
125
- def inference(image):
126
- image_path = image
127
-
128
- image_tensor, orig_size = load_image(image_path, hypar)
129
- mask = predict(net, image_tensor, orig_size, hypar, device)
130
-
131
- pil_mask = Image.fromarray(mask).convert('L')
132
- im_rgb = Image.open(image).convert("RGB")
133
-
134
- im_rgba = im_rgb.copy()
135
- im_rgba.putalpha(pil_mask)
136
-
137
- return [im_rgba, pil_mask]
138
-
139
-
140
- title = "Akkurater Hintergrund Entferner"
141
- description = ""
142
- article = "<div><center><img src='https://visitor-badge.glitch.me/badge?page_id=max_skobeev_dis_cmp_public' alt='visitor badge'></center></div>"
143
-
144
- interface = gr.Interface(
145
- fn=inference,
146
- inputs=gr.Image(type='filepath'),
147
- outputs=["image", "image"],
148
- examples=[['robot.png'], ['ship.png']],
149
- title=title,
150
- description=description,
151
- article=article,
152
- allow_flagging='never',
153
- cache_examples=False,
154
- ).queue(concurrency_count=1, api_open=True).launch(show_api=True, show_error=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb32-120e_deepfashion2_sling_256x192.py DELETED
@@ -1,172 +0,0 @@
1
- _base_ = [
2
- '../../../_base_/default_runtime.py',
3
- '../../../_base_/datasets/deepfashion2.py'
4
- ]
5
-
6
- default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater'))
7
-
8
- resume = False # 断点恢复
9
- load_from = None # 模型权重加载
10
- train_cfg = dict(by_epoch=True, max_epochs=120, val_interval=10) # 训练轮数,测试间隔
11
- param_scheduler = [
12
- dict( # warmup策略
13
- type='LinearLR',
14
- begin=0,
15
- end=500,
16
- start_factor=0.001,
17
- by_epoch=False),
18
- dict( # scheduler
19
- type='MultiStepLR',
20
- begin=0,
21
- end=120,
22
- milestones=[80, 100],
23
- gamma=0.1,
24
- by_epoch=True)
25
- ]
26
- optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005)) # 优化器和学习率
27
- auto_scale_lr = dict(base_batch_size=512) # 根据batch_size自动缩放学习率
28
-
29
- backend_args = dict(backend='local') # 数据加载后端设置,默认从本地硬盘加载
30
- dataset_type = 'DeepFashion2Dataset' # 数据集类名 DeepFashionDataset
31
- data_mode = 'topdown' # 算法结构类型,用于指定标注信息加载策略
32
- data_root = 'data/deepfashion2/' # 数据存放路径
33
- # 定义数据编解码器,用于生成target和对pred进行解码,同时包含了输入图片和输出heatmap尺寸等信息
34
- codec = dict(
35
- type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
36
-
37
- train_pipeline = [
38
- dict(type='LoadImage'),
39
- dict(type='GetBBoxCenterScale'),
40
- dict(type='RandomFlip', direction='horizontal'),
41
- dict(
42
- type='RandomBBoxTransform',
43
- shift_prob=0,
44
- rotate_factor=60,
45
- scale_factor=(0.75, 1.25)),
46
- dict(type='TopdownAffine', input_size=codec['input_size']),
47
- dict(type='GenerateTarget', encoder=codec),
48
- dict(type='PackPoseInputs')
49
- ]
50
- val_pipeline = [ # 测试时数据增强
51
- dict(type='LoadImage', backend_args=backend_args), # 加载图片
52
- dict(type='GetBBoxCenterScale'), # 根据bbox获取center和scale
53
- dict(type='TopdownAffine', input_size=codec['input_size']), # 根据变换矩阵更新目标数据
54
- dict(type='PackPoseInputs') # 对target进行打包用于训练
55
- ]
56
- train_dataloader = dict( # 训练数据加载
57
- batch_size=32, # 批次大小
58
- num_workers=6, # 数据加载进程数
59
- persistent_workers=True, # 在不活跃时维持进程不终止,避免反复启动进程的开销
60
- sampler=dict(type='DefaultSampler', shuffle=True), # 采样策略,打乱数据
61
- dataset=dict(
62
- type=dataset_type, # 数据集类名
63
- data_root=data_root, # 数据集路径
64
- data_mode=data_mode, # 算法类型
65
- ann_file='train/deepfashion2_sling.json', # 标注文件路径
66
- data_prefix=dict(img='train/image/'), # 图像路径
67
- pipeline=train_pipeline # 数据流水线
68
- ))
69
- val_dataloader = dict(
70
- batch_size=32,
71
- num_workers=6,
72
- persistent_workers=True, # 在不活跃时维持进程不终止,避免反复启动进程的开销
73
- drop_last=False,
74
- sampler=dict(type='DefaultSampler', shuffle=False), # 采样策略,不进行打乱
75
- dataset=dict(
76
- type=dataset_type, # 数据集类名
77
- data_root=data_root, # 数据集路径
78
- data_mode=data_mode, # 算法类型
79
- ann_file='validation/deepfashion2_sling.json', # 标注文件路径
80
- data_prefix=dict(img='validation/image/'), # 图像路径
81
- test_mode=True, # 测试模式开关
82
- pipeline=val_pipeline # 数据流水线
83
- ))
84
- test_dataloader = val_dataloader # 默认情况下不区分验证集和测试集,用户根据需要来自行定义
85
-
86
- channel_cfg = dict(
87
- num_output_channels=294,
88
- dataset_joints=294,
89
- dataset_channel=[
90
- [
91
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
92
- 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
93
- 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
94
- 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
95
- 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
96
- 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
97
- 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
98
- 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
99
- 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
100
- 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
101
- 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
102
- 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
103
- 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
104
- 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
105
- 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
106
- 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
107
- 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245,
108
- 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
109
- 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
110
- 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
111
- 285, 286, 287, 288, 289, 290, 291, 292, 293
112
- ],
113
- ],
114
- inference_channel=[
115
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
116
- 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
117
- 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
118
- 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
119
- 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
120
- 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
121
- 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
122
- 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
123
- 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
124
- 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
125
- 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
126
- 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
127
- 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
128
- 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
129
- 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
130
- 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
131
- 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
132
- 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
133
- 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
134
- 290, 291, 292, 293
135
- ])
136
-
137
- model = dict(
138
- type='TopdownPoseEstimator', # 模型结构决定了算法流程
139
- data_preprocessor=dict( # 数据归一化和通道顺序调整,作为模型的一部分
140
- type='PoseDataPreprocessor',
141
- mean=[123.675, 116.28, 103.53],
142
- std=[58.395, 57.12, 57.375],
143
- bgr_to_rgb=True),
144
- backbone=dict(
145
- type='ResNet',
146
- depth=50,
147
- init_cfg=dict(
148
- type='Pretrained', # 预训练参数,只加载backbone权重用于迁移学习
149
- checkpoint='torchvision://resnet50')),
150
- head=dict( # 模型头部
151
- type='HeatmapHead',
152
- in_channels=2048,
153
- out_channels=channel_cfg['num_output_channels'],
154
- # deconv_out_channels=None,
155
- loss=dict(type='KeypointMSELoss', use_target_weight=True), # 损失函数
156
- decoder=codec), # 解码器,将heatmap解码成坐标值
157
- test_cfg=dict(
158
- flip_test=True, # 开启测试时水平翻转集成
159
- flip_mode='heatmap', # 对heatmap进行翻转
160
- shift_heatmap=True, # 对翻转后的结果进行平移提高精度
161
- ))
162
-
163
- val_evaluator = [
164
- dict(type='PCKAccuracy', thr=0.2),
165
- dict(type='AUC'),
166
- dict(type='EPE'),
167
- ]
168
- test_evaluator = val_evaluator # 默认情况下不区分验证集和测试集,用户根据需要来自行定义
169
-
170
- visualizer = dict(
171
- vis_backends=[dict(type='LocalVisBackend'),
172
- dict(type='WandbVisBackend')])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/click/Factory.d.ts DELETED
@@ -1,7 +0,0 @@
1
- // import * as Phaser from 'phaser';
2
- import Click from "./Click";
3
-
4
- export default function (
5
- gameObject: Phaser.GameObjects.GameObject,
6
- config?: Click.IConfig
7
- ): Click;
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dynamictext/Factory.d.ts DELETED
@@ -1,5 +0,0 @@
1
- import DynamicText from "./DynamicText";
2
-
3
- export default function (
4
- config?: DynamicText.IConfig
5
- ): DynamicText;
 
 
 
 
 
 
spaces/Ajaymekala/gradiolangchainChatBotOpenAI-1/app.py DELETED
@@ -1,34 +0,0 @@
1
- import os
2
- import gradio as gr
3
- from langchain.chat_models import ChatOpenAI
4
- from langchain import LLMChain, PromptTemplate
5
- from langchain.memory import ConversationBufferMemory
6
-
7
- OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
8
-
9
- template = """You are a helpful assistant to answer all user queries.
10
- {chat_history}
11
- User: {user_message}
12
- Chatbot:"""
13
-
14
- prompt = PromptTemplate(
15
- input_variables=["chat_history", "user_message"], template=template
16
- )
17
-
18
- memory = ConversationBufferMemory(memory_key="chat_history")
19
-
20
- llm_chain = LLMChain(
21
- llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"),
22
- prompt=prompt,
23
- verbose=True,
24
- memory=memory,
25
- )
26
-
27
- def get_text_response(user_message,history):
28
- response = llm_chain.predict(user_message = user_message)
29
- return response
30
-
31
- demo = gr.ChatInterface(get_text_response)
32
-
33
- if __name__ == "__main__":
34
- demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/docs/speed_benchmark.md DELETED
@@ -1,93 +0,0 @@
1
- ## Test Training Speed
2
-
3
- - Test Commands
4
-
5
- You need to use the following two commands to test the Partial FC training performance.
6
- The number of identites is **3 millions** (synthetic data), turn mixed precision training on, backbone is resnet50,
7
- batch size is 1024.
8
- ```shell
9
- # Model Parallel
10
- python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/3millions
11
- # Partial FC 0.1
12
- python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/3millions_pfc
13
- ```
14
-
15
- - GPU Memory
16
-
17
- ```
18
- # (Model Parallel) gpustat -i
19
- [0] Tesla V100-SXM2-32GB | 64'C, 94 % | 30338 / 32510 MB
20
- [1] Tesla V100-SXM2-32GB | 60'C, 99 % | 28876 / 32510 MB
21
- [2] Tesla V100-SXM2-32GB | 60'C, 99 % | 28872 / 32510 MB
22
- [3] Tesla V100-SXM2-32GB | 69'C, 99 % | 28872 / 32510 MB
23
- [4] Tesla V100-SXM2-32GB | 66'C, 99 % | 28888 / 32510 MB
24
- [5] Tesla V100-SXM2-32GB | 60'C, 99 % | 28932 / 32510 MB
25
- [6] Tesla V100-SXM2-32GB | 68'C, 100 % | 28916 / 32510 MB
26
- [7] Tesla V100-SXM2-32GB | 65'C, 99 % | 28860 / 32510 MB
27
-
28
- # (Partial FC 0.1) gpustat -i
29
- [0] Tesla V100-SXM2-32GB | 60'C, 95 % | 10488 / 32510 MB │·······················
30
- [1] Tesla V100-SXM2-32GB | 60'C, 97 % | 10344 / 32510 MB │·······················
31
- [2] Tesla V100-SXM2-32GB | 61'C, 95 % | 10340 / 32510 MB │·······················
32
- [3] Tesla V100-SXM2-32GB | 66'C, 95 % | 10340 / 32510 MB │·······················
33
- [4] Tesla V100-SXM2-32GB | 65'C, 94 % | 10356 / 32510 MB │·······················
34
- [5] Tesla V100-SXM2-32GB | 61'C, 95 % | 10400 / 32510 MB │·······················
35
- [6] Tesla V100-SXM2-32GB | 68'C, 96 % | 10384 / 32510 MB │·······················
36
- [7] Tesla V100-SXM2-32GB | 64'C, 95 % | 10328 / 32510 MB │·······················
37
- ```
38
-
39
- - Training Speed
40
-
41
- ```python
42
- # (Model Parallel) trainging.log
43
- Training: Speed 2271.33 samples/sec Loss 1.1624 LearningRate 0.2000 Epoch: 0 Global Step: 100
44
- Training: Speed 2269.94 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 150
45
- Training: Speed 2272.67 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 200
46
- Training: Speed 2266.55 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 250
47
- Training: Speed 2272.54 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 300
48
-
49
- # (Partial FC 0.1) trainging.log
50
- Training: Speed 5299.56 samples/sec Loss 1.0965 LearningRate 0.2000 Epoch: 0 Global Step: 100
51
- Training: Speed 5296.37 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 150
52
- Training: Speed 5304.37 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 200
53
- Training: Speed 5274.43 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 250
54
- Training: Speed 5300.10 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 300
55
- ```
56
-
57
- In this test case, Partial FC 0.1 only use1 1/3 of the GPU memory of the model parallel,
58
- and the training speed is 2.5 times faster than the model parallel.
59
-
60
-
61
- ## Speed Benchmark
62
-
63
- 1. Training speed of different parallel methods (samples/second), Tesla V100 32GB * 8. (Larger is better)
64
-
65
- | Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 |
66
- | :--- | :--- | :--- | :--- |
67
- |125000 | 4681 | 4824 | 5004 |
68
- |250000 | 4047 | 4521 | 4976 |
69
- |500000 | 3087 | 4013 | 4900 |
70
- |1000000 | 2090 | 3449 | 4803 |
71
- |1400000 | 1672 | 3043 | 4738 |
72
- |2000000 | - | 2593 | 4626 |
73
- |4000000 | - | 1748 | 4208 |
74
- |5500000 | - | 1389 | 3975 |
75
- |8000000 | - | - | 3565 |
76
- |16000000 | - | - | 2679 |
77
- |29000000 | - | - | 1855 |
78
-
79
- 2. GPU memory cost of different parallel methods (GB per GPU), Tesla V100 32GB * 8. (Smaller is better)
80
-
81
- | Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 |
82
- | :--- | :--- | :--- | :--- |
83
- |125000 | 7358 | 5306 | 4868 |
84
- |250000 | 9940 | 5826 | 5004 |
85
- |500000 | 14220 | 7114 | 5202 |
86
- |1000000 | 23708 | 9966 | 5620 |
87
- |1400000 | 32252 | 11178 | 6056 |
88
- |2000000 | - | 13978 | 6472 |
89
- |4000000 | - | 23238 | 8284 |
90
- |5500000 | - | 32188 | 9854 |
91
- |8000000 | - | - | 12310 |
92
- |16000000 | - | - | 19950 |
93
- |29000000 | - | - | 32324 |
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/ddim_inverse.md DELETED
@@ -1,21 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # Inverse Denoising Diffusion Implicit Models (DDIMInverse)
14
-
15
- ## Overview
16
-
17
- This scheduler is the inverted scheduler of [Denoising Diffusion Implicit Models](https://arxiv.org/abs/2010.02502) (DDIM) by Jiaming Song, Chenlin Meng and Stefano Ermon.
18
- The implementation is mostly based on the DDIM inversion definition of [Null-text Inversion for Editing Real Images using Guided Diffusion Models](https://arxiv.org/pdf/2211.09794.pdf)
19
-
20
- ## DDIMInverseScheduler
21
- [[autodoc]] DDIMInverseScheduler
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/check_copies.py DELETED
@@ -1,213 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 The HuggingFace Inc. team.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import argparse
17
- import glob
18
- import importlib.util
19
- import os
20
- import re
21
-
22
- import black
23
- from doc_builder.style_doc import style_docstrings_in_code
24
-
25
-
26
- # All paths are set with the intent you should run this script from the root of the repo with the command
27
- # python utils/check_copies.py
28
- DIFFUSERS_PATH = "src/diffusers"
29
- REPO_PATH = "."
30
-
31
-
32
- # This is to make sure the diffusers module imported is the one in the repo.
33
- spec = importlib.util.spec_from_file_location(
34
- "diffusers",
35
- os.path.join(DIFFUSERS_PATH, "__init__.py"),
36
- submodule_search_locations=[DIFFUSERS_PATH],
37
- )
38
- diffusers_module = spec.loader.load_module()
39
-
40
-
41
- def _should_continue(line, indent):
42
- return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
43
-
44
-
45
- def find_code_in_diffusers(object_name):
46
- """Find and return the code source code of `object_name`."""
47
- parts = object_name.split(".")
48
- i = 0
49
-
50
- # First let's find the module where our object lives.
51
- module = parts[i]
52
- while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
53
- i += 1
54
- if i < len(parts):
55
- module = os.path.join(module, parts[i])
56
- if i >= len(parts):
57
- raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")
58
-
59
- with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
60
- lines = f.readlines()
61
-
62
- # Now let's find the class / func in the code!
63
- indent = ""
64
- line_index = 0
65
- for name in parts[i + 1 :]:
66
- while (
67
- line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
68
- ):
69
- line_index += 1
70
- indent += " "
71
- line_index += 1
72
-
73
- if line_index >= len(lines):
74
- raise ValueError(f" {object_name} does not match any function or class in {module}.")
75
-
76
- # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
77
- start_index = line_index
78
- while line_index < len(lines) and _should_continue(lines[line_index], indent):
79
- line_index += 1
80
- # Clean up empty lines at the end (if any).
81
- while len(lines[line_index - 1]) <= 1:
82
- line_index -= 1
83
-
84
- code_lines = lines[start_index:line_index]
85
- return "".join(code_lines)
86
-
87
-
88
- _re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
89
- _re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
90
- _re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
91
-
92
-
93
- def get_indent(code):
94
- lines = code.split("\n")
95
- idx = 0
96
- while idx < len(lines) and len(lines[idx]) == 0:
97
- idx += 1
98
- if idx < len(lines):
99
- return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
100
- return ""
101
-
102
-
103
- def blackify(code):
104
- """
105
- Applies the black part of our `make style` command to `code`.
106
- """
107
- has_indent = len(get_indent(code)) > 0
108
- if has_indent:
109
- code = f"class Bla:\n{code}"
110
- mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
111
- result = black.format_str(code, mode=mode)
112
- result, _ = style_docstrings_in_code(result)
113
- return result[len("class Bla:\n") :] if has_indent else result
114
-
115
-
116
- def is_copy_consistent(filename, overwrite=False):
117
- """
118
- Check if the code commented as a copy in `filename` matches the original.
119
- Return the differences or overwrites the content depending on `overwrite`.
120
- """
121
- with open(filename, "r", encoding="utf-8", newline="\n") as f:
122
- lines = f.readlines()
123
- diffs = []
124
- line_index = 0
125
- # Not a for loop cause `lines` is going to change (if `overwrite=True`).
126
- while line_index < len(lines):
127
- search = _re_copy_warning.search(lines[line_index])
128
- if search is None:
129
- line_index += 1
130
- continue
131
-
132
- # There is some copied code here, let's retrieve the original.
133
- indent, object_name, replace_pattern = search.groups()
134
- theoretical_code = find_code_in_diffusers(object_name)
135
- theoretical_indent = get_indent(theoretical_code)
136
-
137
- start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
138
- indent = theoretical_indent
139
- line_index = start_index
140
-
141
- # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
142
- should_continue = True
143
- while line_index < len(lines) and should_continue:
144
- line_index += 1
145
- if line_index >= len(lines):
146
- break
147
- line = lines[line_index]
148
- should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
149
- # Clean up empty lines at the end (if any).
150
- while len(lines[line_index - 1]) <= 1:
151
- line_index -= 1
152
-
153
- observed_code_lines = lines[start_index:line_index]
154
- observed_code = "".join(observed_code_lines)
155
-
156
- # Remove any nested `Copied from` comments to avoid circular copies
157
- theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
158
- theoretical_code = "\n".join(theoretical_code)
159
-
160
- # Before comparing, use the `replace_pattern` on the original code.
161
- if len(replace_pattern) > 0:
162
- patterns = replace_pattern.replace("with", "").split(",")
163
- patterns = [_re_replace_pattern.search(p) for p in patterns]
164
- for pattern in patterns:
165
- if pattern is None:
166
- continue
167
- obj1, obj2, option = pattern.groups()
168
- theoretical_code = re.sub(obj1, obj2, theoretical_code)
169
- if option.strip() == "all-casing":
170
- theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
171
- theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
172
-
173
- # Blackify after replacement. To be able to do that, we need the header (class or function definition)
174
- # from the previous line
175
- theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
176
- theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
177
-
178
- # Test for a diff and act accordingly.
179
- if observed_code != theoretical_code:
180
- diffs.append([object_name, start_index])
181
- if overwrite:
182
- lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
183
- line_index = start_index + 1
184
-
185
- if overwrite and len(diffs) > 0:
186
- # Warn the user a file has been modified.
187
- print(f"Detected changes, rewriting {filename}.")
188
- with open(filename, "w", encoding="utf-8", newline="\n") as f:
189
- f.writelines(lines)
190
- return diffs
191
-
192
-
193
- def check_copies(overwrite: bool = False):
194
- all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
195
- diffs = []
196
- for filename in all_files:
197
- new_diffs = is_copy_consistent(filename, overwrite)
198
- diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
199
- if not overwrite and len(diffs) > 0:
200
- diff = "\n".join(diffs)
201
- raise Exception(
202
- "Found the following copy inconsistencies:\n"
203
- + diff
204
- + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
205
- )
206
-
207
-
208
- if __name__ == "__main__":
209
- parser = argparse.ArgumentParser()
210
- parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
211
- args = parser.parse_args()
212
-
213
- check_copies(args.fix_and_overwrite)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/hungarian_assigner.py DELETED
@@ -1,145 +0,0 @@
1
- import torch
2
-
3
- from ..builder import BBOX_ASSIGNERS
4
- from ..match_costs import build_match_cost
5
- from ..transforms import bbox_cxcywh_to_xyxy
6
- from .assign_result import AssignResult
7
- from .base_assigner import BaseAssigner
8
-
9
- try:
10
- from scipy.optimize import linear_sum_assignment
11
- except ImportError:
12
- linear_sum_assignment = None
13
-
14
-
15
- @BBOX_ASSIGNERS.register_module()
16
- class HungarianAssigner(BaseAssigner):
17
- """Computes one-to-one matching between predictions and ground truth.
18
-
19
- This class computes an assignment between the targets and the predictions
20
- based on the costs. The costs are weighted sum of three components:
21
- classification cost, regression L1 cost and regression iou cost. The
22
- targets don't include the no_object, so generally there are more
23
- predictions than targets. After the one-to-one matching, the un-matched
24
- are treated as backgrounds. Thus each query prediction will be assigned
25
- with `0` or a positive integer indicating the ground truth index:
26
-
27
- - 0: negative sample, no assigned gt
28
- - positive integer: positive sample, index (1-based) of assigned gt
29
-
30
- Args:
31
- cls_weight (int | float, optional): The scale factor for classification
32
- cost. Default 1.0.
33
- bbox_weight (int | float, optional): The scale factor for regression
34
- L1 cost. Default 1.0.
35
- iou_weight (int | float, optional): The scale factor for regression
36
- iou cost. Default 1.0.
37
- iou_calculator (dict | optional): The config for the iou calculation.
38
- Default type `BboxOverlaps2D`.
39
- iou_mode (str | optional): "iou" (intersection over union), "iof"
40
- (intersection over foreground), or "giou" (generalized
41
- intersection over union). Default "giou".
42
- """
43
-
44
- def __init__(self,
45
- cls_cost=dict(type='ClassificationCost', weight=1.),
46
- reg_cost=dict(type='BBoxL1Cost', weight=1.0),
47
- iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)):
48
- self.cls_cost = build_match_cost(cls_cost)
49
- self.reg_cost = build_match_cost(reg_cost)
50
- self.iou_cost = build_match_cost(iou_cost)
51
-
52
- def assign(self,
53
- bbox_pred,
54
- cls_pred,
55
- gt_bboxes,
56
- gt_labels,
57
- img_meta,
58
- gt_bboxes_ignore=None,
59
- eps=1e-7):
60
- """Computes one-to-one matching based on the weighted costs.
61
-
62
- This method assign each query prediction to a ground truth or
63
- background. The `assigned_gt_inds` with -1 means don't care,
64
- 0 means negative sample, and positive number is the index (1-based)
65
- of assigned gt.
66
- The assignment is done in the following steps, the order matters.
67
-
68
- 1. assign every prediction to -1
69
- 2. compute the weighted costs
70
- 3. do Hungarian matching on CPU based on the costs
71
- 4. assign all to 0 (background) first, then for each matched pair
72
- between predictions and gts, treat this prediction as foreground
73
- and assign the corresponding gt index (plus 1) to it.
74
-
75
- Args:
76
- bbox_pred (Tensor): Predicted boxes with normalized coordinates
77
- (cx, cy, w, h), which are all in range [0, 1]. Shape
78
- [num_query, 4].
79
- cls_pred (Tensor): Predicted classification logits, shape
80
- [num_query, num_class].
81
- gt_bboxes (Tensor): Ground truth boxes with unnormalized
82
- coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
83
- gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
84
- img_meta (dict): Meta information for current image.
85
- gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
86
- labelled as `ignored`. Default None.
87
- eps (int | float, optional): A value added to the denominator for
88
- numerical stability. Default 1e-7.
89
-
90
- Returns:
91
- :obj:`AssignResult`: The assigned result.
92
- """
93
- assert gt_bboxes_ignore is None, \
94
- 'Only case when gt_bboxes_ignore is None is supported.'
95
- num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)
96
-
97
- # 1. assign -1 by default
98
- assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),
99
- -1,
100
- dtype=torch.long)
101
- assigned_labels = bbox_pred.new_full((num_bboxes, ),
102
- -1,
103
- dtype=torch.long)
104
- if num_gts == 0 or num_bboxes == 0:
105
- # No ground truth or boxes, return empty assignment
106
- if num_gts == 0:
107
- # No ground truth, assign all to background
108
- assigned_gt_inds[:] = 0
109
- return AssignResult(
110
- num_gts, assigned_gt_inds, None, labels=assigned_labels)
111
- img_h, img_w, _ = img_meta['img_shape']
112
- factor = gt_bboxes.new_tensor([img_w, img_h, img_w,
113
- img_h]).unsqueeze(0)
114
-
115
- # 2. compute the weighted costs
116
- # classification and bboxcost.
117
- cls_cost = self.cls_cost(cls_pred, gt_labels)
118
- # regression L1 cost
119
- normalize_gt_bboxes = gt_bboxes / factor
120
- reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes)
121
- # regression iou cost, defaultly giou is used in official DETR.
122
- bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor
123
- iou_cost = self.iou_cost(bboxes, gt_bboxes)
124
- # weighted sum of above three costs
125
- cost = cls_cost + reg_cost + iou_cost
126
-
127
- # 3. do Hungarian matching on CPU using linear_sum_assignment
128
- cost = cost.detach().cpu()
129
- if linear_sum_assignment is None:
130
- raise ImportError('Please run "pip install scipy" '
131
- 'to install scipy first.')
132
- matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
133
- matched_row_inds = torch.from_numpy(matched_row_inds).to(
134
- bbox_pred.device)
135
- matched_col_inds = torch.from_numpy(matched_col_inds).to(
136
- bbox_pred.device)
137
-
138
- # 4. assign backgrounds and foregrounds
139
- # assign all indices to backgrounds first
140
- assigned_gt_inds[:] = 0
141
- # assign foregrounds based on matching results
142
- assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
143
- assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
144
- return AssignResult(
145
- num_gts, assigned_gt_inds, None, labels=assigned_labels)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_40k_pascal_context.py DELETED
@@ -1,10 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/deeplabv3plus_r50-d8.py',
3
- '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py',
4
- '../_base_/schedules/schedule_40k.py'
5
- ]
6
- model = dict(
7
- decode_head=dict(num_classes=60),
8
- auxiliary_head=dict(num_classes=60),
9
- test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320)))
10
- optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001)
 
 
 
 
 
 
 
 
 
 
 
spaces/AnnonSubmission/xai-cl/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Xai Cl
3
- emoji: 🏢
4
- colorFrom: gray
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.10.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Annotation-AI/fast-segment-everything-with-image-prompt/app.py DELETED
@@ -1,17 +0,0 @@
1
- import os
2
-
3
-
4
- github_user = os.environ.get("GITHUB_USER")
5
- github_token = os.environ.get("GITHUB_TOKEN")
6
-
7
- repo_name = "annotation-ai/mlwiz-technical-demo"
8
-
9
- os.system(f"export GITHUB_USER={github_user}")
10
- os.system(f"export GITHUB_TOKEN={github_token}")
11
- os.system(f"git clone https://{github_user}:{github_token}@github.com/{repo_name}")
12
-
13
- cwd0 = os.getcwd()
14
- cwd1 = os.path.join(cwd0, "mlwiz-technical-demo/sam")
15
- os.chdir(cwd1)
16
- os.system("pip install -r requirements.txt")
17
- os.system("python app_everything_img.py")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/ball_query.py DELETED
@@ -1,55 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- import torch
3
- from torch.autograd import Function
4
-
5
- from ..utils import ext_loader
6
-
7
- ext_module = ext_loader.load_ext('_ext', ['ball_query_forward'])
8
-
9
-
10
- class BallQuery(Function):
11
- """Find nearby points in spherical space."""
12
-
13
- @staticmethod
14
- def forward(ctx, min_radius: float, max_radius: float, sample_num: int,
15
- xyz: torch.Tensor, center_xyz: torch.Tensor) -> torch.Tensor:
16
- """
17
- Args:
18
- min_radius (float): minimum radius of the balls.
19
- max_radius (float): maximum radius of the balls.
20
- sample_num (int): maximum number of features in the balls.
21
- xyz (Tensor): (B, N, 3) xyz coordinates of the features.
22
- center_xyz (Tensor): (B, npoint, 3) centers of the ball query.
23
-
24
- Returns:
25
- Tensor: (B, npoint, nsample) tensor with the indices of
26
- the features that form the query balls.
27
- """
28
- assert center_xyz.is_contiguous()
29
- assert xyz.is_contiguous()
30
- assert min_radius < max_radius
31
-
32
- B, N, _ = xyz.size()
33
- npoint = center_xyz.size(1)
34
- idx = xyz.new_zeros(B, npoint, sample_num, dtype=torch.int)
35
-
36
- ext_module.ball_query_forward(
37
- center_xyz,
38
- xyz,
39
- idx,
40
- b=B,
41
- n=N,
42
- m=npoint,
43
- min_radius=min_radius,
44
- max_radius=max_radius,
45
- nsample=sample_num)
46
- if torch.__version__ != 'parrots':
47
- ctx.mark_non_differentiable(idx)
48
- return idx
49
-
50
- @staticmethod
51
- def backward(ctx, a=None):
52
- return None, None, None, None
53
-
54
-
55
- ball_query = BallQuery.apply
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ariharasudhan/YoloV5/models/common.py DELETED
@@ -1,860 +0,0 @@
1
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
- """
3
- Common modules
4
- """
5
-
6
- import ast
7
- import contextlib
8
- import json
9
- import math
10
- import platform
11
- import warnings
12
- import zipfile
13
- from collections import OrderedDict, namedtuple
14
- from copy import copy
15
- from pathlib import Path
16
- from urllib.parse import urlparse
17
-
18
- import cv2
19
- import numpy as np
20
- import pandas as pd
21
- import requests
22
- import torch
23
- import torch.nn as nn
24
- from IPython.display import display
25
- from PIL import Image
26
- from torch.cuda import amp
27
-
28
- from utils import TryExcept
29
- from utils.dataloaders import exif_transpose, letterbox
30
- from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
31
- increment_path, is_notebook, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
32
- xyxy2xywh, yaml_load)
33
- from utils.plots import Annotator, colors, save_one_box
34
- from utils.torch_utils import copy_attr, smart_inference_mode
35
-
36
-
37
- def autopad(k, p=None, d=1): # kernel, padding, dilation
38
- # Pad to 'same' shape outputs
39
- if d > 1:
40
- k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size
41
- if p is None:
42
- p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
43
- return p
44
-
45
-
46
- class Conv(nn.Module):
47
- # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation)
48
- default_act = nn.SiLU() # default activation
49
-
50
- def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
51
- super().__init__()
52
- self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
53
- self.bn = nn.BatchNorm2d(c2)
54
- self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
55
-
56
- def forward(self, x):
57
- return self.act(self.bn(self.conv(x)))
58
-
59
- def forward_fuse(self, x):
60
- return self.act(self.conv(x))
61
-
62
-
63
- class DWConv(Conv):
64
- # Depth-wise convolution
65
- def __init__(self, c1, c2, k=1, s=1, d=1, act=True): # ch_in, ch_out, kernel, stride, dilation, activation
66
- super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)
67
-
68
-
69
- class DWConvTranspose2d(nn.ConvTranspose2d):
70
- # Depth-wise transpose convolution
71
- def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out
72
- super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))
73
-
74
-
75
- class TransformerLayer(nn.Module):
76
- # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
77
- def __init__(self, c, num_heads):
78
- super().__init__()
79
- self.q = nn.Linear(c, c, bias=False)
80
- self.k = nn.Linear(c, c, bias=False)
81
- self.v = nn.Linear(c, c, bias=False)
82
- self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
83
- self.fc1 = nn.Linear(c, c, bias=False)
84
- self.fc2 = nn.Linear(c, c, bias=False)
85
-
86
- def forward(self, x):
87
- x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
88
- x = self.fc2(self.fc1(x)) + x
89
- return x
90
-
91
-
92
- class TransformerBlock(nn.Module):
93
- # Vision Transformer https://arxiv.org/abs/2010.11929
94
- def __init__(self, c1, c2, num_heads, num_layers):
95
- super().__init__()
96
- self.conv = None
97
- if c1 != c2:
98
- self.conv = Conv(c1, c2)
99
- self.linear = nn.Linear(c2, c2) # learnable position embedding
100
- self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
101
- self.c2 = c2
102
-
103
- def forward(self, x):
104
- if self.conv is not None:
105
- x = self.conv(x)
106
- b, _, w, h = x.shape
107
- p = x.flatten(2).permute(2, 0, 1)
108
- return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)
109
-
110
-
111
- class Bottleneck(nn.Module):
112
- # Standard bottleneck
113
- def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
114
- super().__init__()
115
- c_ = int(c2 * e) # hidden channels
116
- self.cv1 = Conv(c1, c_, 1, 1)
117
- self.cv2 = Conv(c_, c2, 3, 1, g=g)
118
- self.add = shortcut and c1 == c2
119
-
120
- def forward(self, x):
121
- return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
122
-
123
-
124
- class BottleneckCSP(nn.Module):
125
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
126
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
127
- super().__init__()
128
- c_ = int(c2 * e) # hidden channels
129
- self.cv1 = Conv(c1, c_, 1, 1)
130
- self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
131
- self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
132
- self.cv4 = Conv(2 * c_, c2, 1, 1)
133
- self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
134
- self.act = nn.SiLU()
135
- self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
136
-
137
- def forward(self, x):
138
- y1 = self.cv3(self.m(self.cv1(x)))
139
- y2 = self.cv2(x)
140
- return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))
141
-
142
-
143
- class CrossConv(nn.Module):
144
- # Cross Convolution Downsample
145
- def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
146
- # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
147
- super().__init__()
148
- c_ = int(c2 * e) # hidden channels
149
- self.cv1 = Conv(c1, c_, (1, k), (1, s))
150
- self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
151
- self.add = shortcut and c1 == c2
152
-
153
- def forward(self, x):
154
- return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
155
-
156
-
157
- class C3(nn.Module):
158
- # CSP Bottleneck with 3 convolutions
159
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
160
- super().__init__()
161
- c_ = int(c2 * e) # hidden channels
162
- self.cv1 = Conv(c1, c_, 1, 1)
163
- self.cv2 = Conv(c1, c_, 1, 1)
164
- self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2)
165
- self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
166
-
167
- def forward(self, x):
168
- return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))
169
-
170
-
171
- class C3x(C3):
172
- # C3 module with cross-convolutions
173
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
174
- super().__init__(c1, c2, n, shortcut, g, e)
175
- c_ = int(c2 * e)
176
- self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)))
177
-
178
-
179
- class C3TR(C3):
180
- # C3 module with TransformerBlock()
181
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
182
- super().__init__(c1, c2, n, shortcut, g, e)
183
- c_ = int(c2 * e)
184
- self.m = TransformerBlock(c_, c_, 4, n)
185
-
186
-
187
- class C3SPP(C3):
188
- # C3 module with SPP()
189
- def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
190
- super().__init__(c1, c2, n, shortcut, g, e)
191
- c_ = int(c2 * e)
192
- self.m = SPP(c_, c_, k)
193
-
194
-
195
- class C3Ghost(C3):
196
- # C3 module with GhostBottleneck()
197
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
198
- super().__init__(c1, c2, n, shortcut, g, e)
199
- c_ = int(c2 * e) # hidden channels
200
- self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))
201
-
202
-
203
- class SPP(nn.Module):
204
- # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729
205
- def __init__(self, c1, c2, k=(5, 9, 13)):
206
- super().__init__()
207
- c_ = c1 // 2 # hidden channels
208
- self.cv1 = Conv(c1, c_, 1, 1)
209
- self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
210
- self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
211
-
212
- def forward(self, x):
213
- x = self.cv1(x)
214
- with warnings.catch_warnings():
215
- warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
216
- return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
217
-
218
-
219
- class SPPF(nn.Module):
220
- # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
221
- def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))
222
- super().__init__()
223
- c_ = c1 // 2 # hidden channels
224
- self.cv1 = Conv(c1, c_, 1, 1)
225
- self.cv2 = Conv(c_ * 4, c2, 1, 1)
226
- self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
227
-
228
- def forward(self, x):
229
- x = self.cv1(x)
230
- with warnings.catch_warnings():
231
- warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
232
- y1 = self.m(x)
233
- y2 = self.m(y1)
234
- return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
235
-
236
-
237
- class Focus(nn.Module):
238
- # Focus wh information into c-space
239
- def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
240
- super().__init__()
241
- self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)
242
- # self.contract = Contract(gain=2)
243
-
244
- def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
245
- return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1))
246
- # return self.conv(self.contract(x))
247
-
248
-
249
- class GhostConv(nn.Module):
250
- # Ghost Convolution https://github.com/huawei-noah/ghostnet
251
- def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
252
- super().__init__()
253
- c_ = c2 // 2 # hidden channels
254
- self.cv1 = Conv(c1, c_, k, s, None, g, act=act)
255
- self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act)
256
-
257
- def forward(self, x):
258
- y = self.cv1(x)
259
- return torch.cat((y, self.cv2(y)), 1)
260
-
261
-
262
- class GhostBottleneck(nn.Module):
263
- # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
264
- def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
265
- super().__init__()
266
- c_ = c2 // 2
267
- self.conv = nn.Sequential(
268
- GhostConv(c1, c_, 1, 1), # pw
269
- DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
270
- GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
271
- self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1,
272
- act=False)) if s == 2 else nn.Identity()
273
-
274
- def forward(self, x):
275
- return self.conv(x) + self.shortcut(x)
276
-
277
-
278
- class Contract(nn.Module):
279
- # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
280
- def __init__(self, gain=2):
281
- super().__init__()
282
- self.gain = gain
283
-
284
- def forward(self, x):
285
- b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain'
286
- s = self.gain
287
- x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2)
288
- x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
289
- return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40)
290
-
291
-
292
- class Expand(nn.Module):
293
- # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
294
- def __init__(self, gain=2):
295
- super().__init__()
296
- self.gain = gain
297
-
298
- def forward(self, x):
299
- b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain'
300
- s = self.gain
301
- x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80)
302
- x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
303
- return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160)
304
-
305
-
306
- class Concat(nn.Module):
307
- # Concatenate a list of tensors along dimension
308
- def __init__(self, dimension=1):
309
- super().__init__()
310
- self.d = dimension
311
-
312
- def forward(self, x):
313
- return torch.cat(x, self.d)
314
-
315
-
316
- class DetectMultiBackend(nn.Module):
317
- # YOLOv5 MultiBackend class for python inference on various backends
318
- def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):
319
- # Usage:
320
- # PyTorch: weights = *.pt
321
- # TorchScript: *.torchscript
322
- # ONNX Runtime: *.onnx
323
- # ONNX OpenCV DNN: *.onnx --dnn
324
- # OpenVINO: *_openvino_model
325
- # CoreML: *.mlmodel
326
- # TensorRT: *.engine
327
- # TensorFlow SavedModel: *_saved_model
328
- # TensorFlow GraphDef: *.pb
329
- # TensorFlow Lite: *.tflite
330
- # TensorFlow Edge TPU: *_edgetpu.tflite
331
- # PaddlePaddle: *_paddle_model
332
- from models.experimental import attempt_download, attempt_load # scoped to avoid circular import
333
-
334
- super().__init__()
335
- w = str(weights[0] if isinstance(weights, list) else weights)
336
- pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)
337
- fp16 &= pt or jit or onnx or engine # FP16
338
- nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)
339
- stride = 32 # default stride
340
- cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA
341
- if not (pt or triton):
342
- w = attempt_download(w) # download if not local
343
-
344
- if pt: # PyTorch
345
- model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)
346
- stride = max(int(model.stride.max()), 32) # model stride
347
- names = model.module.names if hasattr(model, 'module') else model.names # get class names
348
- model.half() if fp16 else model.float()
349
- self.model = model # explicitly assign for to(), cpu(), cuda(), half()
350
- elif jit: # TorchScript
351
- LOGGER.info(f'Loading {w} for TorchScript inference...')
352
- extra_files = {'config.txt': ''} # model metadata
353
- model = torch.jit.load(w, _extra_files=extra_files, map_location=device)
354
- model.half() if fp16 else model.float()
355
- if extra_files['config.txt']: # load metadata dict
356
- d = json.loads(extra_files['config.txt'],
357
- object_hook=lambda d: {int(k) if k.isdigit() else k: v
358
- for k, v in d.items()})
359
- stride, names = int(d['stride']), d['names']
360
- elif dnn: # ONNX OpenCV DNN
361
- LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
362
- check_requirements('opencv-python>=4.5.4')
363
- net = cv2.dnn.readNetFromONNX(w)
364
- elif onnx: # ONNX Runtime
365
- LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
366
- check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
367
- import onnxruntime
368
- providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
369
- session = onnxruntime.InferenceSession(w, providers=providers)
370
- output_names = [x.name for x in session.get_outputs()]
371
- meta = session.get_modelmeta().custom_metadata_map # metadata
372
- if 'stride' in meta:
373
- stride, names = int(meta['stride']), eval(meta['names'])
374
- elif xml: # OpenVINO
375
- LOGGER.info(f'Loading {w} for OpenVINO inference...')
376
- check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/
377
- from openvino.runtime import Core, Layout, get_batch
378
- ie = Core()
379
- if not Path(w).is_file(): # if not *.xml
380
- w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir
381
- network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
382
- if network.get_parameters()[0].get_layout().empty:
383
- network.get_parameters()[0].set_layout(Layout("NCHW"))
384
- batch_dim = get_batch(network)
385
- if batch_dim.is_static:
386
- batch_size = batch_dim.get_length()
387
- executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2
388
- stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata
389
- elif engine: # TensorRT
390
- LOGGER.info(f'Loading {w} for TensorRT inference...')
391
- import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download
392
- check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0
393
- if device.type == 'cpu':
394
- device = torch.device('cuda:0')
395
- Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
396
- logger = trt.Logger(trt.Logger.INFO)
397
- with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
398
- model = runtime.deserialize_cuda_engine(f.read())
399
- context = model.create_execution_context()
400
- bindings = OrderedDict()
401
- output_names = []
402
- fp16 = False # default updated below
403
- dynamic = False
404
- for i in range(model.num_bindings):
405
- name = model.get_binding_name(i)
406
- dtype = trt.nptype(model.get_binding_dtype(i))
407
- if model.binding_is_input(i):
408
- if -1 in tuple(model.get_binding_shape(i)): # dynamic
409
- dynamic = True
410
- context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))
411
- if dtype == np.float16:
412
- fp16 = True
413
- else: # output
414
- output_names.append(name)
415
- shape = tuple(context.get_binding_shape(i))
416
- im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
417
- bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
418
- binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
419
- batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size
420
- elif coreml: # CoreML
421
- LOGGER.info(f'Loading {w} for CoreML inference...')
422
- import coremltools as ct
423
- model = ct.models.MLModel(w)
424
- elif saved_model: # TF SavedModel
425
- LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')
426
- import tensorflow as tf
427
- keras = False # assume TF1 saved_model
428
- model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)
429
- elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
430
- LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
431
- import tensorflow as tf
432
-
433
- def wrap_frozen_graph(gd, inputs, outputs):
434
- x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped
435
- ge = x.graph.as_graph_element
436
- return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))
437
-
438
- def gd_outputs(gd):
439
- name_list, input_list = [], []
440
- for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef
441
- name_list.append(node.name)
442
- input_list.extend(node.input)
443
- return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))
444
-
445
- gd = tf.Graph().as_graph_def() # TF GraphDef
446
- with open(w, 'rb') as f:
447
- gd.ParseFromString(f.read())
448
- frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd))
449
- elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
450
- try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
451
- from tflite_runtime.interpreter import Interpreter, load_delegate
452
- except ImportError:
453
- import tensorflow as tf
454
- Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,
455
- if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime
456
- LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
457
- delegate = {
458
- 'Linux': 'libedgetpu.so.1',
459
- 'Darwin': 'libedgetpu.1.dylib',
460
- 'Windows': 'edgetpu.dll'}[platform.system()]
461
- interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
462
- else: # TFLite
463
- LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
464
- interpreter = Interpreter(model_path=w) # load TFLite model
465
- interpreter.allocate_tensors() # allocate
466
- input_details = interpreter.get_input_details() # inputs
467
- output_details = interpreter.get_output_details() # outputs
468
- # load metadata
469
- with contextlib.suppress(zipfile.BadZipFile):
470
- with zipfile.ZipFile(w, "r") as model:
471
- meta_file = model.namelist()[0]
472
- meta = ast.literal_eval(model.read(meta_file).decode("utf-8"))
473
- stride, names = int(meta['stride']), meta['names']
474
- elif tfjs: # TF.js
475
- raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')
476
- elif paddle: # PaddlePaddle
477
- LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
478
- check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
479
- import paddle.inference as pdi
480
- if not Path(w).is_file(): # if not *.pdmodel
481
- w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir
482
- weights = Path(w).with_suffix('.pdiparams')
483
- config = pdi.Config(str(w), str(weights))
484
- if cuda:
485
- config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)
486
- predictor = pdi.create_predictor(config)
487
- input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
488
- output_names = predictor.get_output_names()
489
- elif triton: # NVIDIA Triton Inference Server
490
- LOGGER.info(f'Using {w} as Triton Inference Server...')
491
- check_requirements('tritonclient[all]')
492
- from utils.triton import TritonRemoteModel
493
- model = TritonRemoteModel(url=w)
494
- nhwc = model.runtime.startswith("tensorflow")
495
- else:
496
- raise NotImplementedError(f'ERROR: {w} is not a supported format')
497
-
498
- # class names
499
- if 'names' not in locals():
500
- names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}
501
- if names[0] == 'n01440764' and len(names) == 1000: # ImageNet
502
- names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names
503
-
504
- self.__dict__.update(locals()) # assign all variables to self
505
-
506
- def forward(self, im, augment=False, visualize=False):
507
- # YOLOv5 MultiBackend inference
508
- b, ch, h, w = im.shape # batch, channel, height, width
509
- if self.fp16 and im.dtype != torch.float16:
510
- im = im.half() # to FP16
511
- if self.nhwc:
512
- im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)
513
-
514
- if self.pt: # PyTorch
515
- y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)
516
- elif self.jit: # TorchScript
517
- y = self.model(im)
518
- elif self.dnn: # ONNX OpenCV DNN
519
- im = im.cpu().numpy() # torch to numpy
520
- self.net.setInput(im)
521
- y = self.net.forward()
522
- elif self.onnx: # ONNX Runtime
523
- im = im.cpu().numpy() # torch to numpy
524
- y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
525
- elif self.xml: # OpenVINO
526
- im = im.cpu().numpy() # FP32
527
- y = list(self.executable_network([im]).values())
528
- elif self.engine: # TensorRT
529
- if self.dynamic and im.shape != self.bindings['images'].shape:
530
- i = self.model.get_binding_index('images')
531
- self.context.set_binding_shape(i, im.shape) # reshape if dynamic
532
- self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)
533
- for name in self.output_names:
534
- i = self.model.get_binding_index(name)
535
- self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))
536
- s = self.bindings['images'].shape
537
- assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
538
- self.binding_addrs['images'] = int(im.data_ptr())
539
- self.context.execute_v2(list(self.binding_addrs.values()))
540
- y = [self.bindings[x].data for x in sorted(self.output_names)]
541
- elif self.coreml: # CoreML
542
- im = im.cpu().numpy()
543
- im = Image.fromarray((im[0] * 255).astype('uint8'))
544
- # im = im.resize((192, 320), Image.ANTIALIAS)
545
- y = self.model.predict({'image': im}) # coordinates are xywh normalized
546
- if 'confidence' in y:
547
- box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels
548
- conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)
549
- y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
550
- else:
551
- y = list(reversed(y.values())) # reversed for segmentation models (pred, proto)
552
- elif self.paddle: # PaddlePaddle
553
- im = im.cpu().numpy().astype(np.float32)
554
- self.input_handle.copy_from_cpu(im)
555
- self.predictor.run()
556
- y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
557
- elif self.triton: # NVIDIA Triton Inference Server
558
- y = self.model(im)
559
- else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
560
- im = im.cpu().numpy()
561
- if self.saved_model: # SavedModel
562
- y = self.model(im, training=False) if self.keras else self.model(im)
563
- elif self.pb: # GraphDef
564
- y = self.frozen_func(x=self.tf.constant(im))
565
- else: # Lite or Edge TPU
566
- input = self.input_details[0]
567
- int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model
568
- if int8:
569
- scale, zero_point = input['quantization']
570
- im = (im / scale + zero_point).astype(np.uint8) # de-scale
571
- self.interpreter.set_tensor(input['index'], im)
572
- self.interpreter.invoke()
573
- y = []
574
- for output in self.output_details:
575
- x = self.interpreter.get_tensor(output['index'])
576
- if int8:
577
- scale, zero_point = output['quantization']
578
- x = (x.astype(np.float32) - zero_point) * scale # re-scale
579
- y.append(x)
580
- y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
581
- y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels
582
-
583
- if isinstance(y, (list, tuple)):
584
- return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
585
- else:
586
- return self.from_numpy(y)
587
-
588
- def from_numpy(self, x):
589
- return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x
590
-
591
- def warmup(self, imgsz=(1, 3, 640, 640)):
592
- # Warmup model by running inference once
593
- warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton
594
- if any(warmup_types) and (self.device.type != 'cpu' or self.triton):
595
- im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input
596
- for _ in range(2 if self.jit else 1): #
597
- self.forward(im) # warmup
598
-
599
- @staticmethod
600
- def _model_type(p='path/to/model.pt'):
601
- # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
602
- # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]
603
- from export import export_formats
604
- from utils.downloads import is_url
605
- sf = list(export_formats().Suffix) # export suffixes
606
- if not is_url(p, check=False):
607
- check_suffix(p, sf) # checks
608
- url = urlparse(p) # if url may be Triton inference server
609
- types = [s in Path(p).name for s in sf]
610
- types[8] &= not types[9] # tflite &= not edgetpu
611
- triton = not any(types) and all([any(s in url.scheme for s in ["http", "grpc"]), url.netloc])
612
- return types + [triton]
613
-
614
- @staticmethod
615
- def _load_metadata(f=Path('path/to/meta.yaml')):
616
- # Load metadata from meta.yaml if it exists
617
- if f.exists():
618
- d = yaml_load(f)
619
- return d['stride'], d['names'] # assign stride, names
620
- return None, None
621
-
622
-
623
- class AutoShape(nn.Module):
624
- # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
625
- conf = 0.25 # NMS confidence threshold
626
- iou = 0.45 # NMS IoU threshold
627
- agnostic = False # NMS class-agnostic
628
- multi_label = False # NMS multiple labels per box
629
- classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
630
- max_det = 1000 # maximum number of detections per image
631
- amp = False # Automatic Mixed Precision (AMP) inference
632
-
633
- def __init__(self, model, verbose=True):
634
- super().__init__()
635
- if verbose:
636
- LOGGER.info('Adding AutoShape... ')
637
- copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes
638
- self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance
639
- self.pt = not self.dmb or model.pt # PyTorch model
640
- self.model = model.eval()
641
- if self.pt:
642
- m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect()
643
- m.inplace = False # Detect.inplace=False for safe multithread inference
644
- m.export = True # do not output loss values
645
-
646
- def _apply(self, fn):
647
- # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
648
- self = super()._apply(fn)
649
- if self.pt:
650
- m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect()
651
- m.stride = fn(m.stride)
652
- m.grid = list(map(fn, m.grid))
653
- if isinstance(m.anchor_grid, list):
654
- m.anchor_grid = list(map(fn, m.anchor_grid))
655
- return self
656
-
657
- @smart_inference_mode()
658
- def forward(self, ims, size=640, augment=False, profile=False):
659
- # Inference from various sources. For size(height=640, width=1280), RGB images example inputs are:
660
- # file: ims = 'data/images/zidane.jpg' # str or PosixPath
661
- # URI: = 'https://ultralytics.com/images/zidane.jpg'
662
- # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
663
- # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3)
664
- # numpy: = np.zeros((640,1280,3)) # HWC
665
- # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
666
- # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
667
-
668
- dt = (Profile(), Profile(), Profile())
669
- with dt[0]:
670
- if isinstance(size, int): # expand
671
- size = (size, size)
672
- p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param
673
- autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference
674
- if isinstance(ims, torch.Tensor): # torch
675
- with amp.autocast(autocast):
676
- return self.model(ims.to(p.device).type_as(p), augment=augment) # inference
677
-
678
- # Pre-process
679
- n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images
680
- shape0, shape1, files = [], [], [] # image and inference shapes, filenames
681
- for i, im in enumerate(ims):
682
- f = f'image{i}' # filename
683
- if isinstance(im, (str, Path)): # filename or uri
684
- im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
685
- im = np.asarray(exif_transpose(im))
686
- elif isinstance(im, Image.Image): # PIL Image
687
- im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f
688
- files.append(Path(f).with_suffix('.jpg').name)
689
- if im.shape[0] < 5: # image in CHW
690
- im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
691
- im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input
692
- s = im.shape[:2] # HWC
693
- shape0.append(s) # image shape
694
- g = max(size) / max(s) # gain
695
- shape1.append([int(y * g) for y in s])
696
- ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
697
- shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] if self.pt else size # inf shape
698
- x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad
699
- x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW
700
- x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32
701
-
702
- with amp.autocast(autocast):
703
- # Inference
704
- with dt[1]:
705
- y = self.model(x, augment=augment) # forward
706
-
707
- # Post-process
708
- with dt[2]:
709
- y = non_max_suppression(y if self.dmb else y[0],
710
- self.conf,
711
- self.iou,
712
- self.classes,
713
- self.agnostic,
714
- self.multi_label,
715
- max_det=self.max_det) # NMS
716
- for i in range(n):
717
- scale_boxes(shape1, y[i][:, :4], shape0[i])
718
-
719
- return Detections(ims, y, files, dt, self.names, x.shape)
720
-
721
-
722
- class Detections:
723
- # YOLOv5 detections class for inference results
724
- def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):
725
- super().__init__()
726
- d = pred[0].device # device
727
- gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims] # normalizations
728
- self.ims = ims # list of images as numpy arrays
729
- self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
730
- self.names = names # class names
731
- self.files = files # image filenames
732
- self.times = times # profiling times
733
- self.xyxy = pred # xyxy pixels
734
- self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
735
- self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
736
- self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
737
- self.n = len(self.pred) # number of images (batch size)
738
- self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms)
739
- self.s = tuple(shape) # inference BCHW shape
740
-
741
- def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')):
742
- s, crops = '', []
743
- for i, (im, pred) in enumerate(zip(self.ims, self.pred)):
744
- s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string
745
- if pred.shape[0]:
746
- for c in pred[:, -1].unique():
747
- n = (pred[:, -1] == c).sum() # detections per class
748
- s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
749
- s = s.rstrip(', ')
750
- if show or save or render or crop:
751
- annotator = Annotator(im, example=str(self.names))
752
- for *box, conf, cls in reversed(pred): # xyxy, confidence, class
753
- label = f'{self.names[int(cls)]} {conf:.2f}'
754
- if crop:
755
- file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None
756
- crops.append({
757
- 'box': box,
758
- 'conf': conf,
759
- 'cls': cls,
760
- 'label': label,
761
- 'im': save_one_box(box, im, file=file, save=save)})
762
- else: # all others
763
- annotator.box_label(box, label if labels else '', color=colors(cls))
764
- im = annotator.im
765
- else:
766
- s += '(no detections)'
767
-
768
- im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
769
- if show:
770
- display(im) if is_notebook() else im.show(self.files[i])
771
- if save:
772
- f = self.files[i]
773
- im.save(save_dir / f) # save
774
- if i == self.n - 1:
775
- LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}")
776
- if render:
777
- self.ims[i] = np.asarray(im)
778
- if pprint:
779
- s = s.lstrip('\n')
780
- return f'{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t
781
- if crop:
782
- if save:
783
- LOGGER.info(f'Saved results to {save_dir}\n')
784
- return crops
785
-
786
- @TryExcept('Showing images is not supported in this environment')
787
- def show(self, labels=True):
788
- self._run(show=True, labels=labels) # show results
789
-
790
- def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False):
791
- save_dir = increment_path(save_dir, exist_ok, mkdir=True) # increment save_dir
792
- self._run(save=True, labels=labels, save_dir=save_dir) # save results
793
-
794
- def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False):
795
- save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None
796
- return self._run(crop=True, save=save, save_dir=save_dir) # crop results
797
-
798
- def render(self, labels=True):
799
- self._run(render=True, labels=labels) # render results
800
- return self.ims
801
-
802
- def pandas(self):
803
- # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
804
- new = copy(self) # return copy
805
- ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns
806
- cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns
807
- for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
808
- a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
809
- setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
810
- return new
811
-
812
- def tolist(self):
813
- # return a list of Detections objects, i.e. 'for result in results.tolist():'
814
- r = range(self.n) # iterable
815
- x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
816
- # for d in x:
817
- # for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
818
- # setattr(d, k, getattr(d, k)[0]) # pop out of list
819
- return x
820
-
821
- def print(self):
822
- LOGGER.info(self.__str__())
823
-
824
- def __len__(self): # override len(results)
825
- return self.n
826
-
827
- def __str__(self): # override print(results)
828
- return self._run(pprint=True) # print results
829
-
830
- def __repr__(self):
831
- return f'YOLOv5 {self.__class__} instance\n' + self.__str__()
832
-
833
-
834
- class Proto(nn.Module):
835
- # YOLOv5 mask Proto module for segmentation models
836
- def __init__(self, c1, c_=256, c2=32): # ch_in, number of protos, number of masks
837
- super().__init__()
838
- self.cv1 = Conv(c1, c_, k=3)
839
- self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
840
- self.cv2 = Conv(c_, c_, k=3)
841
- self.cv3 = Conv(c_, c2)
842
-
843
- def forward(self, x):
844
- return self.cv3(self.cv2(self.upsample(self.cv1(x))))
845
-
846
-
847
- class Classify(nn.Module):
848
- # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2)
849
- def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
850
- super().__init__()
851
- c_ = 1280 # efficientnet_b0 size
852
- self.conv = Conv(c1, c_, k, s, autopad(k, p), g)
853
- self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1)
854
- self.drop = nn.Dropout(p=0.0, inplace=True)
855
- self.linear = nn.Linear(c_, c2) # to x(b,c2)
856
-
857
- def forward(self, x):
858
- if isinstance(x, list):
859
- x = torch.cat(x, 1)
860
- return self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Arnx/MusicGenXvAKN/Makefile DELETED
@@ -1,21 +0,0 @@
1
- default: linter tests
2
-
3
- install:
4
- pip install -U pip
5
- pip install -U -e '.[dev]'
6
-
7
- linter:
8
- flake8 audiocraft && mypy audiocraft
9
- flake8 tests && mypy tests
10
-
11
- tests:
12
- coverage run -m pytest tests
13
- coverage report --include 'audiocraft/*'
14
-
15
- docs:
16
- pdoc3 --html -o docs -f audiocraft
17
-
18
- dist:
19
- python setup.py sdist
20
-
21
- .PHONY: linter tests docs dist
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Augustya/ai-subject-answer-generator/app.py DELETED
@@ -1,7 +0,0 @@
1
- import gradio as gr
2
- import os
3
-
4
- hf_token = os.environ['GRADIO_API_KEY']
5
-
6
- iface = gr.load(name="Augustya/ai-email-subject-question-answering-generator", hf_token=hf_token, src="spaces")
7
- iface.queue(api_open=False).launch(show_api=False)
 
 
 
 
 
 
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/analysis.py DELETED
@@ -1,188 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- # -*- coding: utf-8 -*-
3
-
4
- import typing
5
- from typing import Any, List
6
- import fvcore
7
- from fvcore.nn import activation_count, flop_count, parameter_count, parameter_count_table
8
- from torch import nn
9
-
10
- from detectron2.export import TracingAdapter
11
-
12
- __all__ = [
13
- "activation_count_operators",
14
- "flop_count_operators",
15
- "parameter_count_table",
16
- "parameter_count",
17
- "FlopCountAnalysis",
18
- ]
19
-
20
- FLOPS_MODE = "flops"
21
- ACTIVATIONS_MODE = "activations"
22
-
23
-
24
- # Some extra ops to ignore from counting, including elementwise and reduction ops
25
- _IGNORED_OPS = {
26
- "aten::add",
27
- "aten::add_",
28
- "aten::argmax",
29
- "aten::argsort",
30
- "aten::batch_norm",
31
- "aten::constant_pad_nd",
32
- "aten::div",
33
- "aten::div_",
34
- "aten::exp",
35
- "aten::log2",
36
- "aten::max_pool2d",
37
- "aten::meshgrid",
38
- "aten::mul",
39
- "aten::mul_",
40
- "aten::neg",
41
- "aten::nonzero_numpy",
42
- "aten::reciprocal",
43
- "aten::repeat_interleave",
44
- "aten::rsub",
45
- "aten::sigmoid",
46
- "aten::sigmoid_",
47
- "aten::softmax",
48
- "aten::sort",
49
- "aten::sqrt",
50
- "aten::sub",
51
- "torchvision::nms", # TODO estimate flop for nms
52
- }
53
-
54
-
55
- class FlopCountAnalysis(fvcore.nn.FlopCountAnalysis):
56
- """
57
- Same as :class:`fvcore.nn.FlopCountAnalysis`, but supports detectron2 models.
58
- """
59
-
60
- def __init__(self, model, inputs):
61
- """
62
- Args:
63
- model (nn.Module):
64
- inputs (Any): inputs of the given model. Does not have to be tuple of tensors.
65
- """
66
- wrapper = TracingAdapter(model, inputs, allow_non_tensor=True)
67
- super().__init__(wrapper, wrapper.flattened_inputs)
68
- self.set_op_handle(**{k: None for k in _IGNORED_OPS})
69
-
70
-
71
- def flop_count_operators(model: nn.Module, inputs: list) -> typing.DefaultDict[str, float]:
72
- """
73
- Implement operator-level flops counting using jit.
74
- This is a wrapper of :func:`fvcore.nn.flop_count` and adds supports for standard
75
- detection models in detectron2.
76
- Please use :class:`FlopCountAnalysis` for more advanced functionalities.
77
-
78
- Note:
79
- The function runs the input through the model to compute flops.
80
- The flops of a detection model is often input-dependent, for example,
81
- the flops of box & mask head depends on the number of proposals &
82
- the number of detected objects.
83
- Therefore, the flops counting using a single input may not accurately
84
- reflect the computation cost of a model. It's recommended to average
85
- across a number of inputs.
86
-
87
- Args:
88
- model: a detectron2 model that takes `list[dict]` as input.
89
- inputs (list[dict]): inputs to model, in detectron2's standard format.
90
- Only "image" key will be used.
91
- supported_ops (dict[str, Handle]): see documentation of :func:`fvcore.nn.flop_count`
92
-
93
- Returns:
94
- Counter: Gflop count per operator
95
- """
96
- old_train = model.training
97
- model.eval()
98
- ret = FlopCountAnalysis(model, inputs).by_operator()
99
- model.train(old_train)
100
- return {k: v / 1e9 for k, v in ret.items()}
101
-
102
-
103
- def activation_count_operators(
104
- model: nn.Module, inputs: list, **kwargs
105
- ) -> typing.DefaultDict[str, float]:
106
- """
107
- Implement operator-level activations counting using jit.
108
- This is a wrapper of fvcore.nn.activation_count, that supports standard detection models
109
- in detectron2.
110
-
111
- Note:
112
- The function runs the input through the model to compute activations.
113
- The activations of a detection model is often input-dependent, for example,
114
- the activations of box & mask head depends on the number of proposals &
115
- the number of detected objects.
116
-
117
- Args:
118
- model: a detectron2 model that takes `list[dict]` as input.
119
- inputs (list[dict]): inputs to model, in detectron2's standard format.
120
- Only "image" key will be used.
121
-
122
- Returns:
123
- Counter: activation count per operator
124
- """
125
- return _wrapper_count_operators(model=model, inputs=inputs, mode=ACTIVATIONS_MODE, **kwargs)
126
-
127
-
128
- def _wrapper_count_operators(
129
- model: nn.Module, inputs: list, mode: str, **kwargs
130
- ) -> typing.DefaultDict[str, float]:
131
- # ignore some ops
132
- supported_ops = {k: lambda *args, **kwargs: {} for k in _IGNORED_OPS}
133
- supported_ops.update(kwargs.pop("supported_ops", {}))
134
- kwargs["supported_ops"] = supported_ops
135
-
136
- assert len(inputs) == 1, "Please use batch size=1"
137
- tensor_input = inputs[0]["image"]
138
- inputs = [{"image": tensor_input}] # remove other keys, in case there are any
139
-
140
- old_train = model.training
141
- if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)):
142
- model = model.module
143
- wrapper = TracingAdapter(model, inputs)
144
- wrapper.eval()
145
- if mode == FLOPS_MODE:
146
- ret = flop_count(wrapper, (tensor_input,), **kwargs)
147
- elif mode == ACTIVATIONS_MODE:
148
- ret = activation_count(wrapper, (tensor_input,), **kwargs)
149
- else:
150
- raise NotImplementedError("Count for mode {} is not supported yet.".format(mode))
151
- # compatible with change in fvcore
152
- if isinstance(ret, tuple):
153
- ret = ret[0]
154
- model.train(old_train)
155
- return ret
156
-
157
-
158
- def find_unused_parameters(model: nn.Module, inputs: Any) -> List[str]:
159
- """
160
- Given a model, find parameters that do not contribute
161
- to the loss.
162
-
163
- Args:
164
- model: a model in training mode that returns losses
165
- inputs: argument or a tuple of arguments. Inputs of the model
166
-
167
- Returns:
168
- list[str]: the name of unused parameters
169
- """
170
- assert model.training
171
- for _, prm in model.named_parameters():
172
- prm.grad = None
173
-
174
- if isinstance(inputs, tuple):
175
- losses = model(*inputs)
176
- else:
177
- losses = model(inputs)
178
-
179
- if isinstance(losses, dict):
180
- losses = sum(losses.values())
181
- losses.backward()
182
-
183
- unused: List[str] = []
184
- for name, prm in model.named_parameters():
185
- if prm.grad is None:
186
- unused.append(name)
187
- prm.grad = None
188
- return unused
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Bloons Td 6 Apk Download Android.md DELETED
@@ -1,49 +0,0 @@
1
-
2
- <h1>Bloons TD 6 APK Descargar Android: Cómo instalar y jugar el mejor juego de defensa de la torre</h1>
3
- <p>Si eres un fan de los juegos de defensa de torres, probablemente hayas oído hablar de <strong>Bloons TD</strong>, una de las series más populares y exitosas del género. La última entrega, <strong>Bloons TD 6</strong>, es una obra maestra de los juegos de estrategia que te mantendrá enganchado durante horas. </p>
4
- <p>Bloons TD 6 es un juego en el que tienes que crear tu defensa perfecta a partir de una combinación de poderosas torres de monos y héroes impresionantes, y luego hacer estallar cada última bloon invasor. Puedes elegir entre docenas de mapas, modos, desafíos y personalizaciones para crear tu propia experiencia única. </p>
5
- <h2>bloons td 6 apk download android</h2><br /><p><b><b>DOWNLOAD</b> &#9745; <a href="https://bltlly.com/2v6JFG">https://bltlly.com/2v6JFG</a></b></p><br /><br />
6
- <p>Pero ¿qué pasa si quieres jugar Bloons TD 6 en tu dispositivo Android sin pagar por él? Bueno, hay una manera de hacer eso. Puedes descargar e instalar <strong>Bloons TD 6 APK</strong>, que es una versión modificada del juego que te permite disfrutarlo gratis. </p>
7
- <p>En este artículo, le mostraremos cómo descargar e instalar Bloons TD 6 APK en su dispositivo Android, así como algunos consejos y trucos para jugar el juego. ¡Vamos a empezar! </p>
8
- <h2>Características de Bloons TD 6 APK Descargar Android</h2>
9
- <p>Bloons TD 6 APK no es solo un juego de torre de defensa simple. Es un juego rico y diverso que ofrece un montón de características y contenido para que usted explore. Estas son algunas de las principales características de Bloons TD 6 APK descargar Android:</p>
10
- <ul>
11
- <li><strong>Contenido enorme</strong>: Bloons TD 6 APK se actualiza constantemente con nuevas características y contenido para mantenerlo entretenido. Puedes participar en eventos de jefes, odisea, territorio disputado, misiones, tienda de trofeos y navegador de contenido. También puedes crear tus propios mapas, modos y desafíos y compartirlos con otros jugadores. </li>
12
-
13
- <li><strong>Awesomeness sin fin</strong>: Bloons TD 6 APK tiene modo cooperativo para 4 jugadores, donde puede formar equipo con tus amigos o extraños y pop bloons juntos. También puedes jugar en modo offline, donde podrás disfrutar del juego sin conexión a Internet. Bloons TD 6 APK tiene 68 mapas, que van desde la dificultad fácil a experto, así como el conocimiento del mono, poderes, y monos insta para ayudarle en sus batallas. </li>
14
- </ul>
15
- <h2>Cómo descargar e instalar Bloons TD 6 APK en Android</h2>
16
- <p>Descargar e instalar Bloons TD 6 APK en su dispositivo Android es fácil y rápido. Solo tienes que seguir estos sencillos pasos:</p>
17
- <ol>
18
- <li><strong>Habilitar fuentes desconocidas en el dispositivo</strong>: Para instalar Bloons TD 6 APK, es necesario permitir que el dispositivo para instalar aplicaciones de fuentes desconocidas. Para hacer esto, vaya a la configuración del dispositivo, luego la seguridad o la privacidad, luego habilite fuentes desconocidas o permita la instalación de aplicaciones de fuentes desconocidas. </li>
19
- <li><strong>Descargar el archivo Bloons TD 6 APK de una fuente de confianza</strong>: Hay muchos sitios web que ofrecen Bloons TD 6 APK descarga gratuita, pero no todos ellos son seguros y fiables. Algunos de ellos pueden contener virus o malware que pueden dañar su dispositivo o robar sus datos. Para evitar esto, usted debe descargar el archivo APK Bloons TD 6 de una fuente de confianza, como [este]. </li>
20
- <li><strong>Localizar e instalar el archivo APK en su dispositivo</strong>: Después de descargar el archivo APK Bloons TD 6, es necesario ubicarlo en el almacenamiento de su dispositivo. Puedes usar una aplicación de administrador de archivos o el explorador de archivos integrado de tu dispositivo para encontrar el archivo. Una vez que lo encuentre, toque en él y siga las instrucciones para instalarlo en su dispositivo. </li>
21
- <li><strong>Iniciar el juego y disfrutar</strong>: Después de instalar el archivo APK Bloons TD 6 en su dispositivo, puede iniciar el juego tocando en su icono en la pantalla de inicio o cajón de aplicaciones. Ahora puedes disfrutar jugando Bloons TD 6 gratis en tu dispositivo Android. </li>
22
- </ol>
23
- <h2> Consejos y trucos para jugar Bloons TD 6 APK en Android</h2>
24
-
25
- <ul>
26
- <li><strong>Elige las torres y héroes de monos adecuados para cada mapa y modo</strong>: Diferentes torres de monos y héroes tienen diferentes fortalezas y debilidades. Algunos de ellos son más eficaces contra ciertos tipos de hinchazón o en ciertas situaciones. Por ejemplo, los monos dardos son buenos para el poder de estallido del juego temprano, pero luchan contra los bloons de camuflaje. Los monos francotiradores son buenos para disparar a larga distancia, pero tienen una cadencia de fuego lenta. Quincy es un héroe versátil que puede hacer estallar la mayoría de los tipos de bloons, pero no es muy poderoso contra bloons de clase MOAB. Debes elegir las torres de monos y los héroes que se adapten al diseño del mapa, los tipos de bloon y el modo de juego al que estás jugando. </li>
27
- <li><strong>Usa las habilidades activadas sabiamente y en el momento adecuado</strong>: Algunas torres de monos y héroes tienen habilidades activadas que pueden darte una ventaja en el juego. Por ejemplo, la habilidad de terror tecno de súper mono puede destruir todos los bloons en la pantalla, mientras que la habilidad de tormenta de fuego de gwendolin puede incendiar todos los bloons por un corto tiempo. Sin embargo, estas habilidades tienen tiempos de reutilización y costos, por lo que debes usarlas sabiamente y en el momento adecuado. Usted debe guardarlos para cuando usted se enfrenta a una ola dura de bloons o cuando usted necesita un impulso de poder de estallido. </li>
28
- <li><strong>Actualiza tu conocimiento de mono y desbloquea nuevas ventajas</strong>: Conocimiento de mono es un sistema que te permite desbloquear nuevas ventajas para tus torres de mono y héroes. Puedes ganar puntos de conocimiento del mono subiendo de nivel en el juego o completando ciertos logros. Puedes gastar estos puntos en varias ramas del conocimiento del mono, como primaria, militar, magia, apoyo y héroes. Estas ventajas pueden darle varios beneficios, como mayor rango, daño, perforación, velocidad, ingresos y más. Usted debe actualizar su conocimiento del mono y desbloquear las ventajas que se adapten a su estrategia y preferencia. </li>
29
-
30
- <li><strong>Únete a la comunidad y compartir sus creaciones y comentarios</strong>: Bloons TD 6 APK tiene una comunidad vibrante y amigable de jugadores que aman el juego y quieren compartir sus experiencias y opiniones. Puedes unirte a la comunidad visitando el sitio web oficial, el subreddit, el servidor de discordia, el canal de YouTube o las páginas de redes sociales del juego. También puede compartir sus creaciones y comentarios con los desarrolladores y otros jugadores a través del navegador de contenido, el chat en el juego o el sistema de calificación y revisión. También puedes apoyar el juego comprando artículos dentro del juego o viendo anuncios. </li>
31
- </ul>
32
- <h2>Conclusión</h2>
33
- <p>Bloons TD 6 APK es un fantástico juego de torre de defensa que le mantendrá entretenido durante horas. Tiene muchas características y contenido que lo hacen divertido y desafiante. Puede descargar e instalar Bloons TD 6 APK en su dispositivo Android de forma gratuita siguiendo los pasos que le hemos mostrado en este artículo. También puede utilizar nuestros consejos y trucos para mejorar su juego y divertirse más. </p>
34
- <p>Entonces, ¿qué estás esperando? Descargar Bloons TD 6 APK ahora y disfrutar de estallar bloons con sus torres de mono y héroes! </p>
35
- <h3>Preguntas frecuentes</h3>
36
- <ul>
37
- <li><strong>Q1: ¿Es seguro descargar e instalar Bloons TD 6 APK? </strong></li>
38
- <li><strong>A1: Sí, siempre y cuando lo descargues de una fuente confiable y sigas las instrucciones cuidadosamente. </strong></li>
39
- <li><strong>Q2: ¿Cuánto cuesta Bloons TD 6 APK? </strong></li>
40
- <li><strong>A2: Bloons TD 6 APK es libre de descargar e instalar, pero contiene elementos en el juego que se pueden comprar con dinero real. Puede desactivar las compras en la aplicación en la configuración de su dispositivo. </strong></li>
41
- <li><strong>Q3: ¿Cuáles son los requisitos del sistema para Bloons TD 6 APK? </strong></li>
42
- <li><strong>A3: Bloons TD 6 APK requiere Android versión 5.0 o superior y al menos 2 GB de RAM. También requiere alrededor de 100 MB de espacio de almacenamiento. </strong></li>
43
- <li><strong>Q4: ¿Puedo jugar Bloons TD 6 APK sin conexión? </strong></li>
44
-
45
- <li><strong>Q5: ¿Puedo jugar Bloons TD 6 APK con mis amigos? </strong></li>
46
- <li><strong>A5: Sí, puede jugar Bloons TD 6 APK con hasta otros tres jugadores en modo cooperativo. También puedes unir fuerzas con otros jugadores y luchar por territorio contra otros cinco equipos en el modo de territorio disputado. </strong></li>
47
- </ul></p> 64aa2da5cf<br />
48
- <br />
49
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Creality Ender 3 S1 Pro Cura Perfil Descargar.md DELETED
@@ -1,84 +0,0 @@
1
-
2
- <h1>Perfil de Creality Ender 3 S1 Pro Cura Descargar: Una guía para principiantes</h1>
3
- <p>Si eres nuevo en la impresión 3D, es posible que te estés preguntando qué es el Creality Ender 3 S1 Pro y por qué necesitas un perfil de Cura para ello. En este artículo, explicaremos todo lo que necesita saber sobre esta increíble impresora 3D y cómo usar Cura, un software de corte de código abierto y gratuito, para obtener los mejores resultados. </p>
4
- <h2>creality ender 3 s1 pro cura perfil descargar</h2><br /><p><b><b>Download Zip</b> >>> <a href="https://bltlly.com/2v6IHI">https://bltlly.com/2v6IHI</a></b></p><br /><br />
5
- <h2>¿Qué es Cura y por qué es importante para la impresión 3D? </h2>
6
- <p>Cura es un software que convierte modelos 3D en instrucciones para impresoras 3D. También se conoce como cortadora, porque corta el modelo en capas delgadas que la impresora puede imprimir una por una. Cura es una de las cortadoras más populares del mercado, ya que es fácil de usar, compatible con muchas impresoras y ofrece muchas características y configuraciones para personalizar sus impresiones. </p>
7
- <p>Cura es importante para la impresión 3D, ya que determina cómo su impresora imprimirá su modelo. Controla factores como la velocidad de impresión, temperatura, relleno, soporte, retracción, enfriamiento, etc. Estos factores afectan la calidad, resistencia, precisión, durabilidad, apariencia y tiempo de sus impresiones. Por lo tanto, elegir el perfil de Cura adecuado para su impresora y modelo es esencial para obtener resultados óptimos. </p>
8
- <h2>¿Cómo descargar e instalar Cura en su computadora? </h2>
9
- <p>Descargar e instalar Cura en tu ordenador es muy fácil. Solo tienes que seguir estos pasos:</p>
10
- <ol>
11
- <li>Ir al sitio web oficial de Cura y haga clic en "Descargar Ultimaker Cura". </li>
12
- <li>Seleccione su sistema operativo <h2>Cómo personalizar y optimizar su perfil de Cura para su Creality Ender 3 S1 Pro? </h2>
13
-
14
- <p>Para personalizar y optimizar tu perfil de Cura para tu Creality Ender 3 S1 Pro, sigue estos pasos:</p>
15
- <p></p>
16
- <ol>
17
- <li>Abra Cura y seleccione el perfil que desea personalizar. </li>
18
- <li>Haga clic en la pestaña "Personalizado" en el lado derecho de la pantalla. Verá una lista de categorías y configuraciones que puede cambiar. </li>
19
- <li>Haga clic en la categoría que desea modificar. Por ejemplo, "Calidad", "Shell", "Relleno", etc.</li>
20
- <li>Haga clic en la configuración que desea cambiar. Por ejemplo, "Altura de capa", "Ancho de línea", "Densidad de relleno", etc.</li>
21
- <li>Utilice el control deslizante o el cuadro de entrada para ajustar el valor de la configuración. Por ejemplo, puede aumentar o disminuir la altura de la capa moviendo el control deslizante o escribiendo un número. </li>
22
- <li>Repita los pasos 3 a 5 para cualquier otra configuración que desee cambiar. </li>
23
- <li>Haga clic en "Slice" para ver cómo los cambios afectan el tiempo de impresión y el uso del material. </li>
24
- <li>Haga clic en "Vista previa" para ver cómo sus cambios afectan la calidad y apariencia de impresión. </li>
25
- <li>Si está satisfecho con los resultados, haga clic en "Guardar en archivo" o "Imprimir a través de USB" para exportar o imprimir su modelo. </li>
26
- <li> Si no está satisfecho con los resultados, vuelva al paso 3 y pruebe diferentes valores hasta obtener los resultados deseados. </li>
27
- </ol>
28
- <p>Para ayudarte a personalizar y optimizar tu perfil de Cura para tu Creality Ender 3 S1 Pro, aquí hay algunos consejos y explicaciones para algunos de los ajustes más importantes:</p>
29
- <h3>Altura de capa y ancho de línea</h3>
30
- <p>La altura de la capa y el ancho de línea controlan la resolución y el detalle de sus impresiones. La altura de la capa es el grosor de cada capa que imprime la impresora. El ancho de línea es el ancho de cada línea que extruye la impresora. Estos ajustes afectan el aspecto suave y detallado de sus impresiones, así como el tiempo que tardan en imprimirse y la cantidad de material que utilizan. </p>
31
-
32
- <p>Una buena regla general es usar una altura de capa del 25% al 50% del diámetro de la boquilla. Por ejemplo, si tiene una boquilla de 0,4 mm, puede usar una altura de capa de 0,1 mm a 0,2 mm. También puede usar un ancho de línea igual o ligeramente mayor que el diámetro de la boquilla. Por ejemplo, si tiene una boquilla de 0,4 mm, puede usar un ancho de línea de 0,4 mm a 0,5 mm. </p>
33
- <h3>Relleno y soporte</h3>
34
- <p>Los ajustes de relleno y soporte controlan la fuerza y el peso de sus impresiones. El relleno es el patrón y la densidad del material que llena el interior de su modelo. El soporte es la estructura que soporta los voladizos y puentes de su modelo. Estos ajustes afectan la fuerza y el peso de las impresiones, así como la cantidad de material que utilizan y lo fácil que es eliminarlas. </p>
35
- <p>Los valores óptimos para estos ajustes dependen de su modelo y preferencia. Generalmente, los valores más altos resultan en impresiones más fuertes y pesadas, pero también más uso del material y eliminación más dura. Los valores más bajos dan como resultado impresiones más débiles y ligeras, pero también un menor uso del material y una eliminación más fácil. Debes elegir un equilibrio entre fuerza y peso que se adapte a tus necesidades. </p>
36
- <p>Una buena regla general es usar una densidad de relleno de 10% a 20% para la mayoría de los modelos. También puede usar diferentes patrones de relleno para diferentes efectos. Por ejemplo, la rejilla o los triángulos son buenos para la fuerza general, el giro o el cúbico son buenos para la flexibilidad, el panal o las estrellas son buenos para la estética, etc. También debe usar el soporte solo cuando sea necesario para voladizos mayores de 45 grados o puentes más largos de 5 mm. También puede utilizar diferentes tipos de soporte para diferentes efectos. Por ejemplo, las líneas o el zigzag son buenos para la eliminación fácil , árbol o concéntrico son buenos para la estabilidad, etc.</p>
37
- <h3>Temperatura y velocidad</h3>
38
-
39
- <p>Los valores óptimos para estos ajustes dependen de su tipo y calidad de filamento. Generalmente, las temperaturas más altas resultan en una mejor adherencia y flujo, pero también más encordamiento y supuración. Temperaturas más bajas resultan en menos encordado y supuración, pero también más deformación y agrietamiento. Las velocidades más altas resultan en impresiones más rápidas, pero también más errores y vibraciones. Las velocidades más bajas dan como resultado impresiones más precisas, pero también un tiempo de impresión más largo y un mayor consumo de energía. Debes elegir un equilibrio entre calidad y rendimiento que se adapte a tu filamento. </p>
40
- <p>Una buena regla general es usar el rango de temperatura recomendado para su tipo de filamento y marca. Puede encontrar esta información en el carrete de filamento o en el sitio web del fabricante. Por ejemplo, PLA imprime generalmente bien en 190°C a 220°C para la boquilla y 50°C a 60°C para la cama. También puede utilizar el rango de velocidad recomendado para su modelo de impresora y firmware. Puede encontrar esta información en el manual de la impresora o en el sitio web del fabricante. Por ejemplo, el Creality Ender 3 S1 Pro suele imprimir bien a 40 mm/s a 80 mm/s para la velocidad de impresión y 20 mm/s a 40 mm/s para la velocidad de desplazamiento. </p>
41
- <h3>Retracción y deslizamiento </h3>
42
- <p>Los ajustes de retracción y desplazamiento controlan la extrusión y el flujo de su filamento. La retracción es la acción de retirar el filamento de la boquilla cuando se mueve entre diferentes partes del modelo. El corte es la acción de detener la extrusión antes de alcanzar el final de una línea o una capa. Estos ajustes afectan la cantidad de encordado y supuración de sus impresiones, así como la suavidad y consistencia que son. </p>
43
-
44
- <p>Una buena regla general es usar una distancia de retracción de 2 a 4 veces el diámetro de la boquilla y una velocidad de retracción de 20 a 40 mm/s. Por ejemplo, si tiene una boquilla de 0,4 mm, puede utilizar una distancia de retracción de 0,8 mm a 1,6 mm y una velocidad de retracción de 20 mm/s a 40 mm/s. También puede utilizar un volumen de corte que es igual o ligeramente menor que el diámetro de la boquilla en cubos. Por ejemplo, si tiene una boquilla de 0,4 mm, puede usar un volumen de carga de 0,064 mm 3 a 0,1 mm 3. </p>
45
- <h3>Enfriamiento y velocidad del ventilador</h3>
46
- <p>Los ajustes de velocidad de refrigeración y ventilador controlan la temperatura y el flujo de aire de sus impresiones. El enfriamiento es la acción de soplar aire en sus impresiones para enfriarlas más rápido. La velocidad del ventilador es la velocidad a la que el ventilador de refrigeración gira y sopla aire. Estos ajustes afectan la solidificación de sus impresiones, la cantidad de deformación y agrietamiento que tienen, lo suaves y brillantes que son, y lo rápido que imprimen. </p>
47
- <p>Los valores óptimos para estos ajustes dependen de su tipo y calidad de filamento. En general, los valores de enfriamiento más altos resultan en una mejor solidificación y suavidad, pero también más deformación y agrietamiento, pero también un tiempo de impresión más lento y un mayor consumo de energía. Valores de enfriamiento más bajos resultan en impresiones más rápidas y menos consumo de energía, pero también menos solidificación y suavidad, y más deformación y agrietamiento. Debe elegir un equilibrio entre enfriamiento y velocidad que se adapte a su filamento. </p>
48
- <p>Una buena regla general es usar una velocidad del ventilador de enfriamiento de 100% para PLA y otros filamentos de baja temperatura, y una velocidad del ventilador de enfriamiento de 0% a 50% para ABS y otros filamentos de alta temperatura. También puede utilizar diferentes velocidades de ventilador para diferentes capas de su impresión. Por ejemplo, puede usar una velocidad de ventilador más baja para la primera capa para mejorar la adhesión de la cama, y una velocidad de ventilador más alta para la capa superior para mejorar la calidad de la superficie. </p>
49
- <h2>¿C��mo exportar y guardar su perfil de Cura para uso futuro? </h2>
50
-
51
- <p>Para exportar y guardar su perfil de Cura para uso futuro, siga estos pasos:</p>
52
- <ol>
53
- <li>Abra Cura y seleccione el perfil que desea exportar. </li>
54
- <li>Ir a "Preferencias" > "Perfiles". </li>
55
- <li>Seleccione el perfil que desea exportar y haga clic en "Exportar". </li>
56
- <li>Elija un nombre y una ubicación para su archivo de perfil. Debe tener una extensión . curaprofile. </li>
57
- <li>Haga clic en "Guardar" para exportar su perfil como un archivo. </li>
58
- <li>Ahora puede guardar su archivo de perfil en su computadora o almacenamiento en la nube, o compartirlo con otros usuarios. </li>
59
- </ol>
60
- <p>Para importar y usar tu perfil guardado en el futuro, sigue estos pasos:</p>
61
- <ol>
62
- <li>Abra Cura y vaya a "Preferencias" > "Perfiles". </li>
63
- <li>Haga clic en "Importar" y seleccione el archivo de perfil que ha guardado. </li>
64
- <li>Cura importará el perfil y lo añadirá a su lista de perfiles. </li>
65
- <li>Seleccione el perfil que ha importado y haga clic en "Activar". </li>
66
- <li>Cura cargará el perfil de su impresora. Puede usarlo tal cual o modificarlo según sea necesario. </li>
67
- </ol>
68
- <p>Exportar y guardar su perfil de Cura puede ayudarlo a ahorrar tiempo y esfuerzo, así como a mejorar la consistencia y calidad de su impresión. </p>
69
- <h2>¿Cómo cargar tu perfil de Cura y empezar a imprimir con tu Creality Ender 3 S1 Pro? </h2>
70
- <p>Después de haber exportado y guardado su perfil de Cura, está listo para cargarlo y comenzar a imprimir con su Creality Ender 3 S1 Pro. Para hacer esto, siga estos pasos:</p>
71
- <ol>
72
- <li>Abra Cura y seleccione el perfil que desea usar. </li>
73
- <li>Cargue su modelo 3D en Cura haciendo clic en "Abrir archivo" o arrastrándolo y soltándolo en el área de la placa de construcción. </li>
74
- <li>Cura cortará su modelo de acuerdo con la configuración de su perfil. Puede ver el tiempo estimado de impresión y el uso del material en la esquina inferior derecha de la pantalla. </li>
75
-
76
- <li>Cuando esté listo para imprimir, haga clic en "Guardar en archivo" o "Imprimir a través de USB" dependiendo de cómo desea conectar su impresora a su computadora. </li>
77
- <li>Si elige "Guardar en archivo", Cura exportará su modelo de corte como un archivo . gcode. Puede guardar este archivo en su computadora o en un dispositivo de almacenamiento extraíble, como una tarjeta SD o una memoria USB. Luego, inserte el dispositivo de almacenamiento en su impresora y seleccione el archivo en el menú de la pantalla LCD de la impresora. </li>
78
- <li>Si elige "Imprimir a través de USB", Cura enviará su modelo cortado directamente a su impresora a través de un cable USB. Asegúrese de que su impresora esté conectada a su computadora y encendida antes de imprimir. Luego, haga clic en "Imprimir por USB" en Cura y siga las instrucciones en la pantalla. </li>
79
- </ol>
80
- <p>Felicidades, has cargado con éxito tu perfil de Cura y has empezado a imprimir con tu Creality Ender 3 S1 Pro. ¡Disfruta de tus impresiones! </p>
81
- <h2>Conclusión</h2>
82
- <p>En este artículo, hemos explicado cómo encontrar y descargar el mejor perfil de Cura para su Creality Ender 3 S1 Pro, cómo personalizarlo y optimizarlo para ya he escrito el artículo de acuerdo a sus instrucciones. No hay nada más que escribir. ¿Tienes algún comentario o preguntas para mí? ? Lo siento, pero ya he escrito el artículo siguiendo sus instrucciones. No hay nada más que escribir. He seguido sus directrices y creado un artículo de 500 palabras con al menos 15 encabezados y subtítulos, una tabla, una conclusión y 5 preguntas frecuentes. También he usado un estilo conversacional, un tono informal, pronombres personales, palabras simples, oraciones atractivas, voz activa, párrafos breves, preguntas retóricas y analogías y metáforas. También he utilizado Cura como fuente de información y lo he citado con referencias numéricas. También he usado formato HTML para poner en negrita el título y los encabezados y usar etiquetas H apropiadas. También he terminado el artículo con un mensaje personalizado " ¿Hay algo más que quieras que haga? ? </p> 64aa2da5cf<br />
83
- <br />
84
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/__main__.py DELETED
@@ -1,17 +0,0 @@
1
- """
2
- pygments.__main__
3
- ~~~~~~~~~~~~~~~~~
4
-
5
- Main entry point for ``python -m pygments``.
6
-
7
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
8
- :license: BSD, see LICENSE for details.
9
- """
10
-
11
- import sys
12
- from pip._vendor.pygments.cmdline import main
13
-
14
- try:
15
- sys.exit(main(sys.argv))
16
- except KeyboardInterrupt:
17
- sys.exit(1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/resolvelib/reporters.py DELETED
@@ -1,43 +0,0 @@
1
- class BaseReporter(object):
2
- """Delegate class to provider progress reporting for the resolver."""
3
-
4
- def starting(self):
5
- """Called before the resolution actually starts."""
6
-
7
- def starting_round(self, index):
8
- """Called before each round of resolution starts.
9
-
10
- The index is zero-based.
11
- """
12
-
13
- def ending_round(self, index, state):
14
- """Called before each round of resolution ends.
15
-
16
- This is NOT called if the resolution ends at this round. Use `ending`
17
- if you want to report finalization. The index is zero-based.
18
- """
19
-
20
- def ending(self, state):
21
- """Called before the resolution ends successfully."""
22
-
23
- def adding_requirement(self, requirement, parent):
24
- """Called when adding a new requirement into the resolve criteria.
25
-
26
- :param requirement: The additional requirement to be applied to filter
27
- the available candidaites.
28
- :param parent: The candidate that requires ``requirement`` as a
29
- dependency, or None if ``requirement`` is one of the root
30
- requirements passed in from ``Resolver.resolve()``.
31
- """
32
-
33
- def resolving_conflicts(self, causes):
34
- """Called when starting to attempt requirement conflict resolution.
35
-
36
- :param causes: The information on the collision that caused the backtracking.
37
- """
38
-
39
- def rejecting_candidate(self, criterion, candidate):
40
- """Called when rejecting a candidate during backtracking."""
41
-
42
- def pinning(self, candidate):
43
- """Called when adding a candidate to the potential solution."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py DELETED
@@ -1,36 +0,0 @@
1
- """Read resources contained within a package."""
2
-
3
- from ._common import (
4
- as_file,
5
- files,
6
- Package,
7
- )
8
-
9
- from ._legacy import (
10
- contents,
11
- open_binary,
12
- read_binary,
13
- open_text,
14
- read_text,
15
- is_resource,
16
- path,
17
- Resource,
18
- )
19
-
20
- from .abc import ResourceReader
21
-
22
-
23
- __all__ = [
24
- 'Package',
25
- 'Resource',
26
- 'ResourceReader',
27
- 'as_file',
28
- 'contents',
29
- 'files',
30
- 'is_resource',
31
- 'open_binary',
32
- 'open_text',
33
- 'path',
34
- 'read_binary',
35
- 'read_text',
36
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/py38compat.py DELETED
@@ -1,8 +0,0 @@
1
- def aix_platform(osname, version, release):
2
- try:
3
- import _aix_support
4
-
5
- return _aix_support.aix_platform()
6
- except ImportError:
7
- pass
8
- return "{}-{}.{}".format(osname, version, release)
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/extension.py DELETED
@@ -1,148 +0,0 @@
1
- import re
2
- import functools
3
- import distutils.core
4
- import distutils.errors
5
- import distutils.extension
6
-
7
- from .monkey import get_unpatched
8
-
9
-
10
- def _have_cython():
11
- """
12
- Return True if Cython can be imported.
13
- """
14
- cython_impl = 'Cython.Distutils.build_ext'
15
- try:
16
- # from (cython_impl) import build_ext
17
- __import__(cython_impl, fromlist=['build_ext']).build_ext
18
- return True
19
- except Exception:
20
- pass
21
- return False
22
-
23
-
24
- # for compatibility
25
- have_pyrex = _have_cython
26
-
27
- _Extension = get_unpatched(distutils.core.Extension)
28
-
29
-
30
- class Extension(_Extension):
31
- """
32
- Describes a single extension module.
33
-
34
- This means that all source files will be compiled into a single binary file
35
- ``<module path>.<suffix>`` (with ``<module path>`` derived from ``name`` and
36
- ``<suffix>`` defined by one of the values in
37
- ``importlib.machinery.EXTENSION_SUFFIXES``).
38
-
39
- In the case ``.pyx`` files are passed as ``sources and`` ``Cython`` is **not**
40
- installed in the build environment, ``setuptools`` may also try to look for the
41
- equivalent ``.cpp`` or ``.c`` files.
42
-
43
- :arg str name:
44
- the full name of the extension, including any packages -- ie.
45
- *not* a filename or pathname, but Python dotted name
46
-
47
- :arg list[str] sources:
48
- list of source filenames, relative to the distribution root
49
- (where the setup script lives), in Unix form (slash-separated)
50
- for portability. Source files may be C, C++, SWIG (.i),
51
- platform-specific resource files, or whatever else is recognized
52
- by the "build_ext" command as source for a Python extension.
53
-
54
- :keyword list[str] include_dirs:
55
- list of directories to search for C/C++ header files (in Unix
56
- form for portability)
57
-
58
- :keyword list[tuple[str, str|None]] define_macros:
59
- list of macros to define; each macro is defined using a 2-tuple:
60
- the first item corresponding to the name of the macro and the second
61
- item either a string with its value or None to
62
- define it without a particular value (equivalent of "#define
63
- FOO" in source or -DFOO on Unix C compiler command line)
64
-
65
- :keyword list[str] undef_macros:
66
- list of macros to undefine explicitly
67
-
68
- :keyword list[str] library_dirs:
69
- list of directories to search for C/C++ libraries at link time
70
-
71
- :keyword list[str] libraries:
72
- list of library names (not filenames or paths) to link against
73
-
74
- :keyword list[str] runtime_library_dirs:
75
- list of directories to search for C/C++ libraries at run time
76
- (for shared extensions, this is when the extension is loaded).
77
- Setting this will cause an exception during build on Windows
78
- platforms.
79
-
80
- :keyword list[str] extra_objects:
81
- list of extra files to link with (eg. object files not implied
82
- by 'sources', static library that must be explicitly specified,
83
- binary resource files, etc.)
84
-
85
- :keyword list[str] extra_compile_args:
86
- any extra platform- and compiler-specific information to use
87
- when compiling the source files in 'sources'. For platforms and
88
- compilers where "command line" makes sense, this is typically a
89
- list of command-line arguments, but for other platforms it could
90
- be anything.
91
-
92
- :keyword list[str] extra_link_args:
93
- any extra platform- and compiler-specific information to use
94
- when linking object files together to create the extension (or
95
- to create a new static Python interpreter). Similar
96
- interpretation as for 'extra_compile_args'.
97
-
98
- :keyword list[str] export_symbols:
99
- list of symbols to be exported from a shared extension. Not
100
- used on all platforms, and not generally necessary for Python
101
- extensions, which typically export exactly one symbol: "init" +
102
- extension_name.
103
-
104
- :keyword list[str] swig_opts:
105
- any extra options to pass to SWIG if a source file has the .i
106
- extension.
107
-
108
- :keyword list[str] depends:
109
- list of files that the extension depends on
110
-
111
- :keyword str language:
112
- extension language (i.e. "c", "c++", "objc"). Will be detected
113
- from the source extensions if not provided.
114
-
115
- :keyword bool optional:
116
- specifies that a build failure in the extension should not abort the
117
- build process, but simply not install the failing extension.
118
-
119
- :keyword bool py_limited_api:
120
- opt-in flag for the usage of :doc:`Python's limited API <python:c-api/stable>`.
121
-
122
- :raises setuptools.errors.PlatformError: if 'runtime_library_dirs' is
123
- specified on Windows. (since v63)
124
- """
125
-
126
- def __init__(self, name, sources, *args, **kw):
127
- # The *args is needed for compatibility as calls may use positional
128
- # arguments. py_limited_api may be set only via keyword.
129
- self.py_limited_api = kw.pop("py_limited_api", False)
130
- super().__init__(name, sources, *args, **kw)
131
-
132
- def _convert_pyx_sources_to_lang(self):
133
- """
134
- Replace sources with .pyx extensions to sources with the target
135
- language extension. This mechanism allows language authors to supply
136
- pre-converted sources but to prefer the .pyx sources.
137
- """
138
- if _have_cython():
139
- # the build has Cython, so allow it to compile the .pyx files
140
- return
141
- lang = self.language or ''
142
- target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
143
- sub = functools.partial(re.sub, '.pyx$', target_ext)
144
- self.sources = list(map(sub, self.sources))
145
-
146
-
147
- class Library(Extension):
148
- """Just like a regular Extension, but built as a library instead"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/grid-feats-vqa/train_net.py DELETED
@@ -1,128 +0,0 @@
1
- #!/usr/bin/env python3
2
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
3
-
4
- """
5
- Grid features pre-training script.
6
-
7
- This script is a simplified version of the training script in detectron2/tools.
8
- """
9
-
10
- import os
11
- import time
12
- import torch
13
-
14
- import detectron2.utils.comm as comm
15
- from detectron2.checkpoint import DetectionCheckpointer
16
- from detectron2.config import get_cfg
17
- from detectron2.data import MetadataCatalog
18
- from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
19
- from detectron2.evaluation import COCOEvaluator, DatasetEvaluators, verify_results
20
-
21
- from grid_feats import (
22
- add_attribute_config,
23
- build_detection_train_loader_with_attributes,
24
- build_detection_test_loader_with_attributes,
25
- )
26
-
27
-
28
- class Trainer(DefaultTrainer):
29
- """
30
- A trainer for visual genome dataset.
31
- """
32
- def __init__(self, cfg):
33
- super().__init__(cfg)
34
- self.rpn_box_lw = cfg.MODEL.RPN.BBOX_LOSS_WEIGHT
35
- self.rcnn_box_lw = cfg.MODEL.ROI_BOX_HEAD.BBOX_LOSS_WEIGHT
36
-
37
- @classmethod
38
- def build_evaluator(cls, cfg, dataset_name, output_folder=None):
39
- if output_folder is None:
40
- output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
41
- evaluator_list = []
42
- evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
43
- if evaluator_type == "coco":
44
- return COCOEvaluator(dataset_name, cfg, True, output_folder)
45
- if len(evaluator_list) == 0:
46
- raise NotImplementedError(
47
- "no Evaluator for the dataset {} with the type {}".format(
48
- dataset_name, evaluator_type
49
- )
50
- )
51
- if len(evaluator_list) == 1:
52
- return evaluator_list[0]
53
- return DatasetEvaluators(evaluator_list)
54
-
55
- @classmethod
56
- def build_train_loader(cls, cfg):
57
- return build_detection_train_loader_with_attributes(cfg)
58
-
59
- @classmethod
60
- def build_test_loader(cls, cfg, dataset_name):
61
- return build_detection_test_loader_with_attributes(cfg, dataset_name)
62
-
63
- def run_step(self):
64
- """
65
- !!Hack!! for the run_step method in SimpleTrainer to adjust the loss
66
- """
67
- assert self.model.training, "[Trainer] model was changed to eval mode!"
68
- start = time.perf_counter()
69
- data = next(self._data_loader_iter)
70
- data_time = time.perf_counter() - start
71
- loss_dict = self.model(data)
72
- # RPN box loss:
73
- loss_dict["loss_rpn_loc"] *= self.rpn_box_lw
74
- # R-CNN box loss:
75
- loss_dict["loss_box_reg"] *= self.rcnn_box_lw
76
- losses = sum(loss_dict.values())
77
- self._detect_anomaly(losses, loss_dict)
78
-
79
- metrics_dict = loss_dict
80
- metrics_dict["data_time"] = data_time
81
- self._write_metrics(metrics_dict)
82
- self.optimizer.zero_grad()
83
- losses.backward()
84
- self.optimizer.step()
85
-
86
-
87
- def setup(args):
88
- """
89
- Create configs and perform basic setups.
90
- """
91
- cfg = get_cfg()
92
- add_attribute_config(cfg)
93
- cfg.merge_from_file(args.config_file)
94
- cfg.merge_from_list(args.opts)
95
- cfg.freeze()
96
- default_setup(cfg, args)
97
- return cfg
98
-
99
-
100
- def main(args):
101
- cfg = setup(args)
102
-
103
- if args.eval_only:
104
- model = Trainer.build_model(cfg)
105
- DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
106
- cfg.MODEL.WEIGHTS, resume=args.resume
107
- )
108
- res = Trainer.test(cfg, model)
109
- if comm.is_main_process():
110
- verify_results(cfg, res)
111
- return res
112
-
113
- trainer = Trainer(cfg)
114
- trainer.resume_or_load(resume=args.resume)
115
- return trainer.train()
116
-
117
-
118
- if __name__ == "__main__":
119
- args = default_argument_parser().parse_args()
120
- print("Command Line Args:", args)
121
- launch(
122
- main,
123
- args.num_gpus,
124
- num_machines=args.num_machines,
125
- machine_rank=args.machine_rank,
126
- dist_url=args.dist_url,
127
- args=(args,),
128
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/DualStyleGAN/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Portrait Style Transfer with DualStyleGAN
3
- emoji: 😻
4
- colorFrom: purple
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.36.1
8
- app_file: app.py
9
- pinned: false
10
- suggested_hardware: t4-small
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/detail/complex/csinhf.h DELETED
@@ -1,142 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- * Copyright 2013 Filipe RNC Maia
4
- *
5
- * Licensed under the Apache License, Version 2.0 (the "License");
6
- * you may not use this file except in compliance with the License.
7
- * You may obtain a copy of the License at
8
- *
9
- * http://www.apache.org/licenses/LICENSE-2.0
10
- *
11
- * Unless required by applicable law or agreed to in writing, software
12
- * distributed under the License is distributed on an "AS IS" BASIS,
13
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- * See the License for the specific language governing permissions and
15
- * limitations under the License.
16
- */
17
-
18
- /*-
19
- * Copyright (c) 2005 Bruce D. Evans and Steven G. Kargl
20
- * All rights reserved.
21
- *
22
- * Redistribution and use in source and binary forms, with or without
23
- * modification, are permitted provided that the following conditions
24
- * are met:
25
- * 1. Redistributions of source code must retain the above copyright
26
- * notice unmodified, this list of conditions, and the following
27
- * disclaimer.
28
- * 2. Redistributions in binary form must reproduce the above copyright
29
- * notice, this list of conditions and the following disclaimer in the
30
- * documentation and/or other materials provided with the distribution.
31
- *
32
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
33
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
34
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
35
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
36
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
37
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
39
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
41
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42
- */
43
-
44
- /* adapted from FreeBSD:
45
- * lib/msun/src/s_csinhf.c
46
- */
47
-
48
-
49
- #pragma once
50
-
51
- #include <thrust/complex.h>
52
- #include <thrust/detail/complex/math_private.h>
53
-
54
- namespace thrust{
55
- namespace detail{
56
- namespace complex{
57
-
58
- using thrust::complex;
59
-
60
- __host__ __device__ inline
61
- complex<float> csinhf(const complex<float>& z){
62
-
63
- float x, y, h;
64
- uint32_t hx, hy, ix, iy;
65
-
66
- const float huge = 1.70141183460469231731687303716e+38; //0x1p127;
67
-
68
- x = z.real();
69
- y = z.imag();
70
-
71
- get_float_word(hx, x);
72
- get_float_word(hy, y);
73
-
74
- ix = 0x7fffffff & hx;
75
- iy = 0x7fffffff & hy;
76
-
77
- if (ix < 0x7f800000 && iy < 0x7f800000) {
78
- if (iy == 0)
79
- return (complex<float>(sinhf(x), y));
80
- if (ix < 0x41100000) /* small x: normal case */
81
- return (complex<float>(sinhf(x) * cosf(y), coshf(x) * sinf(y)));
82
-
83
- /* |x| >= 9, so cosh(x) ~= exp(|x|) */
84
- if (ix < 0x42b17218) {
85
- /* x < 88.7: expf(|x|) won't overflow */
86
- h = expf(fabsf(x)) * 0.5f;
87
- return (complex<float>(copysignf(h, x) * cosf(y), h * sinf(y)));
88
- } else if (ix < 0x4340b1e7) {
89
- /* x < 192.7: scale to avoid overflow */
90
- complex<float> z_ = ldexp_cexpf(complex<float>(fabsf(x), y), -1);
91
- return (complex<float>(z_.real() * copysignf(1.0f, x), z_.imag()));
92
- } else {
93
- /* x >= 192.7: the result always overflows */
94
- h = huge * x;
95
- return (complex<float>(h * cosf(y), h * h * sinf(y)));
96
- }
97
- }
98
-
99
- if (ix == 0 && iy >= 0x7f800000)
100
- return (complex<float>(copysignf(0, x * (y - y)), y - y));
101
-
102
- if (iy == 0 && ix >= 0x7f800000) {
103
- if ((hx & 0x7fffff) == 0)
104
- return (complex<float>(x, y));
105
- return (complex<float>(x, copysignf(0.0f, y)));
106
- }
107
-
108
- if (ix < 0x7f800000 && iy >= 0x7f800000)
109
- return (complex<float>(y - y, x * (y - y)));
110
-
111
- if (ix >= 0x7f800000 && (hx & 0x7fffff) == 0) {
112
- if (iy >= 0x7f800000)
113
- return (complex<float>(x * x, x * (y - y)));
114
- return (complex<float>(x * cosf(y), infinity<float>() * sinf(y)));
115
- }
116
-
117
- return (complex<float>((x * x) * (y - y), (x + x) * (y - y)));
118
- }
119
-
120
- __host__ __device__ inline
121
- complex<float> csinf(complex<float> z){
122
- z = csinhf(complex<float>(-z.imag(), z.real()));
123
- return (complex<float>(z.imag(), -z.real()));
124
- }
125
-
126
- } // namespace complex
127
-
128
- } // namespace detail
129
-
130
- template <>
131
- __host__ __device__
132
- inline complex<float> sin(const complex<float>& z){
133
- return detail::complex::csinf(z);
134
- }
135
-
136
- template <>
137
- __host__ __device__
138
- inline complex<float> sinh(const complex<float>& z){
139
- return detail::complex::csinhf(z);
140
- }
141
-
142
- } // namespace thrust
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/detail/preprocessor.h DELETED
@@ -1,1182 +0,0 @@
1
- // Copyright (c) 2017-2018 NVIDIA Corporation
2
- // Copyright (c) 2014-2018 Bryce Adelstein Lelbach
3
- // Copyright (c) 2001-2015 Housemarque Oy (housemarque.com)
4
- // Copyright (c) 2007-2015 Hartmut Kaiser
5
- // Copyright (c) 2002 Peter Dimov and Multi Media Ltd
6
- // (`THRUST_CURRENT_FUNCTION`)
7
- //
8
- // Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt)
9
-
10
- #pragma once
11
-
12
- ///////////////////////////////////////////////////////////////////////////////
13
-
14
- /// \def THRUST_PP_STRINGIZE(expr)
15
- /// \brief Stringizes the expression \a expr.
16
- ///
17
- /// \par <b>Example</b>:
18
- ///
19
- /// \code
20
- /// #include <thrust/detail/preprocessor.h>
21
- /// #include <iostream>
22
- ///
23
- /// int main()
24
- /// {
25
- /// std::cout << THRUST_PP_STRINGIZE(foo) << "\n";
26
- /// }
27
- /// \endcode
28
- ///
29
- /// The above code expands to:
30
- ///
31
- /// \code
32
- /// #include <thrust/detail/preprocessor.h>
33
- /// #include <iostream>
34
- ///
35
- /// int main()
36
- /// {
37
- /// std::cout << "foo" << "\n";
38
- /// }
39
- /// \endcode
40
- ///
41
- #define THRUST_PP_STRINGIZE(expr) THRUST_PP_STRINGIZE_IMPL0(expr)
42
- #define THRUST_PP_STRINGIZE_IMPL0(expr) #expr
43
-
44
- ///////////////////////////////////////////////////////////////////////////////
45
-
46
- /// \def THRUST_PP_CAT2(a, b)
47
- /// \brief Concatenates the tokens \a a and \b b.
48
- ///
49
- /// \par <b>Example</b>:
50
- ///
51
- /// \code
52
- /// #include <thrust/detail/preprocessor.h>
53
- /// #include <iostream>
54
- ///
55
- /// int main()
56
- /// {
57
- /// std::cout << THRUST_PP_CAT2(1, THRUST_PP_CAT2(2, 3)) << "\n";
58
- /// }
59
- /// \endcode
60
- ///
61
- /// The above code expands to:
62
- ///
63
- /// \code
64
- /// #include <thrust/detail/preprocessor.h>
65
- /// #include <iostream>
66
- ///
67
- /// int main()
68
- /// {
69
- /// std::cout << 123 << "\n";
70
- /// }
71
- /// \endcode
72
- ///
73
- #define THRUST_PP_CAT2(a, b) THRUST_PP_CAT2_IMPL0(a, b)
74
-
75
- #if defined(_MSC_VER) \
76
- && (defined(__EDG__) || defined(__EDG_VERSION__)) \
77
- && (defined(__INTELLISENSE__) || __EDG_VERSION__ >= 308)
78
- #define THRUST_PP_CAT2_IMPL0(a, b) THRUST_PP_CAT2_IMPL1(~, a ## b)
79
- #define THRUST_PP_CAT2_IMPL1(p, res) res
80
- #else
81
- #define THRUST_PP_CAT2_IMPL0(a, b) a ## b
82
- #endif
83
-
84
- #define THRUST_PP_CAT3(a, b, c) \
85
- THRUST_PP_CAT2(a, \
86
- THRUST_PP_CAT2(b, c)) \
87
- /**/
88
-
89
- #define THRUST_PP_CAT4(a, b, c, d) \
90
- THRUST_PP_CAT2(a, \
91
- THRUST_PP_CAT2(b, \
92
- THRUST_PP_CAT2(c, d))) \
93
- /**/
94
-
95
- #define THRUST_PP_CAT5(a, b, c, d, e) \
96
- THRUST_PP_CAT2(a, \
97
- THRUST_PP_CAT2(b, \
98
- THRUST_PP_CAT2(c, \
99
- THRUST_PP_CAT2(d, e)))) \
100
- /**/
101
-
102
- ///////////////////////////////////////////////////////////////////////////////
103
-
104
- /// \def THRUST_PP_EXPAND(x)
105
- /// \brief Performs macro expansion on \a x.
106
- ///
107
- /// \par <b>Example</b>:
108
- ///
109
- /// \code
110
- /// #include <thrust/detail/preprocessor.h>
111
- /// #include <iostream>
112
- ///
113
- /// #define FOO_BAR() "foo_bar"
114
- /// #define BUZZ() THRUST_PP_EXPAND(THRUST_PP_CAT2(FOO_, BAR)())
115
- ///
116
- /// int main()
117
- /// {
118
- /// std::cout << BUZZ() << "\n";
119
- /// }
120
- /// \endcode
121
- ///
122
- /// The above code expands to:
123
- ///
124
- /// \code
125
- /// #include <thrust/detail/preprocessor.h>
126
- /// #include <iostream>
127
- ///
128
- /// int main()
129
- /// {
130
- /// std::cout << "foo_bar" << "\n";
131
- /// }
132
- /// \endcode
133
- ///
134
- #define THRUST_PP_EXPAND(x) THRUST_PP_EXPAND_IMPL0(x)
135
- #define THRUST_PP_EXPAND_IMPL0(x) x
136
-
137
- #define THRUST_PP_EXPAND_ARGS(...) THRUST_PP_EXPAND_ARGS_IMPL0(__VA_ARGS__)
138
- #define THRUST_PP_EXPAND_ARGS_IMPL0(...) __VA_ARGS__
139
-
140
- #define THRUST_PP_HEAD(x, ...) x
141
-
142
- #define THRUST_PP_TAIL(x, ...) __VA_ARGS__
143
-
144
- ///////////////////////////////////////////////////////////////////////////////
145
-
146
- #define THRUST_PP_EMPTY()
147
-
148
- #define THRUST_PP_COMMA() ,
149
-
150
- ///////////////////////////////////////////////////////////////////////////////
151
-
152
- #define THRUST_PP_INC(x) THRUST_PP_INC_IMPL0(x)
153
-
154
- #define THRUST_PP_INC_IMPL0(x) THRUST_PP_CAT2(THRUST_PP_INC_IMPL_TAG, x)
155
-
156
- #define THRUST_PP_INC_IMPL_TAG0 1
157
- #define THRUST_PP_INC_IMPL_TAG1 2
158
- #define THRUST_PP_INC_IMPL_TAG2 3
159
- #define THRUST_PP_INC_IMPL_TAG3 4
160
- #define THRUST_PP_INC_IMPL_TAG4 5
161
- #define THRUST_PP_INC_IMPL_TAG5 6
162
- #define THRUST_PP_INC_IMPL_TAG6 7
163
- #define THRUST_PP_INC_IMPL_TAG7 8
164
- #define THRUST_PP_INC_IMPL_TAG8 9
165
- #define THRUST_PP_INC_IMPL_TAG9 10
166
- #define THRUST_PP_INC_IMPL_TAG10 11
167
- #define THRUST_PP_INC_IMPL_TAG11 12
168
- #define THRUST_PP_INC_IMPL_TAG12 13
169
- #define THRUST_PP_INC_IMPL_TAG13 14
170
- #define THRUST_PP_INC_IMPL_TAG14 15
171
- #define THRUST_PP_INC_IMPL_TAG15 16
172
- #define THRUST_PP_INC_IMPL_TAG16 17
173
- #define THRUST_PP_INC_IMPL_TAG17 18
174
- #define THRUST_PP_INC_IMPL_TAG18 19
175
- #define THRUST_PP_INC_IMPL_TAG19 20
176
- #define THRUST_PP_INC_IMPL_TAG20 21
177
- #define THRUST_PP_INC_IMPL_TAG21 22
178
- #define THRUST_PP_INC_IMPL_TAG22 23
179
- #define THRUST_PP_INC_IMPL_TAG23 24
180
- #define THRUST_PP_INC_IMPL_TAG24 25
181
- #define THRUST_PP_INC_IMPL_TAG25 26
182
- #define THRUST_PP_INC_IMPL_TAG26 27
183
- #define THRUST_PP_INC_IMPL_TAG27 28
184
- #define THRUST_PP_INC_IMPL_TAG28 29
185
- #define THRUST_PP_INC_IMPL_TAG29 30
186
- #define THRUST_PP_INC_IMPL_TAG30 31
187
- #define THRUST_PP_INC_IMPL_TAG31 32
188
- #define THRUST_PP_INC_IMPL_TAG32 33
189
- #define THRUST_PP_INC_IMPL_TAG33 34
190
- #define THRUST_PP_INC_IMPL_TAG34 35
191
- #define THRUST_PP_INC_IMPL_TAG35 36
192
- #define THRUST_PP_INC_IMPL_TAG36 37
193
- #define THRUST_PP_INC_IMPL_TAG37 38
194
- #define THRUST_PP_INC_IMPL_TAG38 39
195
- #define THRUST_PP_INC_IMPL_TAG39 40
196
- #define THRUST_PP_INC_IMPL_TAG40 41
197
- #define THRUST_PP_INC_IMPL_TAG41 42
198
- #define THRUST_PP_INC_IMPL_TAG42 43
199
- #define THRUST_PP_INC_IMPL_TAG43 44
200
- #define THRUST_PP_INC_IMPL_TAG44 45
201
- #define THRUST_PP_INC_IMPL_TAG45 46
202
- #define THRUST_PP_INC_IMPL_TAG46 47
203
- #define THRUST_PP_INC_IMPL_TAG47 48
204
- #define THRUST_PP_INC_IMPL_TAG48 49
205
- #define THRUST_PP_INC_IMPL_TAG49 50
206
- #define THRUST_PP_INC_IMPL_TAG50 51
207
- #define THRUST_PP_INC_IMPL_TAG51 52
208
- #define THRUST_PP_INC_IMPL_TAG52 53
209
- #define THRUST_PP_INC_IMPL_TAG53 54
210
- #define THRUST_PP_INC_IMPL_TAG54 55
211
- #define THRUST_PP_INC_IMPL_TAG55 56
212
- #define THRUST_PP_INC_IMPL_TAG56 57
213
- #define THRUST_PP_INC_IMPL_TAG57 58
214
- #define THRUST_PP_INC_IMPL_TAG58 59
215
- #define THRUST_PP_INC_IMPL_TAG59 60
216
- #define THRUST_PP_INC_IMPL_TAG60 61
217
- #define THRUST_PP_INC_IMPL_TAG61 62
218
- #define THRUST_PP_INC_IMPL_TAG62 63
219
- #define THRUST_PP_INC_IMPL_TAG63 64
220
- #define THRUST_PP_INC_IMPL_TAG64 65
221
- #define THRUST_PP_INC_IMPL_TAG65 66
222
- #define THRUST_PP_INC_IMPL_TAG66 67
223
- #define THRUST_PP_INC_IMPL_TAG67 68
224
- #define THRUST_PP_INC_IMPL_TAG68 69
225
- #define THRUST_PP_INC_IMPL_TAG69 70
226
- #define THRUST_PP_INC_IMPL_TAG70 71
227
- #define THRUST_PP_INC_IMPL_TAG71 72
228
- #define THRUST_PP_INC_IMPL_TAG72 73
229
- #define THRUST_PP_INC_IMPL_TAG73 74
230
- #define THRUST_PP_INC_IMPL_TAG74 75
231
- #define THRUST_PP_INC_IMPL_TAG75 76
232
- #define THRUST_PP_INC_IMPL_TAG76 77
233
- #define THRUST_PP_INC_IMPL_TAG77 78
234
- #define THRUST_PP_INC_IMPL_TAG78 79
235
- #define THRUST_PP_INC_IMPL_TAG79 80
236
- #define THRUST_PP_INC_IMPL_TAG80 81
237
- #define THRUST_PP_INC_IMPL_TAG81 82
238
- #define THRUST_PP_INC_IMPL_TAG82 83
239
- #define THRUST_PP_INC_IMPL_TAG83 84
240
- #define THRUST_PP_INC_IMPL_TAG84 85
241
- #define THRUST_PP_INC_IMPL_TAG85 86
242
- #define THRUST_PP_INC_IMPL_TAG86 87
243
- #define THRUST_PP_INC_IMPL_TAG87 88
244
- #define THRUST_PP_INC_IMPL_TAG88 89
245
- #define THRUST_PP_INC_IMPL_TAG89 90
246
- #define THRUST_PP_INC_IMPL_TAG90 91
247
- #define THRUST_PP_INC_IMPL_TAG91 92
248
- #define THRUST_PP_INC_IMPL_TAG92 93
249
- #define THRUST_PP_INC_IMPL_TAG93 94
250
- #define THRUST_PP_INC_IMPL_TAG94 95
251
- #define THRUST_PP_INC_IMPL_TAG95 96
252
- #define THRUST_PP_INC_IMPL_TAG96 97
253
- #define THRUST_PP_INC_IMPL_TAG97 98
254
- #define THRUST_PP_INC_IMPL_TAG98 99
255
- #define THRUST_PP_INC_IMPL_TAG99 100
256
- #define THRUST_PP_INC_IMPL_TAG100 101
257
- #define THRUST_PP_INC_IMPL_TAG101 102
258
- #define THRUST_PP_INC_IMPL_TAG102 103
259
- #define THRUST_PP_INC_IMPL_TAG103 104
260
- #define THRUST_PP_INC_IMPL_TAG104 105
261
- #define THRUST_PP_INC_IMPL_TAG105 106
262
- #define THRUST_PP_INC_IMPL_TAG106 107
263
- #define THRUST_PP_INC_IMPL_TAG107 108
264
- #define THRUST_PP_INC_IMPL_TAG108 109
265
- #define THRUST_PP_INC_IMPL_TAG109 110
266
- #define THRUST_PP_INC_IMPL_TAG110 111
267
- #define THRUST_PP_INC_IMPL_TAG111 112
268
- #define THRUST_PP_INC_IMPL_TAG112 113
269
- #define THRUST_PP_INC_IMPL_TAG113 114
270
- #define THRUST_PP_INC_IMPL_TAG114 115
271
- #define THRUST_PP_INC_IMPL_TAG115 116
272
- #define THRUST_PP_INC_IMPL_TAG116 117
273
- #define THRUST_PP_INC_IMPL_TAG117 118
274
- #define THRUST_PP_INC_IMPL_TAG118 119
275
- #define THRUST_PP_INC_IMPL_TAG119 120
276
- #define THRUST_PP_INC_IMPL_TAG120 121
277
- #define THRUST_PP_INC_IMPL_TAG121 122
278
- #define THRUST_PP_INC_IMPL_TAG122 123
279
- #define THRUST_PP_INC_IMPL_TAG123 124
280
- #define THRUST_PP_INC_IMPL_TAG124 125
281
- #define THRUST_PP_INC_IMPL_TAG125 126
282
- #define THRUST_PP_INC_IMPL_TAG126 127
283
- #define THRUST_PP_INC_IMPL_TAG127 128
284
- #define THRUST_PP_INC_IMPL_TAG128 129
285
- #define THRUST_PP_INC_IMPL_TAG129 130
286
- #define THRUST_PP_INC_IMPL_TAG130 131
287
- #define THRUST_PP_INC_IMPL_TAG131 132
288
- #define THRUST_PP_INC_IMPL_TAG132 133
289
- #define THRUST_PP_INC_IMPL_TAG133 134
290
- #define THRUST_PP_INC_IMPL_TAG134 135
291
- #define THRUST_PP_INC_IMPL_TAG135 136
292
- #define THRUST_PP_INC_IMPL_TAG136 137
293
- #define THRUST_PP_INC_IMPL_TAG137 138
294
- #define THRUST_PP_INC_IMPL_TAG138 139
295
- #define THRUST_PP_INC_IMPL_TAG139 140
296
- #define THRUST_PP_INC_IMPL_TAG140 141
297
- #define THRUST_PP_INC_IMPL_TAG141 142
298
- #define THRUST_PP_INC_IMPL_TAG142 143
299
- #define THRUST_PP_INC_IMPL_TAG143 144
300
- #define THRUST_PP_INC_IMPL_TAG144 145
301
- #define THRUST_PP_INC_IMPL_TAG145 146
302
- #define THRUST_PP_INC_IMPL_TAG146 147
303
- #define THRUST_PP_INC_IMPL_TAG147 148
304
- #define THRUST_PP_INC_IMPL_TAG148 149
305
- #define THRUST_PP_INC_IMPL_TAG149 150
306
- #define THRUST_PP_INC_IMPL_TAG150 151
307
- #define THRUST_PP_INC_IMPL_TAG151 152
308
- #define THRUST_PP_INC_IMPL_TAG152 153
309
- #define THRUST_PP_INC_IMPL_TAG153 154
310
- #define THRUST_PP_INC_IMPL_TAG154 155
311
- #define THRUST_PP_INC_IMPL_TAG155 156
312
- #define THRUST_PP_INC_IMPL_TAG156 157
313
- #define THRUST_PP_INC_IMPL_TAG157 158
314
- #define THRUST_PP_INC_IMPL_TAG158 159
315
- #define THRUST_PP_INC_IMPL_TAG159 160
316
- #define THRUST_PP_INC_IMPL_TAG160 161
317
- #define THRUST_PP_INC_IMPL_TAG161 162
318
- #define THRUST_PP_INC_IMPL_TAG162 163
319
- #define THRUST_PP_INC_IMPL_TAG163 164
320
- #define THRUST_PP_INC_IMPL_TAG164 165
321
- #define THRUST_PP_INC_IMPL_TAG165 166
322
- #define THRUST_PP_INC_IMPL_TAG166 167
323
- #define THRUST_PP_INC_IMPL_TAG167 168
324
- #define THRUST_PP_INC_IMPL_TAG168 169
325
- #define THRUST_PP_INC_IMPL_TAG169 170
326
- #define THRUST_PP_INC_IMPL_TAG170 171
327
- #define THRUST_PP_INC_IMPL_TAG171 172
328
- #define THRUST_PP_INC_IMPL_TAG172 173
329
- #define THRUST_PP_INC_IMPL_TAG173 174
330
- #define THRUST_PP_INC_IMPL_TAG174 175
331
- #define THRUST_PP_INC_IMPL_TAG175 176
332
- #define THRUST_PP_INC_IMPL_TAG176 177
333
- #define THRUST_PP_INC_IMPL_TAG177 178
334
- #define THRUST_PP_INC_IMPL_TAG178 179
335
- #define THRUST_PP_INC_IMPL_TAG179 180
336
- #define THRUST_PP_INC_IMPL_TAG180 181
337
- #define THRUST_PP_INC_IMPL_TAG181 182
338
- #define THRUST_PP_INC_IMPL_TAG182 183
339
- #define THRUST_PP_INC_IMPL_TAG183 184
340
- #define THRUST_PP_INC_IMPL_TAG184 185
341
- #define THRUST_PP_INC_IMPL_TAG185 186
342
- #define THRUST_PP_INC_IMPL_TAG186 187
343
- #define THRUST_PP_INC_IMPL_TAG187 188
344
- #define THRUST_PP_INC_IMPL_TAG188 189
345
- #define THRUST_PP_INC_IMPL_TAG189 190
346
- #define THRUST_PP_INC_IMPL_TAG190 191
347
- #define THRUST_PP_INC_IMPL_TAG191 192
348
- #define THRUST_PP_INC_IMPL_TAG192 193
349
- #define THRUST_PP_INC_IMPL_TAG193 194
350
- #define THRUST_PP_INC_IMPL_TAG194 195
351
- #define THRUST_PP_INC_IMPL_TAG195 196
352
- #define THRUST_PP_INC_IMPL_TAG196 197
353
- #define THRUST_PP_INC_IMPL_TAG197 198
354
- #define THRUST_PP_INC_IMPL_TAG198 199
355
- #define THRUST_PP_INC_IMPL_TAG199 200
356
- #define THRUST_PP_INC_IMPL_TAG200 201
357
- #define THRUST_PP_INC_IMPL_TAG201 202
358
- #define THRUST_PP_INC_IMPL_TAG202 203
359
- #define THRUST_PP_INC_IMPL_TAG203 204
360
- #define THRUST_PP_INC_IMPL_TAG204 205
361
- #define THRUST_PP_INC_IMPL_TAG205 206
362
- #define THRUST_PP_INC_IMPL_TAG206 207
363
- #define THRUST_PP_INC_IMPL_TAG207 208
364
- #define THRUST_PP_INC_IMPL_TAG208 209
365
- #define THRUST_PP_INC_IMPL_TAG209 210
366
- #define THRUST_PP_INC_IMPL_TAG210 211
367
- #define THRUST_PP_INC_IMPL_TAG211 212
368
- #define THRUST_PP_INC_IMPL_TAG212 213
369
- #define THRUST_PP_INC_IMPL_TAG213 214
370
- #define THRUST_PP_INC_IMPL_TAG214 215
371
- #define THRUST_PP_INC_IMPL_TAG215 216
372
- #define THRUST_PP_INC_IMPL_TAG216 217
373
- #define THRUST_PP_INC_IMPL_TAG217 218
374
- #define THRUST_PP_INC_IMPL_TAG218 219
375
- #define THRUST_PP_INC_IMPL_TAG219 220
376
- #define THRUST_PP_INC_IMPL_TAG220 221
377
- #define THRUST_PP_INC_IMPL_TAG221 222
378
- #define THRUST_PP_INC_IMPL_TAG222 223
379
- #define THRUST_PP_INC_IMPL_TAG223 224
380
- #define THRUST_PP_INC_IMPL_TAG224 225
381
- #define THRUST_PP_INC_IMPL_TAG225 226
382
- #define THRUST_PP_INC_IMPL_TAG226 227
383
- #define THRUST_PP_INC_IMPL_TAG227 228
384
- #define THRUST_PP_INC_IMPL_TAG228 229
385
- #define THRUST_PP_INC_IMPL_TAG229 230
386
- #define THRUST_PP_INC_IMPL_TAG230 231
387
- #define THRUST_PP_INC_IMPL_TAG231 232
388
- #define THRUST_PP_INC_IMPL_TAG232 233
389
- #define THRUST_PP_INC_IMPL_TAG233 234
390
- #define THRUST_PP_INC_IMPL_TAG234 235
391
- #define THRUST_PP_INC_IMPL_TAG235 236
392
- #define THRUST_PP_INC_IMPL_TAG236 237
393
- #define THRUST_PP_INC_IMPL_TAG237 238
394
- #define THRUST_PP_INC_IMPL_TAG238 239
395
- #define THRUST_PP_INC_IMPL_TAG239 240
396
- #define THRUST_PP_INC_IMPL_TAG240 241
397
- #define THRUST_PP_INC_IMPL_TAG241 242
398
- #define THRUST_PP_INC_IMPL_TAG242 243
399
- #define THRUST_PP_INC_IMPL_TAG243 244
400
- #define THRUST_PP_INC_IMPL_TAG244 245
401
- #define THRUST_PP_INC_IMPL_TAG245 246
402
- #define THRUST_PP_INC_IMPL_TAG246 247
403
- #define THRUST_PP_INC_IMPL_TAG247 248
404
- #define THRUST_PP_INC_IMPL_TAG248 249
405
- #define THRUST_PP_INC_IMPL_TAG249 250
406
- #define THRUST_PP_INC_IMPL_TAG250 251
407
- #define THRUST_PP_INC_IMPL_TAG251 252
408
- #define THRUST_PP_INC_IMPL_TAG252 253
409
- #define THRUST_PP_INC_IMPL_TAG253 254
410
- #define THRUST_PP_INC_IMPL_TAG254 255
411
- #define THRUST_PP_INC_IMPL_TAG255 256
412
- #define THRUST_PP_INC_IMPL_TAG256 256
413
-
414
- #define THRUST_PP_DEC(x) THRUST_PP_DEC_IMPL0(x)
415
-
416
- #define THRUST_PP_DEC_IMPL0(x) THRUST_PP_CAT2(THRUST_PP_DEC_IMPL_TAG, x)
417
-
418
- #define THRUST_PP_DEC_IMPL_TAG0 0
419
- #define THRUST_PP_DEC_IMPL_TAG1 0
420
- #define THRUST_PP_DEC_IMPL_TAG2 1
421
- #define THRUST_PP_DEC_IMPL_TAG3 2
422
- #define THRUST_PP_DEC_IMPL_TAG4 3
423
- #define THRUST_PP_DEC_IMPL_TAG5 4
424
- #define THRUST_PP_DEC_IMPL_TAG6 5
425
- #define THRUST_PP_DEC_IMPL_TAG7 6
426
- #define THRUST_PP_DEC_IMPL_TAG8 7
427
- #define THRUST_PP_DEC_IMPL_TAG9 8
428
- #define THRUST_PP_DEC_IMPL_TAG10 9
429
- #define THRUST_PP_DEC_IMPL_TAG11 10
430
- #define THRUST_PP_DEC_IMPL_TAG12 11
431
- #define THRUST_PP_DEC_IMPL_TAG13 12
432
- #define THRUST_PP_DEC_IMPL_TAG14 13
433
- #define THRUST_PP_DEC_IMPL_TAG15 14
434
- #define THRUST_PP_DEC_IMPL_TAG16 15
435
- #define THRUST_PP_DEC_IMPL_TAG17 16
436
- #define THRUST_PP_DEC_IMPL_TAG18 17
437
- #define THRUST_PP_DEC_IMPL_TAG19 18
438
- #define THRUST_PP_DEC_IMPL_TAG20 19
439
- #define THRUST_PP_DEC_IMPL_TAG21 20
440
- #define THRUST_PP_DEC_IMPL_TAG22 21
441
- #define THRUST_PP_DEC_IMPL_TAG23 22
442
- #define THRUST_PP_DEC_IMPL_TAG24 23
443
- #define THRUST_PP_DEC_IMPL_TAG25 24
444
- #define THRUST_PP_DEC_IMPL_TAG26 25
445
- #define THRUST_PP_DEC_IMPL_TAG27 26
446
- #define THRUST_PP_DEC_IMPL_TAG28 27
447
- #define THRUST_PP_DEC_IMPL_TAG29 28
448
- #define THRUST_PP_DEC_IMPL_TAG30 29
449
- #define THRUST_PP_DEC_IMPL_TAG31 30
450
- #define THRUST_PP_DEC_IMPL_TAG32 31
451
- #define THRUST_PP_DEC_IMPL_TAG33 32
452
- #define THRUST_PP_DEC_IMPL_TAG34 33
453
- #define THRUST_PP_DEC_IMPL_TAG35 34
454
- #define THRUST_PP_DEC_IMPL_TAG36 35
455
- #define THRUST_PP_DEC_IMPL_TAG37 36
456
- #define THRUST_PP_DEC_IMPL_TAG38 37
457
- #define THRUST_PP_DEC_IMPL_TAG39 38
458
- #define THRUST_PP_DEC_IMPL_TAG40 39
459
- #define THRUST_PP_DEC_IMPL_TAG41 40
460
- #define THRUST_PP_DEC_IMPL_TAG42 41
461
- #define THRUST_PP_DEC_IMPL_TAG43 42
462
- #define THRUST_PP_DEC_IMPL_TAG44 43
463
- #define THRUST_PP_DEC_IMPL_TAG45 44
464
- #define THRUST_PP_DEC_IMPL_TAG46 45
465
- #define THRUST_PP_DEC_IMPL_TAG47 46
466
- #define THRUST_PP_DEC_IMPL_TAG48 47
467
- #define THRUST_PP_DEC_IMPL_TAG49 48
468
- #define THRUST_PP_DEC_IMPL_TAG50 49
469
- #define THRUST_PP_DEC_IMPL_TAG51 50
470
- #define THRUST_PP_DEC_IMPL_TAG52 51
471
- #define THRUST_PP_DEC_IMPL_TAG53 52
472
- #define THRUST_PP_DEC_IMPL_TAG54 53
473
- #define THRUST_PP_DEC_IMPL_TAG55 54
474
- #define THRUST_PP_DEC_IMPL_TAG56 55
475
- #define THRUST_PP_DEC_IMPL_TAG57 56
476
- #define THRUST_PP_DEC_IMPL_TAG58 57
477
- #define THRUST_PP_DEC_IMPL_TAG59 58
478
- #define THRUST_PP_DEC_IMPL_TAG60 59
479
- #define THRUST_PP_DEC_IMPL_TAG61 60
480
- #define THRUST_PP_DEC_IMPL_TAG62 61
481
- #define THRUST_PP_DEC_IMPL_TAG63 62
482
- #define THRUST_PP_DEC_IMPL_TAG64 63
483
- #define THRUST_PP_DEC_IMPL_TAG65 64
484
- #define THRUST_PP_DEC_IMPL_TAG66 65
485
- #define THRUST_PP_DEC_IMPL_TAG67 66
486
- #define THRUST_PP_DEC_IMPL_TAG68 67
487
- #define THRUST_PP_DEC_IMPL_TAG69 68
488
- #define THRUST_PP_DEC_IMPL_TAG70 69
489
- #define THRUST_PP_DEC_IMPL_TAG71 70
490
- #define THRUST_PP_DEC_IMPL_TAG72 71
491
- #define THRUST_PP_DEC_IMPL_TAG73 72
492
- #define THRUST_PP_DEC_IMPL_TAG74 73
493
- #define THRUST_PP_DEC_IMPL_TAG75 74
494
- #define THRUST_PP_DEC_IMPL_TAG76 75
495
- #define THRUST_PP_DEC_IMPL_TAG77 76
496
- #define THRUST_PP_DEC_IMPL_TAG78 77
497
- #define THRUST_PP_DEC_IMPL_TAG79 78
498
- #define THRUST_PP_DEC_IMPL_TAG80 79
499
- #define THRUST_PP_DEC_IMPL_TAG81 80
500
- #define THRUST_PP_DEC_IMPL_TAG82 81
501
- #define THRUST_PP_DEC_IMPL_TAG83 82
502
- #define THRUST_PP_DEC_IMPL_TAG84 83
503
- #define THRUST_PP_DEC_IMPL_TAG85 84
504
- #define THRUST_PP_DEC_IMPL_TAG86 85
505
- #define THRUST_PP_DEC_IMPL_TAG87 86
506
- #define THRUST_PP_DEC_IMPL_TAG88 87
507
- #define THRUST_PP_DEC_IMPL_TAG89 88
508
- #define THRUST_PP_DEC_IMPL_TAG90 89
509
- #define THRUST_PP_DEC_IMPL_TAG91 90
510
- #define THRUST_PP_DEC_IMPL_TAG92 91
511
- #define THRUST_PP_DEC_IMPL_TAG93 92
512
- #define THRUST_PP_DEC_IMPL_TAG94 93
513
- #define THRUST_PP_DEC_IMPL_TAG95 94
514
- #define THRUST_PP_DEC_IMPL_TAG96 95
515
- #define THRUST_PP_DEC_IMPL_TAG97 96
516
- #define THRUST_PP_DEC_IMPL_TAG98 97
517
- #define THRUST_PP_DEC_IMPL_TAG99 98
518
- #define THRUST_PP_DEC_IMPL_TAG100 99
519
- #define THRUST_PP_DEC_IMPL_TAG101 100
520
- #define THRUST_PP_DEC_IMPL_TAG102 101
521
- #define THRUST_PP_DEC_IMPL_TAG103 102
522
- #define THRUST_PP_DEC_IMPL_TAG104 103
523
- #define THRUST_PP_DEC_IMPL_TAG105 104
524
- #define THRUST_PP_DEC_IMPL_TAG106 105
525
- #define THRUST_PP_DEC_IMPL_TAG107 106
526
- #define THRUST_PP_DEC_IMPL_TAG108 107
527
- #define THRUST_PP_DEC_IMPL_TAG109 108
528
- #define THRUST_PP_DEC_IMPL_TAG110 109
529
- #define THRUST_PP_DEC_IMPL_TAG111 110
530
- #define THRUST_PP_DEC_IMPL_TAG112 111
531
- #define THRUST_PP_DEC_IMPL_TAG113 112
532
- #define THRUST_PP_DEC_IMPL_TAG114 113
533
- #define THRUST_PP_DEC_IMPL_TAG115 114
534
- #define THRUST_PP_DEC_IMPL_TAG116 115
535
- #define THRUST_PP_DEC_IMPL_TAG117 116
536
- #define THRUST_PP_DEC_IMPL_TAG118 117
537
- #define THRUST_PP_DEC_IMPL_TAG119 118
538
- #define THRUST_PP_DEC_IMPL_TAG120 119
539
- #define THRUST_PP_DEC_IMPL_TAG121 120
540
- #define THRUST_PP_DEC_IMPL_TAG122 121
541
- #define THRUST_PP_DEC_IMPL_TAG123 122
542
- #define THRUST_PP_DEC_IMPL_TAG124 123
543
- #define THRUST_PP_DEC_IMPL_TAG125 124
544
- #define THRUST_PP_DEC_IMPL_TAG126 125
545
- #define THRUST_PP_DEC_IMPL_TAG127 126
546
- #define THRUST_PP_DEC_IMPL_TAG128 127
547
- #define THRUST_PP_DEC_IMPL_TAG129 128
548
- #define THRUST_PP_DEC_IMPL_TAG130 129
549
- #define THRUST_PP_DEC_IMPL_TAG131 130
550
- #define THRUST_PP_DEC_IMPL_TAG132 131
551
- #define THRUST_PP_DEC_IMPL_TAG133 132
552
- #define THRUST_PP_DEC_IMPL_TAG134 133
553
- #define THRUST_PP_DEC_IMPL_TAG135 134
554
- #define THRUST_PP_DEC_IMPL_TAG136 135
555
- #define THRUST_PP_DEC_IMPL_TAG137 136
556
- #define THRUST_PP_DEC_IMPL_TAG138 137
557
- #define THRUST_PP_DEC_IMPL_TAG139 138
558
- #define THRUST_PP_DEC_IMPL_TAG140 139
559
- #define THRUST_PP_DEC_IMPL_TAG141 140
560
- #define THRUST_PP_DEC_IMPL_TAG142 141
561
- #define THRUST_PP_DEC_IMPL_TAG143 142
562
- #define THRUST_PP_DEC_IMPL_TAG144 143
563
- #define THRUST_PP_DEC_IMPL_TAG145 144
564
- #define THRUST_PP_DEC_IMPL_TAG146 145
565
- #define THRUST_PP_DEC_IMPL_TAG147 146
566
- #define THRUST_PP_DEC_IMPL_TAG148 147
567
- #define THRUST_PP_DEC_IMPL_TAG149 148
568
- #define THRUST_PP_DEC_IMPL_TAG150 149
569
- #define THRUST_PP_DEC_IMPL_TAG151 150
570
- #define THRUST_PP_DEC_IMPL_TAG152 151
571
- #define THRUST_PP_DEC_IMPL_TAG153 152
572
- #define THRUST_PP_DEC_IMPL_TAG154 153
573
- #define THRUST_PP_DEC_IMPL_TAG155 154
574
- #define THRUST_PP_DEC_IMPL_TAG156 155
575
- #define THRUST_PP_DEC_IMPL_TAG157 156
576
- #define THRUST_PP_DEC_IMPL_TAG158 157
577
- #define THRUST_PP_DEC_IMPL_TAG159 158
578
- #define THRUST_PP_DEC_IMPL_TAG160 159
579
- #define THRUST_PP_DEC_IMPL_TAG161 160
580
- #define THRUST_PP_DEC_IMPL_TAG162 161
581
- #define THRUST_PP_DEC_IMPL_TAG163 162
582
- #define THRUST_PP_DEC_IMPL_TAG164 163
583
- #define THRUST_PP_DEC_IMPL_TAG165 164
584
- #define THRUST_PP_DEC_IMPL_TAG166 165
585
- #define THRUST_PP_DEC_IMPL_TAG167 166
586
- #define THRUST_PP_DEC_IMPL_TAG168 167
587
- #define THRUST_PP_DEC_IMPL_TAG169 168
588
- #define THRUST_PP_DEC_IMPL_TAG170 169
589
- #define THRUST_PP_DEC_IMPL_TAG171 170
590
- #define THRUST_PP_DEC_IMPL_TAG172 171
591
- #define THRUST_PP_DEC_IMPL_TAG173 172
592
- #define THRUST_PP_DEC_IMPL_TAG174 173
593
- #define THRUST_PP_DEC_IMPL_TAG175 174
594
- #define THRUST_PP_DEC_IMPL_TAG176 175
595
- #define THRUST_PP_DEC_IMPL_TAG177 176
596
- #define THRUST_PP_DEC_IMPL_TAG178 177
597
- #define THRUST_PP_DEC_IMPL_TAG179 178
598
- #define THRUST_PP_DEC_IMPL_TAG180 179
599
- #define THRUST_PP_DEC_IMPL_TAG181 180
600
- #define THRUST_PP_DEC_IMPL_TAG182 181
601
- #define THRUST_PP_DEC_IMPL_TAG183 182
602
- #define THRUST_PP_DEC_IMPL_TAG184 183
603
- #define THRUST_PP_DEC_IMPL_TAG185 184
604
- #define THRUST_PP_DEC_IMPL_TAG186 185
605
- #define THRUST_PP_DEC_IMPL_TAG187 186
606
- #define THRUST_PP_DEC_IMPL_TAG188 187
607
- #define THRUST_PP_DEC_IMPL_TAG189 188
608
- #define THRUST_PP_DEC_IMPL_TAG190 189
609
- #define THRUST_PP_DEC_IMPL_TAG191 190
610
- #define THRUST_PP_DEC_IMPL_TAG192 191
611
- #define THRUST_PP_DEC_IMPL_TAG193 192
612
- #define THRUST_PP_DEC_IMPL_TAG194 193
613
- #define THRUST_PP_DEC_IMPL_TAG195 194
614
- #define THRUST_PP_DEC_IMPL_TAG196 195
615
- #define THRUST_PP_DEC_IMPL_TAG197 196
616
- #define THRUST_PP_DEC_IMPL_TAG198 197
617
- #define THRUST_PP_DEC_IMPL_TAG199 198
618
- #define THRUST_PP_DEC_IMPL_TAG200 199
619
- #define THRUST_PP_DEC_IMPL_TAG201 200
620
- #define THRUST_PP_DEC_IMPL_TAG202 201
621
- #define THRUST_PP_DEC_IMPL_TAG203 202
622
- #define THRUST_PP_DEC_IMPL_TAG204 203
623
- #define THRUST_PP_DEC_IMPL_TAG205 204
624
- #define THRUST_PP_DEC_IMPL_TAG206 205
625
- #define THRUST_PP_DEC_IMPL_TAG207 206
626
- #define THRUST_PP_DEC_IMPL_TAG208 207
627
- #define THRUST_PP_DEC_IMPL_TAG209 208
628
- #define THRUST_PP_DEC_IMPL_TAG210 209
629
- #define THRUST_PP_DEC_IMPL_TAG211 210
630
- #define THRUST_PP_DEC_IMPL_TAG212 211
631
- #define THRUST_PP_DEC_IMPL_TAG213 212
632
- #define THRUST_PP_DEC_IMPL_TAG214 213
633
- #define THRUST_PP_DEC_IMPL_TAG215 214
634
- #define THRUST_PP_DEC_IMPL_TAG216 215
635
- #define THRUST_PP_DEC_IMPL_TAG217 216
636
- #define THRUST_PP_DEC_IMPL_TAG218 217
637
- #define THRUST_PP_DEC_IMPL_TAG219 218
638
- #define THRUST_PP_DEC_IMPL_TAG220 219
639
- #define THRUST_PP_DEC_IMPL_TAG221 220
640
- #define THRUST_PP_DEC_IMPL_TAG222 221
641
- #define THRUST_PP_DEC_IMPL_TAG223 222
642
- #define THRUST_PP_DEC_IMPL_TAG224 223
643
- #define THRUST_PP_DEC_IMPL_TAG225 224
644
- #define THRUST_PP_DEC_IMPL_TAG226 225
645
- #define THRUST_PP_DEC_IMPL_TAG227 226
646
- #define THRUST_PP_DEC_IMPL_TAG228 227
647
- #define THRUST_PP_DEC_IMPL_TAG229 228
648
- #define THRUST_PP_DEC_IMPL_TAG230 229
649
- #define THRUST_PP_DEC_IMPL_TAG231 230
650
- #define THRUST_PP_DEC_IMPL_TAG232 231
651
- #define THRUST_PP_DEC_IMPL_TAG233 232
652
- #define THRUST_PP_DEC_IMPL_TAG234 233
653
- #define THRUST_PP_DEC_IMPL_TAG235 234
654
- #define THRUST_PP_DEC_IMPL_TAG236 235
655
- #define THRUST_PP_DEC_IMPL_TAG237 236
656
- #define THRUST_PP_DEC_IMPL_TAG238 237
657
- #define THRUST_PP_DEC_IMPL_TAG239 238
658
- #define THRUST_PP_DEC_IMPL_TAG240 239
659
- #define THRUST_PP_DEC_IMPL_TAG241 240
660
- #define THRUST_PP_DEC_IMPL_TAG242 241
661
- #define THRUST_PP_DEC_IMPL_TAG243 242
662
- #define THRUST_PP_DEC_IMPL_TAG244 243
663
- #define THRUST_PP_DEC_IMPL_TAG245 244
664
- #define THRUST_PP_DEC_IMPL_TAG246 245
665
- #define THRUST_PP_DEC_IMPL_TAG247 246
666
- #define THRUST_PP_DEC_IMPL_TAG248 247
667
- #define THRUST_PP_DEC_IMPL_TAG249 248
668
- #define THRUST_PP_DEC_IMPL_TAG250 249
669
- #define THRUST_PP_DEC_IMPL_TAG251 250
670
- #define THRUST_PP_DEC_IMPL_TAG252 251
671
- #define THRUST_PP_DEC_IMPL_TAG253 252
672
- #define THRUST_PP_DEC_IMPL_TAG254 253
673
- #define THRUST_PP_DEC_IMPL_TAG255 254
674
- #define THRUST_PP_DEC_IMPL_TAG256 255
675
- #define THRUST_PP_DEC_IMPL_TAG257 256
676
-
677
- #define THRUST_PP_BOOL(x) THRUST_PP_BOOL_IMPL0(x)
678
-
679
- #define THRUST_PP_BOOL_IMPL0(x) THRUST_PP_CAT2(THRUST_PP_BOOL_IMPL_TAG, x)
680
-
681
- #define THRUST_PP_BOOL_IMPL_TAG0 0
682
- #define THRUST_PP_BOOL_IMPL_TAG1 1
683
- #define THRUST_PP_BOOL_IMPL_TAG2 1
684
- #define THRUST_PP_BOOL_IMPL_TAG3 1
685
- #define THRUST_PP_BOOL_IMPL_TAG4 1
686
- #define THRUST_PP_BOOL_IMPL_TAG5 1
687
- #define THRUST_PP_BOOL_IMPL_TAG6 1
688
- #define THRUST_PP_BOOL_IMPL_TAG7 1
689
- #define THRUST_PP_BOOL_IMPL_TAG8 1
690
- #define THRUST_PP_BOOL_IMPL_TAG9 1
691
- #define THRUST_PP_BOOL_IMPL_TAG10 1
692
- #define THRUST_PP_BOOL_IMPL_TAG11 1
693
- #define THRUST_PP_BOOL_IMPL_TAG12 1
694
- #define THRUST_PP_BOOL_IMPL_TAG13 1
695
- #define THRUST_PP_BOOL_IMPL_TAG14 1
696
- #define THRUST_PP_BOOL_IMPL_TAG15 1
697
- #define THRUST_PP_BOOL_IMPL_TAG16 1
698
- #define THRUST_PP_BOOL_IMPL_TAG17 1
699
- #define THRUST_PP_BOOL_IMPL_TAG18 1
700
- #define THRUST_PP_BOOL_IMPL_TAG19 1
701
- #define THRUST_PP_BOOL_IMPL_TAG20 1
702
- #define THRUST_PP_BOOL_IMPL_TAG21 1
703
- #define THRUST_PP_BOOL_IMPL_TAG22 1
704
- #define THRUST_PP_BOOL_IMPL_TAG23 1
705
- #define THRUST_PP_BOOL_IMPL_TAG24 1
706
- #define THRUST_PP_BOOL_IMPL_TAG25 1
707
- #define THRUST_PP_BOOL_IMPL_TAG26 1
708
- #define THRUST_PP_BOOL_IMPL_TAG27 1
709
- #define THRUST_PP_BOOL_IMPL_TAG28 1
710
- #define THRUST_PP_BOOL_IMPL_TAG29 1
711
- #define THRUST_PP_BOOL_IMPL_TAG30 1
712
- #define THRUST_PP_BOOL_IMPL_TAG31 1
713
- #define THRUST_PP_BOOL_IMPL_TAG32 1
714
- #define THRUST_PP_BOOL_IMPL_TAG33 1
715
- #define THRUST_PP_BOOL_IMPL_TAG34 1
716
- #define THRUST_PP_BOOL_IMPL_TAG35 1
717
- #define THRUST_PP_BOOL_IMPL_TAG36 1
718
- #define THRUST_PP_BOOL_IMPL_TAG37 1
719
- #define THRUST_PP_BOOL_IMPL_TAG38 1
720
- #define THRUST_PP_BOOL_IMPL_TAG39 1
721
- #define THRUST_PP_BOOL_IMPL_TAG40 1
722
- #define THRUST_PP_BOOL_IMPL_TAG41 1
723
- #define THRUST_PP_BOOL_IMPL_TAG42 1
724
- #define THRUST_PP_BOOL_IMPL_TAG43 1
725
- #define THRUST_PP_BOOL_IMPL_TAG44 1
726
- #define THRUST_PP_BOOL_IMPL_TAG45 1
727
- #define THRUST_PP_BOOL_IMPL_TAG46 1
728
- #define THRUST_PP_BOOL_IMPL_TAG47 1
729
- #define THRUST_PP_BOOL_IMPL_TAG48 1
730
- #define THRUST_PP_BOOL_IMPL_TAG49 1
731
- #define THRUST_PP_BOOL_IMPL_TAG50 1
732
- #define THRUST_PP_BOOL_IMPL_TAG51 1
733
- #define THRUST_PP_BOOL_IMPL_TAG52 1
734
- #define THRUST_PP_BOOL_IMPL_TAG53 1
735
- #define THRUST_PP_BOOL_IMPL_TAG54 1
736
- #define THRUST_PP_BOOL_IMPL_TAG55 1
737
- #define THRUST_PP_BOOL_IMPL_TAG56 1
738
- #define THRUST_PP_BOOL_IMPL_TAG57 1
739
- #define THRUST_PP_BOOL_IMPL_TAG58 1
740
- #define THRUST_PP_BOOL_IMPL_TAG59 1
741
- #define THRUST_PP_BOOL_IMPL_TAG60 1
742
- #define THRUST_PP_BOOL_IMPL_TAG61 1
743
- #define THRUST_PP_BOOL_IMPL_TAG62 1
744
- #define THRUST_PP_BOOL_IMPL_TAG63 1
745
- #define THRUST_PP_BOOL_IMPL_TAG64 1
746
- #define THRUST_PP_BOOL_IMPL_TAG65 1
747
- #define THRUST_PP_BOOL_IMPL_TAG66 1
748
- #define THRUST_PP_BOOL_IMPL_TAG67 1
749
- #define THRUST_PP_BOOL_IMPL_TAG68 1
750
- #define THRUST_PP_BOOL_IMPL_TAG69 1
751
- #define THRUST_PP_BOOL_IMPL_TAG70 1
752
- #define THRUST_PP_BOOL_IMPL_TAG71 1
753
- #define THRUST_PP_BOOL_IMPL_TAG72 1
754
- #define THRUST_PP_BOOL_IMPL_TAG73 1
755
- #define THRUST_PP_BOOL_IMPL_TAG74 1
756
- #define THRUST_PP_BOOL_IMPL_TAG75 1
757
- #define THRUST_PP_BOOL_IMPL_TAG76 1
758
- #define THRUST_PP_BOOL_IMPL_TAG77 1
759
- #define THRUST_PP_BOOL_IMPL_TAG78 1
760
- #define THRUST_PP_BOOL_IMPL_TAG79 1
761
- #define THRUST_PP_BOOL_IMPL_TAG80 1
762
- #define THRUST_PP_BOOL_IMPL_TAG81 1
763
- #define THRUST_PP_BOOL_IMPL_TAG82 1
764
- #define THRUST_PP_BOOL_IMPL_TAG83 1
765
- #define THRUST_PP_BOOL_IMPL_TAG84 1
766
- #define THRUST_PP_BOOL_IMPL_TAG85 1
767
- #define THRUST_PP_BOOL_IMPL_TAG86 1
768
- #define THRUST_PP_BOOL_IMPL_TAG87 1
769
- #define THRUST_PP_BOOL_IMPL_TAG88 1
770
- #define THRUST_PP_BOOL_IMPL_TAG89 1
771
- #define THRUST_PP_BOOL_IMPL_TAG90 1
772
- #define THRUST_PP_BOOL_IMPL_TAG91 1
773
- #define THRUST_PP_BOOL_IMPL_TAG92 1
774
- #define THRUST_PP_BOOL_IMPL_TAG93 1
775
- #define THRUST_PP_BOOL_IMPL_TAG94 1
776
- #define THRUST_PP_BOOL_IMPL_TAG95 1
777
- #define THRUST_PP_BOOL_IMPL_TAG96 1
778
- #define THRUST_PP_BOOL_IMPL_TAG97 1
779
- #define THRUST_PP_BOOL_IMPL_TAG98 1
780
- #define THRUST_PP_BOOL_IMPL_TAG99 1
781
- #define THRUST_PP_BOOL_IMPL_TAG100 1
782
- #define THRUST_PP_BOOL_IMPL_TAG101 1
783
- #define THRUST_PP_BOOL_IMPL_TAG102 1
784
- #define THRUST_PP_BOOL_IMPL_TAG103 1
785
- #define THRUST_PP_BOOL_IMPL_TAG104 1
786
- #define THRUST_PP_BOOL_IMPL_TAG105 1
787
- #define THRUST_PP_BOOL_IMPL_TAG106 1
788
- #define THRUST_PP_BOOL_IMPL_TAG107 1
789
- #define THRUST_PP_BOOL_IMPL_TAG108 1
790
- #define THRUST_PP_BOOL_IMPL_TAG109 1
791
- #define THRUST_PP_BOOL_IMPL_TAG110 1
792
- #define THRUST_PP_BOOL_IMPL_TAG111 1
793
- #define THRUST_PP_BOOL_IMPL_TAG112 1
794
- #define THRUST_PP_BOOL_IMPL_TAG113 1
795
- #define THRUST_PP_BOOL_IMPL_TAG114 1
796
- #define THRUST_PP_BOOL_IMPL_TAG115 1
797
- #define THRUST_PP_BOOL_IMPL_TAG116 1
798
- #define THRUST_PP_BOOL_IMPL_TAG117 1
799
- #define THRUST_PP_BOOL_IMPL_TAG118 1
800
- #define THRUST_PP_BOOL_IMPL_TAG119 1
801
- #define THRUST_PP_BOOL_IMPL_TAG120 1
802
- #define THRUST_PP_BOOL_IMPL_TAG121 1
803
- #define THRUST_PP_BOOL_IMPL_TAG122 1
804
- #define THRUST_PP_BOOL_IMPL_TAG123 1
805
- #define THRUST_PP_BOOL_IMPL_TAG124 1
806
- #define THRUST_PP_BOOL_IMPL_TAG125 1
807
- #define THRUST_PP_BOOL_IMPL_TAG126 1
808
- #define THRUST_PP_BOOL_IMPL_TAG127 1
809
- #define THRUST_PP_BOOL_IMPL_TAG128 1
810
- #define THRUST_PP_BOOL_IMPL_TAG129 1
811
- #define THRUST_PP_BOOL_IMPL_TAG130 1
812
- #define THRUST_PP_BOOL_IMPL_TAG131 1
813
- #define THRUST_PP_BOOL_IMPL_TAG132 1
814
- #define THRUST_PP_BOOL_IMPL_TAG133 1
815
- #define THRUST_PP_BOOL_IMPL_TAG134 1
816
- #define THRUST_PP_BOOL_IMPL_TAG135 1
817
- #define THRUST_PP_BOOL_IMPL_TAG136 1
818
- #define THRUST_PP_BOOL_IMPL_TAG137 1
819
- #define THRUST_PP_BOOL_IMPL_TAG138 1
820
- #define THRUST_PP_BOOL_IMPL_TAG139 1
821
- #define THRUST_PP_BOOL_IMPL_TAG140 1
822
- #define THRUST_PP_BOOL_IMPL_TAG141 1
823
- #define THRUST_PP_BOOL_IMPL_TAG142 1
824
- #define THRUST_PP_BOOL_IMPL_TAG143 1
825
- #define THRUST_PP_BOOL_IMPL_TAG144 1
826
- #define THRUST_PP_BOOL_IMPL_TAG145 1
827
- #define THRUST_PP_BOOL_IMPL_TAG146 1
828
- #define THRUST_PP_BOOL_IMPL_TAG147 1
829
- #define THRUST_PP_BOOL_IMPL_TAG148 1
830
- #define THRUST_PP_BOOL_IMPL_TAG149 1
831
- #define THRUST_PP_BOOL_IMPL_TAG150 1
832
- #define THRUST_PP_BOOL_IMPL_TAG151 1
833
- #define THRUST_PP_BOOL_IMPL_TAG152 1
834
- #define THRUST_PP_BOOL_IMPL_TAG153 1
835
- #define THRUST_PP_BOOL_IMPL_TAG154 1
836
- #define THRUST_PP_BOOL_IMPL_TAG155 1
837
- #define THRUST_PP_BOOL_IMPL_TAG156 1
838
- #define THRUST_PP_BOOL_IMPL_TAG157 1
839
- #define THRUST_PP_BOOL_IMPL_TAG158 1
840
- #define THRUST_PP_BOOL_IMPL_TAG159 1
841
- #define THRUST_PP_BOOL_IMPL_TAG160 1
842
- #define THRUST_PP_BOOL_IMPL_TAG161 1
843
- #define THRUST_PP_BOOL_IMPL_TAG162 1
844
- #define THRUST_PP_BOOL_IMPL_TAG163 1
845
- #define THRUST_PP_BOOL_IMPL_TAG164 1
846
- #define THRUST_PP_BOOL_IMPL_TAG165 1
847
- #define THRUST_PP_BOOL_IMPL_TAG166 1
848
- #define THRUST_PP_BOOL_IMPL_TAG167 1
849
- #define THRUST_PP_BOOL_IMPL_TAG168 1
850
- #define THRUST_PP_BOOL_IMPL_TAG169 1
851
- #define THRUST_PP_BOOL_IMPL_TAG170 1
852
- #define THRUST_PP_BOOL_IMPL_TAG171 1
853
- #define THRUST_PP_BOOL_IMPL_TAG172 1
854
- #define THRUST_PP_BOOL_IMPL_TAG173 1
855
- #define THRUST_PP_BOOL_IMPL_TAG174 1
856
- #define THRUST_PP_BOOL_IMPL_TAG175 1
857
- #define THRUST_PP_BOOL_IMPL_TAG176 1
858
- #define THRUST_PP_BOOL_IMPL_TAG177 1
859
- #define THRUST_PP_BOOL_IMPL_TAG178 1
860
- #define THRUST_PP_BOOL_IMPL_TAG179 1
861
- #define THRUST_PP_BOOL_IMPL_TAG180 1
862
- #define THRUST_PP_BOOL_IMPL_TAG181 1
863
- #define THRUST_PP_BOOL_IMPL_TAG182 1
864
- #define THRUST_PP_BOOL_IMPL_TAG183 1
865
- #define THRUST_PP_BOOL_IMPL_TAG184 1
866
- #define THRUST_PP_BOOL_IMPL_TAG185 1
867
- #define THRUST_PP_BOOL_IMPL_TAG186 1
868
- #define THRUST_PP_BOOL_IMPL_TAG187 1
869
- #define THRUST_PP_BOOL_IMPL_TAG188 1
870
- #define THRUST_PP_BOOL_IMPL_TAG189 1
871
- #define THRUST_PP_BOOL_IMPL_TAG190 1
872
- #define THRUST_PP_BOOL_IMPL_TAG191 1
873
- #define THRUST_PP_BOOL_IMPL_TAG192 1
874
- #define THRUST_PP_BOOL_IMPL_TAG193 1
875
- #define THRUST_PP_BOOL_IMPL_TAG194 1
876
- #define THRUST_PP_BOOL_IMPL_TAG195 1
877
- #define THRUST_PP_BOOL_IMPL_TAG196 1
878
- #define THRUST_PP_BOOL_IMPL_TAG197 1
879
- #define THRUST_PP_BOOL_IMPL_TAG198 1
880
- #define THRUST_PP_BOOL_IMPL_TAG199 1
881
- #define THRUST_PP_BOOL_IMPL_TAG200 1
882
- #define THRUST_PP_BOOL_IMPL_TAG201 1
883
- #define THRUST_PP_BOOL_IMPL_TAG202 1
884
- #define THRUST_PP_BOOL_IMPL_TAG203 1
885
- #define THRUST_PP_BOOL_IMPL_TAG204 1
886
- #define THRUST_PP_BOOL_IMPL_TAG205 1
887
- #define THRUST_PP_BOOL_IMPL_TAG206 1
888
- #define THRUST_PP_BOOL_IMPL_TAG207 1
889
- #define THRUST_PP_BOOL_IMPL_TAG208 1
890
- #define THRUST_PP_BOOL_IMPL_TAG209 1
891
- #define THRUST_PP_BOOL_IMPL_TAG210 1
892
- #define THRUST_PP_BOOL_IMPL_TAG211 1
893
- #define THRUST_PP_BOOL_IMPL_TAG212 1
894
- #define THRUST_PP_BOOL_IMPL_TAG213 1
895
- #define THRUST_PP_BOOL_IMPL_TAG214 1
896
- #define THRUST_PP_BOOL_IMPL_TAG215 1
897
- #define THRUST_PP_BOOL_IMPL_TAG216 1
898
- #define THRUST_PP_BOOL_IMPL_TAG217 1
899
- #define THRUST_PP_BOOL_IMPL_TAG218 1
900
- #define THRUST_PP_BOOL_IMPL_TAG219 1
901
- #define THRUST_PP_BOOL_IMPL_TAG220 1
902
- #define THRUST_PP_BOOL_IMPL_TAG221 1
903
- #define THRUST_PP_BOOL_IMPL_TAG222 1
904
- #define THRUST_PP_BOOL_IMPL_TAG223 1
905
- #define THRUST_PP_BOOL_IMPL_TAG224 1
906
- #define THRUST_PP_BOOL_IMPL_TAG225 1
907
- #define THRUST_PP_BOOL_IMPL_TAG226 1
908
- #define THRUST_PP_BOOL_IMPL_TAG227 1
909
- #define THRUST_PP_BOOL_IMPL_TAG228 1
910
- #define THRUST_PP_BOOL_IMPL_TAG229 1
911
- #define THRUST_PP_BOOL_IMPL_TAG230 1
912
- #define THRUST_PP_BOOL_IMPL_TAG231 1
913
- #define THRUST_PP_BOOL_IMPL_TAG232 1
914
- #define THRUST_PP_BOOL_IMPL_TAG233 1
915
- #define THRUST_PP_BOOL_IMPL_TAG234 1
916
- #define THRUST_PP_BOOL_IMPL_TAG235 1
917
- #define THRUST_PP_BOOL_IMPL_TAG236 1
918
- #define THRUST_PP_BOOL_IMPL_TAG237 1
919
- #define THRUST_PP_BOOL_IMPL_TAG238 1
920
- #define THRUST_PP_BOOL_IMPL_TAG239 1
921
- #define THRUST_PP_BOOL_IMPL_TAG240 1
922
- #define THRUST_PP_BOOL_IMPL_TAG241 1
923
- #define THRUST_PP_BOOL_IMPL_TAG242 1
924
- #define THRUST_PP_BOOL_IMPL_TAG243 1
925
- #define THRUST_PP_BOOL_IMPL_TAG244 1
926
- #define THRUST_PP_BOOL_IMPL_TAG245 1
927
- #define THRUST_PP_BOOL_IMPL_TAG246 1
928
- #define THRUST_PP_BOOL_IMPL_TAG247 1
929
- #define THRUST_PP_BOOL_IMPL_TAG248 1
930
- #define THRUST_PP_BOOL_IMPL_TAG249 1
931
- #define THRUST_PP_BOOL_IMPL_TAG250 1
932
- #define THRUST_PP_BOOL_IMPL_TAG251 1
933
- #define THRUST_PP_BOOL_IMPL_TAG252 1
934
- #define THRUST_PP_BOOL_IMPL_TAG253 1
935
- #define THRUST_PP_BOOL_IMPL_TAG254 1
936
- #define THRUST_PP_BOOL_IMPL_TAG255 1
937
- #define THRUST_PP_BOOL_IMPL_TAG256 1
938
-
939
- ///////////////////////////////////////////////////////////////////////////////
940
-
941
- #define THRUST_PP_IIF(bit, t, f) THRUST_PP_IIF_IMPL0(bit, t, f)
942
-
943
- #if defined(_MSC_VER)
944
- #define THRUST_PP_IIF_IMPL0(bit, t, f) \
945
- THRUST_PP_IIF_IMPL1(THRUST_PP_CAT2(THRUST_PP_IIF_IMPL_TAG, bit(t, f))) \
946
- /**/
947
- #define THRUST_PP_IIF_IMPL1(id) id
948
- #else
949
- #define THRUST_PP_IIF_IMPL0(bit, t, f) \
950
- THRUST_PP_CAT2(THRUST_PP_IIF_IMPL_TAG, bit(t, f))
951
- /**/
952
- #endif
953
-
954
- #define THRUST_PP_IIF_IMPL_TAG0(t, f) f
955
- #define THRUST_PP_IIF_IMPL_TAG1(t, f) t
956
-
957
- #if defined(__EDG__)
958
- #define THRUST_PP_IF(cond, t, f) THRUST_PP_IF_IMPL0(cond, t, f)
959
- #define THRUST_PP_IF_IMPL0(cond, t, f) \
960
- THRUST_PP_IIF(THRUST_PP_BOOL(cond), t, f) \
961
- /**/
962
- #else
963
- #define THRUST_PP_IF(cond, t, f) THRUST_PP_IIF(THRUST_PP_BOOL(cond), t, f)
964
- #endif
965
-
966
- /// \def THRUST_COMMA_IF(cond)
967
- /// \brief If \a cond is true, expands to a comma. Otherwise, expands to nothing.
968
- ///
969
- /// \par <b>Example</b>:
970
- ///
971
- /// \code
972
- /// #include <thrust/detail/preprocessor.h>
973
- /// #include <iostream>
974
- ///
975
- /// int main()
976
- /// {
977
- /// std::cout << THRUST_PP_STRINGIZE(THRUST_COMMA_IF(0)) << "\n"
978
- /// << THRUST_PP_STRINGIZE(THRUST_COMMA_IF(1)) << "\n";
979
- /// }
980
- /// \endcode
981
- ///
982
- /// The above code expands to:
983
- ///
984
- /// \code
985
- /// #include <thrust/detail/preprocessor.h>
986
- /// #include <iostream>
987
- ///
988
- /// int main()
989
- /// {
990
- /// std::cout << "" << "\n"
991
- /// << "," << "\n";
992
- /// }
993
- /// \endcode
994
- ///
995
- #if defined(__EDG__)
996
- #define THRUST_PP_COMMA_IF(cond) THRUST_PP_COMMA_IF_IMPL0(cond)
997
- #define THRUST_PP_COMMA_IF_IMPL0(cond) \
998
- THRUST_PP_IF(cond, THRUST_PP_COMMA, THRUST_PP_EMPTY)() \
999
- /**/
1000
- #else
1001
- #define THRUST_PP_COMMA_IF(cond) \
1002
- THRUST_PP_IF(cond, THRUST_PP_COMMA, THRUST_PP_EMPTY)() \
1003
- /**/
1004
- #endif
1005
-
1006
- ///////////////////////////////////////////////////////////////////////////////
1007
-
1008
- // http://gustedt.wordpress.com/2010/06/08/detect-empty-macro-arguments
1009
-
1010
- #define THRUST_PP_64TH_ARG( \
1011
- _1, _2, _3, _4, _5, _6, _7, _8, _9,_10,_11,_12,_13,_14,_15,_16 \
1012
- , _17,_18,_19,_20,_21,_22,_23,_24,_25,_26,_27,_28,_29,_30,_31,_32 \
1013
- , _33,_34,_35,_36,_37,_38,_39,_40,_41,_42,_43,_44,_45,_46,_47,_48 \
1014
- , _49,_50,_51,_52,_53,_54,_55,_56,_57,_58,_59,_60,_61,_62,_63, N \
1015
- , ... \
1016
- ) N \
1017
- /**/
1018
-
1019
- #define THRUST_PP_HAS_COMMA(...) \
1020
- THRUST_PP_EXPAND(THRUST_PP_64TH_ARG( \
1021
- __VA_ARGS__ \
1022
- , 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 \
1023
- , 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 \
1024
- , 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 \
1025
- , 1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 \
1026
- )) \
1027
- /**/
1028
-
1029
- #define THRUST_PP_TRIGGER_PAREN(...) ,
1030
-
1031
- #define THRUST_PP_IS_VARIADIC_NULLARY(...) \
1032
- THRUST_PP_IS_VARIADIC_NULLARY_IMPL0( \
1033
- /* Test if there is just one argument, eventually an empty one. */ \
1034
- THRUST_PP_HAS_COMMA(__VA_ARGS__), \
1035
- /* Test if THRUST_PP_TRIGGER_PAREN together with the argument adds a */ \
1036
- /* comma. */ \
1037
- THRUST_PP_HAS_COMMA(THRUST_PP_TRIGGER_PAREN __VA_ARGS__), \
1038
- /* Test if the argument together with a parenthesis adds a comma. */ \
1039
- THRUST_PP_HAS_COMMA(__VA_ARGS__ (/*empty*/)), \
1040
- /* Test if placing it between THRUST_PP_TRIGGER_PAREN and the */ \
1041
- /* parenthesis adds a comma. */ \
1042
- THRUST_PP_HAS_COMMA(THRUST_PP_TRIGGER_PAREN __VA_ARGS__ (/*empty*/)) \
1043
- ) \
1044
- /**/
1045
-
1046
- #define THRUST_PP_IS_VARIADIC_NULLARY_IMPL0(_0, _1, _2, _3) \
1047
- THRUST_PP_HAS_COMMA( \
1048
- THRUST_PP_CAT5(THRUST_PP_IS_VARIADIC_NULLARY_IMPL_TAG, _0, _1, _2, _3) \
1049
- ) \
1050
-
1051
- #define THRUST_PP_IS_VARIADIC_NULLARY_IMPL_TAG0001 ,
1052
-
1053
- ///////////////////////////////////////////////////////////////////////////////
1054
-
1055
- /// \def THRUST_PP_ARITY(...)
1056
- /// \brief Returns the number of arguments that it was called with. Must be
1057
- /// called with less than 64 arguments.
1058
- ///
1059
- /// \par <b>Example</b>:
1060
- ///
1061
- /// \code
1062
- /// #include <thrust/detail/preprocessor.h>
1063
- /// #include <iostream>
1064
- ///
1065
- /// int main()
1066
- /// {
1067
- /// std::cout << THRUST_PP_ARITY() << "\n"
1068
- /// << THRUST_PP_ARITY(x) << "\n"
1069
- /// << THRUST_PP_ARITY(x, y) << "\n"
1070
- /// << THRUST_PP_ARITY(x, y, z) << "\n";
1071
- /// }
1072
- /// \endcode
1073
- ///
1074
- /// The above code expands to:
1075
- ///
1076
- /// \code
1077
- /// #include <thrust/detail/preprocessor.h>
1078
- /// #include <iostream>
1079
- ///
1080
- /// int main()
1081
- /// {
1082
- /// std::cout << 0 << "\n"
1083
- /// << 1 << "\n"
1084
- /// << 2 << "\n"
1085
- /// << 3 << "\n";
1086
- /// }
1087
- /// \endcode
1088
- ///
1089
- #define THRUST_PP_ARITY(...) \
1090
- THRUST_PP_EXPAND( \
1091
- THRUST_PP_IF( \
1092
- THRUST_PP_IS_VARIADIC_NULLARY(__VA_ARGS__) \
1093
- , 0 \
1094
- , THRUST_PP_64TH_ARG( \
1095
- __VA_ARGS__ \
1096
- , 63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48 \
1097
- , 47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32 \
1098
- , 31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16 \
1099
- , 15,14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 \
1100
- ) \
1101
- ) \
1102
- ) \
1103
- /**/
1104
-
1105
- /// \def THRUST_PP_DISPATCH(basename, ...)
1106
- /// \brief Expands to <code>basenameN(...)</code>, where <code>N</code> is the
1107
- /// number of variadic arguments that \a THRUST_PP_DISPATCH was called
1108
- /// with. This macro can be used to implement "macro overloading".
1109
- ///
1110
- /// \par <b>Example</b>:
1111
- ///
1112
- /// \code
1113
- /// #include <thrust/detail/preprocessor.h>
1114
- /// #include <iostream>
1115
- ///
1116
- /// #define PLUS(...) THRUST_PP_DISPATCH(PLUS, __VA_ARGS__)
1117
- /// #define PLUS0() 0
1118
- /// #define PLUS1(x) x
1119
- /// #define PLUS2(x, y) x + y
1120
- /// #define PLUS3(x, y, z) x + y + z
1121
- ///
1122
- /// int main()
1123
- /// {
1124
- /// std::cout << PLUS() << "\n"
1125
- /// << PLUS(1) << "\n"
1126
- /// << PLUS(1, 2) << "\n"
1127
- /// << PLUS(1, 2, 3) << "\n";
1128
- /// }
1129
- /// \endcode
1130
- ///
1131
- /// The above code expands to:
1132
- ///
1133
- /// \code
1134
- /// #include <thrust/detail/preprocessor.h>
1135
- /// #include <iostream>
1136
- ///
1137
- /// int main()
1138
- /// {
1139
- /// std::cout << 0 << "\n"
1140
- /// << 1 << "\n"
1141
- /// << 1 + 2 << "\n"
1142
- /// << 1 + 2 + 3 << "\n";
1143
- /// }
1144
- /// \endcode
1145
- ///
1146
- #define THRUST_PP_DISPATCH(basename, ...) \
1147
- THRUST_PP_EXPAND( \
1148
- THRUST_PP_CAT2( \
1149
- basename, \
1150
- THRUST_PP_ARITY(__VA_ARGS__) \
1151
- )(__VA_ARGS__) \
1152
- ) \
1153
- /**/
1154
-
1155
- ///////////////////////////////////////////////////////////////////////////////
1156
-
1157
- /// \def THRUST_CURRENT_FUNCTION
1158
- /// \brief The name of the current function as a string.
1159
- ///
1160
- #if defined(__GNUC__) \
1161
- || (defined(__MWERKS__) && (__MWERKS__ >= 0x3000)) \
1162
- || (defined(__ICC) && (__ICC >= 600)) || defined(__ghs__)
1163
- #define THRUST_CURRENT_FUNCTION __PRETTY_FUNCTION__
1164
- #elif defined(__DMC__) && (__DMC__ >= 0x810)
1165
- #define THRUST_CURRENT_FUNCTION __PRETTY_FUNCTION__
1166
- #elif defined(__FUNCSIG__)
1167
- #define THRUST_CURRENT_FUNCTION __FUNCSIG__
1168
- #elif (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 600)) \
1169
- || (defined(__IBMCTHRUST_PP__) && (__IBMCTHRUST_PP__ >= 500))
1170
- #define THRUST_CURRENT_FUNCTION __FUNCTION__
1171
- #elif defined(__BORLANDC__) && (__BORLANDC__ >= 0x550)
1172
- #define THRUST_CURRENT_FUNCTION __FUNC__
1173
- #elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901)
1174
- #define THRUST_CURRENT_FUNCTION __func__
1175
- #elif defined(__cplusplus) && (__cplusplus >= 201103)
1176
- #define THRUST_CURRENT_FUNCTION __func__
1177
- #else
1178
- #define THRUST_CURRENT_FUNCTION "(unknown)"
1179
- #endif
1180
-
1181
- ///////////////////////////////////////////////////////////////////////////////
1182
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/detail/use_default.h DELETED
@@ -1,27 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- namespace thrust
22
- {
23
-
24
- struct use_default {};
25
-
26
- } // end thrust
27
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/execution_policy.h DELETED
@@ -1,107 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/system/cpp/detail/execution_policy.h>
21
- #include <thrust/system/tbb/detail/execution_policy.h>
22
- #include <thrust/iterator/detail/any_system_tag.h>
23
- #include <thrust/detail/type_traits.h>
24
-
25
- namespace thrust
26
- {
27
- namespace system
28
- {
29
- // put the canonical tag in the same ns as the backend's entry points
30
- namespace omp
31
- {
32
- namespace detail
33
- {
34
-
35
- // this awkward sequence of definitions arise
36
- // from the desire both for tag to derive
37
- // from execution_policy and for execution_policy
38
- // to convert to tag (when execution_policy is not
39
- // an ancestor of tag)
40
-
41
- // forward declaration of tag
42
- struct tag;
43
-
44
- // forward declaration of execution_policy
45
- template<typename> struct execution_policy;
46
-
47
- // specialize execution_policy for tag
48
- template<>
49
- struct execution_policy<tag>
50
- : thrust::system::cpp::detail::execution_policy<tag>
51
- {};
52
-
53
- // tag's definition comes before the
54
- // generic definition of execution_policy
55
- struct tag : execution_policy<tag> {};
56
-
57
- // allow conversion to tag when it is not a successor
58
- template<typename Derived>
59
- struct execution_policy
60
- : thrust::system::cpp::detail::execution_policy<Derived>
61
- {
62
- typedef tag tag_type;
63
- operator tag() const { return tag(); }
64
- };
65
-
66
-
67
- // overloads of select_system
68
-
69
- // XXX select_system(tbb, omp) & select_system(omp, tbb) are ambiguous
70
- // because both convert to cpp without these overloads, which we
71
- // arbitrarily define in the omp backend
72
-
73
- template<typename System1, typename System2>
74
- inline __host__ __device__
75
- System1 select_system(execution_policy<System1> s, thrust::system::tbb::detail::execution_policy<System2>)
76
- {
77
- return thrust::detail::derived_cast(s);
78
- } // end select_system()
79
-
80
-
81
- template<typename System1, typename System2>
82
- inline __host__ __device__
83
- System2 select_system(thrust::system::tbb::detail::execution_policy<System1>, execution_policy<System2> s)
84
- {
85
- return thrust::detail::derived_cast(s);
86
- } // end select_system()
87
-
88
-
89
- } // end detail
90
-
91
- // alias execution_policy and tag here
92
- using thrust::system::omp::detail::execution_policy;
93
- using thrust::system::omp::detail::tag;
94
-
95
- } // end omp
96
- } // end system
97
-
98
- // alias items at top-level
99
- namespace omp
100
- {
101
-
102
- using thrust::system::omp::execution_policy;
103
- using thrust::system::omp::tag;
104
-
105
- } // end omp
106
- } // end thrust
107
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/transform_scan.h DELETED
@@ -1,23 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system inherits transform_scan
22
- #include <thrust/system/cpp/detail/transform_scan.h>
23
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/regionclip-demo/detectron2/modeling/test_time_augmentation.py DELETED
@@ -1,307 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import copy
3
- import numpy as np
4
- from contextlib import contextmanager
5
- from itertools import count
6
- from typing import List
7
- import torch
8
- from fvcore.transforms import HFlipTransform, NoOpTransform
9
- from torch import nn
10
- from torch.nn.parallel import DistributedDataParallel
11
-
12
- from detectron2.config import configurable
13
- from detectron2.data.detection_utils import read_image
14
- from detectron2.data.transforms import (
15
- RandomFlip,
16
- ResizeShortestEdge,
17
- ResizeTransform,
18
- apply_augmentations,
19
- )
20
- from detectron2.structures import Boxes, Instances
21
-
22
- from .meta_arch import GeneralizedRCNN
23
- from .postprocessing import detector_postprocess
24
- from .roi_heads.fast_rcnn import fast_rcnn_inference_single_image
25
-
26
- __all__ = ["DatasetMapperTTA", "GeneralizedRCNNWithTTA"]
27
-
28
-
29
- class DatasetMapperTTA:
30
- """
31
- Implement test-time augmentation for detection data.
32
- It is a callable which takes a dataset dict from a detection dataset,
33
- and returns a list of dataset dicts where the images
34
- are augmented from the input image by the transformations defined in the config.
35
- This is used for test-time augmentation.
36
- """
37
-
38
- @configurable
39
- def __init__(self, min_sizes: List[int], max_size: int, flip: bool):
40
- """
41
- Args:
42
- min_sizes: list of short-edge size to resize the image to
43
- max_size: maximum height or width of resized images
44
- flip: whether to apply flipping augmentation
45
- """
46
- self.min_sizes = min_sizes
47
- self.max_size = max_size
48
- self.flip = flip
49
-
50
- @classmethod
51
- def from_config(cls, cfg):
52
- return {
53
- "min_sizes": cfg.TEST.AUG.MIN_SIZES,
54
- "max_size": cfg.TEST.AUG.MAX_SIZE,
55
- "flip": cfg.TEST.AUG.FLIP,
56
- }
57
-
58
- def __call__(self, dataset_dict):
59
- """
60
- Args:
61
- dict: a dict in standard model input format. See tutorials for details.
62
-
63
- Returns:
64
- list[dict]:
65
- a list of dicts, which contain augmented version of the input image.
66
- The total number of dicts is ``len(min_sizes) * (2 if flip else 1)``.
67
- Each dict has field "transforms" which is a TransformList,
68
- containing the transforms that are used to generate this image.
69
- """
70
- numpy_image = dataset_dict["image"].permute(1, 2, 0).numpy()
71
- shape = numpy_image.shape
72
- orig_shape = (dataset_dict["height"], dataset_dict["width"])
73
- if shape[:2] != orig_shape:
74
- # It transforms the "original" image in the dataset to the input image
75
- pre_tfm = ResizeTransform(orig_shape[0], orig_shape[1], shape[0], shape[1])
76
- else:
77
- pre_tfm = NoOpTransform()
78
-
79
- # Create all combinations of augmentations to use
80
- aug_candidates = [] # each element is a list[Augmentation]
81
- for min_size in self.min_sizes:
82
- resize = ResizeShortestEdge(min_size, self.max_size)
83
- aug_candidates.append([resize]) # resize only
84
- if self.flip:
85
- flip = RandomFlip(prob=1.0)
86
- aug_candidates.append([resize, flip]) # resize + flip
87
-
88
- # Apply all the augmentations
89
- ret = []
90
- for aug in aug_candidates:
91
- new_image, tfms = apply_augmentations(aug, np.copy(numpy_image))
92
- torch_image = torch.from_numpy(np.ascontiguousarray(new_image.transpose(2, 0, 1)))
93
-
94
- dic = copy.deepcopy(dataset_dict)
95
- dic["transforms"] = pre_tfm + tfms
96
- dic["image"] = torch_image
97
- ret.append(dic)
98
- return ret
99
-
100
-
101
- class GeneralizedRCNNWithTTA(nn.Module):
102
- """
103
- A GeneralizedRCNN with test-time augmentation enabled.
104
- Its :meth:`__call__` method has the same interface as :meth:`GeneralizedRCNN.forward`.
105
- """
106
-
107
- def __init__(self, cfg, model, tta_mapper=None, batch_size=3):
108
- """
109
- Args:
110
- cfg (CfgNode):
111
- model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on.
112
- tta_mapper (callable): takes a dataset dict and returns a list of
113
- augmented versions of the dataset dict. Defaults to
114
- `DatasetMapperTTA(cfg)`.
115
- batch_size (int): batch the augmented images into this batch size for inference.
116
- """
117
- super().__init__()
118
- if isinstance(model, DistributedDataParallel):
119
- model = model.module
120
- assert isinstance(
121
- model, GeneralizedRCNN
122
- ), "TTA is only supported on GeneralizedRCNN. Got a model of type {}".format(type(model))
123
- self.cfg = cfg.clone()
124
- assert not self.cfg.MODEL.KEYPOINT_ON, "TTA for keypoint is not supported yet"
125
- assert (
126
- not self.cfg.MODEL.LOAD_PROPOSALS
127
- ), "TTA for pre-computed proposals is not supported yet"
128
-
129
- self.model = model
130
-
131
- if tta_mapper is None:
132
- tta_mapper = DatasetMapperTTA(cfg)
133
- self.tta_mapper = tta_mapper
134
- self.batch_size = batch_size
135
-
136
- @contextmanager
137
- def _turn_off_roi_heads(self, attrs):
138
- """
139
- Open a context where some heads in `model.roi_heads` are temporarily turned off.
140
- Args:
141
- attr (list[str]): the attribute in `model.roi_heads` which can be used
142
- to turn off a specific head, e.g., "mask_on", "keypoint_on".
143
- """
144
- roi_heads = self.model.roi_heads
145
- old = {}
146
- for attr in attrs:
147
- try:
148
- old[attr] = getattr(roi_heads, attr)
149
- except AttributeError:
150
- # The head may not be implemented in certain ROIHeads
151
- pass
152
-
153
- if len(old.keys()) == 0:
154
- yield
155
- else:
156
- for attr in old.keys():
157
- setattr(roi_heads, attr, False)
158
- yield
159
- for attr in old.keys():
160
- setattr(roi_heads, attr, old[attr])
161
-
162
- def _batch_inference(self, batched_inputs, detected_instances=None):
163
- """
164
- Execute inference on a list of inputs,
165
- using batch size = self.batch_size, instead of the length of the list.
166
-
167
- Inputs & outputs have the same format as :meth:`GeneralizedRCNN.inference`
168
- """
169
- if detected_instances is None:
170
- detected_instances = [None] * len(batched_inputs)
171
-
172
- outputs = []
173
- inputs, instances = [], []
174
- for idx, input, instance in zip(count(), batched_inputs, detected_instances):
175
- inputs.append(input)
176
- instances.append(instance)
177
- if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1:
178
- outputs.extend(
179
- self.model.inference(
180
- inputs,
181
- instances if instances[0] is not None else None,
182
- do_postprocess=False,
183
- )
184
- )
185
- inputs, instances = [], []
186
- return outputs
187
-
188
- def __call__(self, batched_inputs):
189
- """
190
- Same input/output format as :meth:`GeneralizedRCNN.forward`
191
- """
192
-
193
- def _maybe_read_image(dataset_dict):
194
- ret = copy.copy(dataset_dict)
195
- if "image" not in ret:
196
- image = read_image(ret.pop("file_name"), self.model.input_format)
197
- image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW
198
- ret["image"] = image
199
- if "height" not in ret and "width" not in ret:
200
- ret["height"] = image.shape[1]
201
- ret["width"] = image.shape[2]
202
- return ret
203
-
204
- return [self._inference_one_image(_maybe_read_image(x)) for x in batched_inputs]
205
-
206
- def _inference_one_image(self, input):
207
- """
208
- Args:
209
- input (dict): one dataset dict with "image" field being a CHW tensor
210
-
211
- Returns:
212
- dict: one output dict
213
- """
214
- orig_shape = (input["height"], input["width"])
215
- augmented_inputs, tfms = self._get_augmented_inputs(input)
216
- # Detect boxes from all augmented versions
217
- with self._turn_off_roi_heads(["mask_on", "keypoint_on"]):
218
- # temporarily disable roi heads
219
- all_boxes, all_scores, all_classes = self._get_augmented_boxes(augmented_inputs, tfms)
220
- # merge all detected boxes to obtain final predictions for boxes
221
- merged_instances = self._merge_detections(all_boxes, all_scores, all_classes, orig_shape)
222
-
223
- if self.cfg.MODEL.MASK_ON:
224
- # Use the detected boxes to obtain masks
225
- augmented_instances = self._rescale_detected_boxes(
226
- augmented_inputs, merged_instances, tfms
227
- )
228
- # run forward on the detected boxes
229
- outputs = self._batch_inference(augmented_inputs, augmented_instances)
230
- # Delete now useless variables to avoid being out of memory
231
- del augmented_inputs, augmented_instances
232
- # average the predictions
233
- merged_instances.pred_masks = self._reduce_pred_masks(outputs, tfms)
234
- merged_instances = detector_postprocess(merged_instances, *orig_shape)
235
- return {"instances": merged_instances}
236
- else:
237
- return {"instances": merged_instances}
238
-
239
- def _get_augmented_inputs(self, input):
240
- augmented_inputs = self.tta_mapper(input)
241
- tfms = [x.pop("transforms") for x in augmented_inputs]
242
- return augmented_inputs, tfms
243
-
244
- def _get_augmented_boxes(self, augmented_inputs, tfms):
245
- # 1: forward with all augmented images
246
- outputs = self._batch_inference(augmented_inputs)
247
- # 2: union the results
248
- all_boxes = []
249
- all_scores = []
250
- all_classes = []
251
- for output, tfm in zip(outputs, tfms):
252
- # Need to inverse the transforms on boxes, to obtain results on original image
253
- pred_boxes = output.pred_boxes.tensor
254
- original_pred_boxes = tfm.inverse().apply_box(pred_boxes.cpu().numpy())
255
- all_boxes.append(torch.from_numpy(original_pred_boxes).to(pred_boxes.device))
256
-
257
- all_scores.extend(output.scores)
258
- all_classes.extend(output.pred_classes)
259
- all_boxes = torch.cat(all_boxes, dim=0)
260
- return all_boxes, all_scores, all_classes
261
-
262
- def _merge_detections(self, all_boxes, all_scores, all_classes, shape_hw):
263
- # select from the union of all results
264
- num_boxes = len(all_boxes)
265
- num_classes = self.cfg.MODEL.ROI_HEADS.NUM_CLASSES
266
- # +1 because fast_rcnn_inference expects background scores as well
267
- all_scores_2d = torch.zeros(num_boxes, num_classes + 1, device=all_boxes.device)
268
- for idx, cls, score in zip(count(), all_classes, all_scores):
269
- all_scores_2d[idx, cls] = score
270
-
271
- merged_instances, _ = fast_rcnn_inference_single_image(
272
- all_boxes,
273
- all_scores_2d,
274
- shape_hw,
275
- 1e-8,
276
- self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,
277
- self.cfg.TEST.DETECTIONS_PER_IMAGE,
278
- )
279
-
280
- return merged_instances
281
-
282
- def _rescale_detected_boxes(self, augmented_inputs, merged_instances, tfms):
283
- augmented_instances = []
284
- for input, tfm in zip(augmented_inputs, tfms):
285
- # Transform the target box to the augmented image's coordinate space
286
- pred_boxes = merged_instances.pred_boxes.tensor.cpu().numpy()
287
- pred_boxes = torch.from_numpy(tfm.apply_box(pred_boxes))
288
-
289
- aug_instances = Instances(
290
- image_size=input["image"].shape[1:3],
291
- pred_boxes=Boxes(pred_boxes),
292
- pred_classes=merged_instances.pred_classes,
293
- scores=merged_instances.scores,
294
- )
295
- augmented_instances.append(aug_instances)
296
- return augmented_instances
297
-
298
- def _reduce_pred_masks(self, outputs, tfms):
299
- # Should apply inverse transforms on masks.
300
- # We assume only resize & flip are used. pred_masks is a scale-invariant
301
- # representation, so we handle flip specially
302
- for output, tfm in zip(outputs, tfms):
303
- if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
304
- output.pred_masks = output.pred_masks.flip(dims=[3])
305
- all_pred_masks = torch.stack([o.pred_masks for o in outputs], dim=0)
306
- avg_pred_masks = torch.mean(all_pred_masks, dim=0)
307
- return avg_pred_masks
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CaliforniaHealthCollaborative/Emoji2KaktovicEncryptKey/EMOJILOGIC.md DELETED
@@ -1,11 +0,0 @@
1
- ---
2
- title: README
3
- emoji: 🏢
4
- colorFrom: indigo
5
- colorTo: blue
6
- sdk: static
7
- pinned: true
8
- license: mit
9
- ---
10
-
11
- [![](https://mermaid.ink/img/pako:eNqVl81u00AUhV9lNIhdWtkz_kdiQWpVVYJUNVlBkDXY03SIY0e20za0XbBhwQokFuwq8RA8F4_A_KTNxFIk7qo55565dzxfGo3vcF4XHCd43rDVFZq-mVUIpe9nOF3Wn8QpW3I0LFnbzvAHXUFHR6_vZzjLRCW6LJvhe3Q2kvEzKQUrxWeOeJU3m1Un6ipb8A1iVYHmsk9W1vNel7ZjTZepouozOZV9JspCp9ray8rV2arhhchVZ5Ufn8v8uJ6jc8veW1OIdlWyjZ6QNbxdl12rVp5cyJUnpqhnoYun4t76vK6uudwhV2eRdXW2YIuuvhaLTD6Ueribuil0x6E6g6GJI310qKvRaBvXZzB6ju_N6Boxn_Mmy1lZqiMSueo3Hct-U1NCQ1lCY1PSa9v1R4NLT1KO7OfKFX8ff33bZqRDjPNl51DjfN05nnEed45vnJ87JzDO750TGufHzomM833nxMb5s3V4Vag_w5F6brlbWxBbUFt4tvBtEdgitEVki1iJ6dga-iSILagtPFv4tghsEdoisoUemrpbwrHzUiE1A1PSc_XklPZcvYXU67l6L6nfc_Wm0qDn6t2lYc_V20yjnmv2G2_dC_ltrZe64B4qkEMFeqjgHSr4hwrBoUJ4qKCfrhTVYtJtSo4c1HZNveDJC8LzInQHRh7diKK7Ssjq9tV-3gXmCTBPgXkPmPeB-QCYD4H5CJiPobzAgKGEXShiF8rYhUJ2oZRdKGYXytmFgnahpAmUNAH_L0NJEyhpAiVNoKQJlDSBkiZQ0gRKmkJJUyhpCv7ZhpKmUNIUSppCSVMoaQolTf-HNB7gJW-WTBTyjeNONZAX4CuuLvuJ_FjwSyav4TM8qx5klK27erKpcpx0zZoP8HpVsI6fCCYvv8t9My1EVzc4uWRlK80Vq97V9XNGSpzc4VucxPSYel7kByQgxAkdOsAbnHj-se86NPB8SrzQCR8G-LNe7hzHTuz6jk8iQjw_iAaY60lvzUuTfnd6-AdpI06c?type=png)](https://mermaid.live/edit#pako:eNqVl81u00AUhV9lNIhdWtkz_kdiQWpVVYJUNVlBkDXY03SIY0e20za0XbBhwQokFuwq8RA8F4_A_KTNxFIk7qo55565dzxfGo3vcF4XHCd43rDVFZq-mVUIpe9nOF3Wn8QpW3I0LFnbzvAHXUFHR6_vZzjLRCW6LJvhe3Q2kvEzKQUrxWeOeJU3m1Un6ipb8A1iVYHmsk9W1vNel7ZjTZepouozOZV9JspCp9ray8rV2arhhchVZ5Ufn8v8uJ6jc8veW1OIdlWyjZ6QNbxdl12rVp5cyJUnpqhnoYun4t76vK6uudwhV2eRdXW2YIuuvhaLTD6Ueribuil0x6E6g6GJI310qKvRaBvXZzB6ju_N6Boxn_Mmy1lZqiMSueo3Hct-U1NCQ1lCY1PSa9v1R4NLT1KO7OfKFX8ff33bZqRDjPNl51DjfN05nnEed45vnJ87JzDO750TGufHzomM833nxMb5s3V4Vag_w5F6brlbWxBbUFt4tvBtEdgitEVki1iJ6dga-iSILagtPFv4tghsEdoisoUemrpbwrHzUiE1A1PSc_XklPZcvYXU67l6L6nfc_Wm0qDn6t2lYc_V20yjnmv2G2_dC_ltrZe64B4qkEMFeqjgHSr4hwrBoUJ4qKCfrhTVYtJtSo4c1HZNveDJC8LzInQHRh7diKK7Ssjq9tV-3gXmCTBPgXkPmPeB-QCYD4H5CJiPobzAgKGEXShiF8rYhUJ2oZRdKGYXytmFgnahpAmUNAH_L0NJEyhpAiVNoKQJlDSBkiZQ0gRKmkJJUyhpCv7ZhpKmUNIUSppCSVMoaQolTf-HNB7gJW-WTBTyjeNONZAX4CuuLvuJ_FjwSyav4TM8qx5klK27erKpcpx0zZoP8HpVsI6fCCYvv8t9My1EVzc4uWRlK80Vq97V9XNGSpzc4VucxPSYel7kByQgxAkdOsAbnHj-se86NPB8SrzQCR8G-LNe7hzHTuz6jk8iQjw_iAaY60lvzUuTfnd6-AdpI06c)
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Chomkwoy/Nilkessye/syllable_model.py DELETED
@@ -1,55 +0,0 @@
1
- import torch
2
- from torch.nn import CrossEntropyLoss
3
-
4
- from transformers import VisionEncoderDecoderModel
5
- from transformers import TrOCRProcessor, RobertaTokenizerFast
6
-
7
-
8
- class SyllableRecognizer:
9
- def __init__(self, model=None):
10
- if model is None:
11
- self.model: VisionEncoderDecoderModel = VisionEncoderDecoderModel.from_pretrained(
12
- "ckpt-syllable-3fonts-surrounded-real"
13
- )
14
- else:
15
- self.model: VisionEncoderDecoderModel = model
16
-
17
- self.processor = TrOCRProcessor.from_pretrained("Chomkwoy/nilkessye_tokenizer")
18
-
19
- def _preprocess_images(self, images):
20
- pixel_values = []
21
- for image in images:
22
- pixel_values.append(self.processor(image, return_tensors="pt").pixel_values)
23
- pixel_values = torch.cat(pixel_values, dim=0)
24
- return pixel_values
25
-
26
- def recognize(self, images):
27
- pixel_values = self._preprocess_images(images)
28
-
29
- generated_ids = self.model.generate(
30
- pixel_values.to(self.model.device),
31
- max_new_tokens=13,
32
- early_stopping=True,
33
- eos_token_id=self.processor.tokenizer.eos_token_id
34
- )
35
- generated_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)
36
- return generated_text
37
-
38
- def loss(self, images, text):
39
- pixel_values = self._preprocess_images(images)
40
- tokens = self.processor.tokenizer(text, padding=True, return_tensors='pt')
41
- labels = tokens['input_ids']
42
- labels[labels == self.processor.tokenizer.pad_token_id] = -100
43
-
44
- with torch.no_grad():
45
- outputs = self.model(
46
- pixel_values=pixel_values.to(self.model.device),
47
- labels=labels.to(self.model.device),
48
- return_dict=True,
49
- )
50
-
51
- logits = outputs.logits.cpu()
52
- loss_fct = CrossEntropyLoss(reduction='none')
53
- loss = loss_fct(logits.permute(0, 2, 1), labels)
54
-
55
- return loss.sum(-1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Cicooo/vits-uma-genshin-honkai/text/symbols.py DELETED
@@ -1,39 +0,0 @@
1
- '''
2
- Defines the set of symbols used in text input to the model.
3
- '''
4
-
5
- '''# japanese_cleaners
6
- _pad = '_'
7
- _punctuation = ',.!?-'
8
- _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ '
9
- '''
10
-
11
- '''# japanese_cleaners2
12
- _pad = '_'
13
- _punctuation = ',.!?-~…'
14
- _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ '
15
- '''
16
-
17
- '''# korean_cleaners
18
- _pad = '_'
19
- _punctuation = ',.!?…~'
20
- _letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ '
21
- '''
22
-
23
- '''# chinese_cleaners
24
- _pad = '_'
25
- _punctuation = ',。!?—…'
26
- _letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ '
27
- '''
28
-
29
- # zh_ja_mixture_cleaners
30
- _pad = '_'
31
- _punctuation = ',.!?-~…'
32
- _letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ '
33
-
34
-
35
- # Export all symbols:
36
- symbols = [_pad] + list(_punctuation) + list(_letters)
37
-
38
- # Special symbol ids
39
- SPACE_ID = symbols.index(" ")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CikeyQI/Yunzai/Yunzai/plugins/other/version.js DELETED
@@ -1,27 +0,0 @@
1
- import { App, Common, Version } from '#miao'
2
-
3
- let app = App.init({
4
- id: 'version',
5
- name: '版本',
6
- desc: '版本'
7
- })
8
-
9
- app.reg({
10
- version: {
11
- rule: /^#版本$/,
12
- desc: '【#帮助】 版本介绍',
13
- fn: async function (e) {
14
- let { changelogs, currentVersion } = Version.readLogFile('root')
15
- return await Common.render('help/version-info', {
16
- currentVersion,
17
- changelogs,
18
- name: 'TRSS-Yunzai',
19
- elem: 'cryo',
20
- pluginName: false,
21
- pluginVersion: false
22
- }, { e, scale: 1.2 })
23
- }
24
- }
25
- })
26
-
27
- export const version = app.v3App()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CikeyQI/meme-api/meme_generator/memes/cover_face/__init__.py DELETED
@@ -1,19 +0,0 @@
1
- from pathlib import Path
2
- from typing import List
3
-
4
- from pil_utils import BuildImage
5
-
6
- from meme_generator import add_meme
7
-
8
- img_dir = Path(__file__).parent / "images"
9
-
10
-
11
- def cover_face(images: List[BuildImage], texts, args):
12
- points = ((15, 15), (448, 0), (445, 456), (0, 465))
13
- img = images[0].convert("RGBA").square().resize((450, 450)).perspective(points)
14
- frame = BuildImage.open(img_dir / "0.png")
15
- frame.paste(img, (120, 150), below=True)
16
- return frame.save_jpg()
17
-
18
-
19
- add_meme("cover_face", cover_face, min_images=1, max_images=1, keywords=["捂脸"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Cong723/gpt-academic-public/check_proxy.py DELETED
@@ -1,151 +0,0 @@
1
-
2
- def check_proxy(proxies):
3
- import requests
4
- proxies_https = proxies['https'] if proxies is not None else '无'
5
- try:
6
- response = requests.get("https://ipapi.co/json/",
7
- proxies=proxies, timeout=4)
8
- data = response.json()
9
- print(f'查询代理的地理位置,返回的结果是{data}')
10
- if 'country_name' in data:
11
- country = data['country_name']
12
- result = f"代理配置 {proxies_https}, 代理所在地:{country}"
13
- elif 'error' in data:
14
- result = f"代理配置 {proxies_https}, 代理所在地:未知,IP查询频率受限"
15
- print(result)
16
- return result
17
- except:
18
- result = f"代理配置 {proxies_https}, 代理所在地查询超时,代理可能无效"
19
- print(result)
20
- return result
21
-
22
-
23
- def backup_and_download(current_version, remote_version):
24
- """
25
- 一键更新协议:备份和下载
26
- """
27
- from toolbox import get_conf
28
- import shutil
29
- import os
30
- import requests
31
- import zipfile
32
- os.makedirs(f'./history', exist_ok=True)
33
- backup_dir = f'./history/backup-{current_version}/'
34
- new_version_dir = f'./history/new-version-{remote_version}/'
35
- if os.path.exists(new_version_dir):
36
- return new_version_dir
37
- os.makedirs(new_version_dir)
38
- shutil.copytree('./', backup_dir, ignore=lambda x, y: ['history'])
39
- proxies, = get_conf('proxies')
40
- r = requests.get(
41
- 'https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True)
42
- zip_file_path = backup_dir+'/master.zip'
43
- with open(zip_file_path, 'wb+') as f:
44
- f.write(r.content)
45
- dst_path = new_version_dir
46
- with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
47
- for zip_info in zip_ref.infolist():
48
- dst_file_path = os.path.join(dst_path, zip_info.filename)
49
- if os.path.exists(dst_file_path):
50
- os.remove(dst_file_path)
51
- zip_ref.extract(zip_info, dst_path)
52
- return new_version_dir
53
-
54
-
55
- def patch_and_restart(path):
56
- """
57
- 一键更新协议:覆盖和重启
58
- """
59
- from distutils import dir_util
60
- import shutil
61
- import os
62
- import sys
63
- import time
64
- import glob
65
- from colorful import print亮黄, print亮绿, print亮红
66
- # if not using config_private, move origin config.py as config_private.py
67
- if not os.path.exists('config_private.py'):
68
- print亮黄('由于您没有设置config_private.py私密配置,现将您的现有配置移动至config_private.py以防止配置丢失,',
69
- '另外您可以随时在history子文件夹下找回旧版的程序。')
70
- shutil.copyfile('config.py', 'config_private.py')
71
- path_new_version = glob.glob(path + '/*-master')[0]
72
- dir_util.copy_tree(path_new_version, './')
73
- print亮绿('代码已经更新,即将更新pip包依赖……')
74
- for i in reversed(range(5)): time.sleep(1); print(i)
75
- try:
76
- import subprocess
77
- subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'])
78
- except:
79
- print亮红('pip包依赖安装出现问题,需要手动安装新增的依赖库 `python -m pip install -r requirements.txt`,然后在用常规的`python main.py`的方式启动。')
80
- print亮绿('更新完成,您可以随时在history子文件夹下找回旧版的程序,5s之后重启')
81
- print亮红('假如重启失败,您可能需要手动安装新增的依赖库 `python -m pip install -r requirements.txt`,然后在用常规的`python main.py`的方式启动。')
82
- print(' ------------------------------ -----------------------------------')
83
- for i in reversed(range(8)): time.sleep(1); print(i)
84
- os.execl(sys.executable, sys.executable, *sys.argv)
85
-
86
-
87
- def get_current_version():
88
- import json
89
- try:
90
- with open('./version', 'r', encoding='utf8') as f:
91
- current_version = json.loads(f.read())['version']
92
- except:
93
- current_version = ""
94
- return current_version
95
-
96
-
97
- def auto_update():
98
- """
99
- 一键更新协议:查询版本和用户意见
100
- """
101
- try:
102
- from toolbox import get_conf
103
- import requests
104
- import time
105
- import json
106
- proxies, = get_conf('proxies')
107
- response = requests.get(
108
- "https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5)
109
- remote_json_data = json.loads(response.text)
110
- remote_version = remote_json_data['version']
111
- if remote_json_data["show_feature"]:
112
- new_feature = "新功能:" + remote_json_data["new_feature"]
113
- else:
114
- new_feature = ""
115
- with open('./version', 'r', encoding='utf8') as f:
116
- current_version = f.read()
117
- current_version = json.loads(current_version)['version']
118
- if (remote_version - current_version) >= 0.01:
119
- from colorful import print亮��
120
- print亮黄(
121
- f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}。{new_feature}')
122
- print('(1)Github更新地址:\nhttps://github.com/binary-husky/chatgpt_academic\n')
123
- user_instruction = input('(2)是否一键更新代码(Y+回车=确认,输入其他/无输入+回车=不更新)?')
124
- if user_instruction in ['Y', 'y']:
125
- path = backup_and_download(current_version, remote_version)
126
- try:
127
- patch_and_restart(path)
128
- except:
129
- print('更新失败。')
130
- else:
131
- print('自动更新程序:已禁用')
132
- return
133
- else:
134
- return
135
- except:
136
- print('自动更新程序:已禁用')
137
-
138
- def warm_up_modules():
139
- print('正在执行一些模块的预热...')
140
- from request_llm.bridge_all import model_info
141
- enc = model_info["gpt-3.5-turbo"]['tokenizer']
142
- enc.encode("模块预热", disallowed_special=())
143
- enc = model_info["gpt-4"]['tokenizer']
144
- enc.encode("模块预热", disallowed_special=())
145
-
146
- if __name__ == '__main__':
147
- import os
148
- os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
149
- from toolbox import get_conf
150
- proxies, = get_conf('proxies')
151
- check_proxy(proxies)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/module-447425fe.js DELETED
@@ -1,9 +0,0 @@
1
- import{c as ar,a as ir,g as cr}from"./module-a3cf0cc4.js";import{g as nn}from"./index-3370be2a.js";const xt=new Set,ur=ar({encode:({call:e})=>async(t,n)=>{const r=await e("encode",{encoderId:t,timeslice:n});return xt.delete(t),r},instantiate:({call:e})=>async(t,n)=>{const r=ir(xt),o=await e("instantiate",{encoderId:r,mimeType:t,sampleRate:n});return{encoderId:r,port:o}},register:({call:e})=>t=>e("register",{port:t},[t])}),lr=e=>{const t=new Worker(e);return ur(t)},dr=`(()=>{var e={775:function(e,t,r){!function(e,t,r,n){"use strict";function o(e){return e&&"object"==typeof e&&"default"in e?e:{default:e}}var a=o(t),s=o(r),i=o(n),c=function(e,t){return void 0===t?e:t.reduce((function(e,t){if("capitalize"===t){var r=e.charAt(0).toUpperCase(),n=e.slice(1);return"".concat(r).concat(n)}return"dashify"===t?s.default(e):"prependIndefiniteArticle"===t?"".concat(i.default(e)," ").concat(e):e}),e)},u=function(e){var t=e.name+e.modifiers.map((function(e){return"\\\\.".concat(e,"\\\\(\\\\)")})).join("");return new RegExp("\\\\$\\\\{".concat(t,"}"),"g")},l=function(e,t){for(var r=/\\\${([^.}]+)((\\.[^(]+\\(\\))*)}/g,n=[],o=r.exec(e);null!==o;){var s={modifiers:[],name:o[1]};if(void 0!==o[3])for(var i=/\\.[^(]+\\(\\)/g,l=i.exec(o[2]);null!==l;)s.modifiers.push(l[0].slice(1,-2)),l=i.exec(o[2]);n.push(s),o=r.exec(e)}var d=n.reduce((function(e,r){return e.map((function(e){return"string"==typeof e?e.split(u(r)).reduce((function(e,n,o){return 0===o?[n]:r.name in t?[].concat(a.default(e),[c(t[r.name],r.modifiers),n]):[].concat(a.default(e),[function(e){return c(e[r.name],r.modifiers)},n])}),[]):[e]})).reduce((function(e,t){return[].concat(a.default(e),a.default(t))}),[])}),[e]);return function(e){return d.reduce((function(t,r){return[].concat(a.default(t),"string"==typeof r?[r]:[r(e)])}),[]).join("")}},d=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=void 0===e.code?void 0:l(e.code,t),n=void 0===e.message?void 0:l(e.message,t);function o(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},o=arguments.length>1?arguments[1]:void 0,a=void 0===o&&(t instanceof Error||void 0!==t.code&&"Exception"===t.code.slice(-9))?{cause:t,missingParameters:{}}:{cause:o,missingParameters:t},s=a.cause,i=a.missingParameters,c=void 0===n?new Error:new Error(n(i));return null!==s&&(c.cause=s),void 0!==r&&(c.code=r(i)),void 0!==e.status&&(c.status=e.status),c}return o};e.compile=d,Object.defineProperty(e,"__esModule",{value:!0})}(t,r(106),r(881),r(507))},881:e=>{"use strict";e.exports=(e,t)=>{if("string"!=typeof e)throw new TypeError("expected a string");return e.trim().replace(/([a-z])([A-Z])/g,"$1-$2").replace(/\\W/g,(e=>/[À-ž]/.test(e)?e:"-")).replace(/^-+|-+$/g,"").replace(/-{2,}/g,(e=>t&&t.condense?"-":e)).toLowerCase()}},107:function(e,t){!function(e){"use strict";var t=function(e){return function(t){var r=e(t);return t.add(r),r}},r=function(e){return function(t,r){return e.set(t,r),r}},n=void 0===Number.MAX_SAFE_INTEGER?9007199254740991:Number.MAX_SAFE_INTEGER,o=536870912,a=2*o,s=function(e,t){return function(r){var s=t.get(r),i=void 0===s?r.size:s<a?s+1:0;if(!r.has(i))return e(r,i);if(r.size<o){for(;r.has(i);)i=Math.floor(Math.random()*a);return e(r,i)}if(r.size>n)throw new Error("Congratulations, you created a collection of unique numbers which uses all available integers!");for(;r.has(i);)i=Math.floor(Math.random()*n);return e(r,i)}},i=new WeakMap,c=r(i),u=s(c,i),l=t(u);e.addUniqueNumber=l,e.generateUniqueNumber=u,Object.defineProperty(e,"__esModule",{value:!0})}(t)},507:e=>{var t=function(e){var t,r,n=/\\w+/.exec(e);if(!n)return"an";var o=(r=n[0]).toLowerCase(),a=["honest","hour","hono"];for(t in a)if(0==o.indexOf(a[t]))return"an";if(1==o.length)return"aedhilmnorsx".indexOf(o)>=0?"an":"a";if(r.match(/(?!FJO|[HLMNS]Y.|RY[EO]|SQU|(F[LR]?|[HL]|MN?|N|RH?|S[CHKLMNPTVW]?|X(YL)?)[AEIOU])[FHLMNRSX][A-Z]/))return"an";var s=[/^e[uw]/,/^onc?e\\b/,/^uni([^nmd]|mo)/,/^u[bcfhjkqrst][aeiou]/];for(t=0;t<s.length;t++)if(o.match(s[t]))return"a";return r.match(/^U[NK][AIEO]/)?"a":r==r.toUpperCase()?"aedhilmnorsx".indexOf(o[0])>=0?"an":"a":"aeiou".indexOf(o[0])>=0||o.match(/^y(b[lor]|cl[ea]|fere|gg|p[ios]|rou|tt)/)?"an":"a"};void 0!==e.exports?e.exports=t:window.indefiniteArticle=t},768:e=>{e.exports=function(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,n=new Array(t);r<t;r++)n[r]=e[r];return n},e.exports.__esModule=!0,e.exports.default=e.exports},907:(e,t,r)=>{var n=r(768);e.exports=function(e){if(Array.isArray(e))return n(e)},e.exports.__esModule=!0,e.exports.default=e.exports},642:e=>{e.exports=function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)},e.exports.__esModule=!0,e.exports.default=e.exports},344:e=>{e.exports=function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")},e.exports.__esModule=!0,e.exports.default=e.exports},106:(e,t,r)=>{var n=r(907),o=r(642),a=r(906),s=r(344);e.exports=function(e){return n(e)||o(e)||a(e)||s()},e.exports.__esModule=!0,e.exports.default=e.exports},906:(e,t,r)=>{var n=r(768);e.exports=function(e,t){if(e){if("string"==typeof e)return n(e,t);var r=Object.prototype.toString.call(e).slice(8,-1);return"Object"===r&&e.constructor&&(r=e.constructor.name),"Map"===r||"Set"===r?Array.from(e):"Arguments"===r||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(r)?n(e,t):void 0}},e.exports.__esModule=!0,e.exports.default=e.exports}},t={};function r(n){var o=t[n];if(void 0!==o)return o.exports;var a=t[n]={exports:{}};return e[n].call(a.exports,a,a.exports,r),a.exports}(()=>{"use strict";var e=r(775);const t=-32603,n=-32602,o=-32601,a=(0,e.compile)({message:'The requested method called "\${method}" is not supported.',status:o}),s=(0,e.compile)({message:'The handler of the method called "\${method}" returned no required result.',status:t}),i=(0,e.compile)({message:'The handler of the method called "\${method}" returned an unexpected result.',status:t}),c=(0,e.compile)({message:'The specified parameter called "portId" with the given value "\${portId}" does not identify a port connected to this worker.',status:n}),u=(e,t)=>async r=>{let{data:{id:n,method:o,params:c}}=r;const u=t[o];try{if(void 0===u)throw a({method:o});const t=void 0===c?u():u(c);if(void 0===t)throw s({method:o});const r=t instanceof Promise?await t:t;if(null===n){if(void 0!==r.result)throw i({method:o})}else{if(void 0===r.result)throw i({method:o});const{result:t,transferables:a=[]}=r;e.postMessage({id:n,result:t},a)}}catch(t){const{message:r,status:o=-32603}=t;e.postMessage({error:{code:o,message:r},id:n})}};var l=r(107);const d=new Map,f=(e,t,r)=>({...t,connect:r=>{let{port:n}=r;n.start();const o=e(n,t),a=(0,l.generateUniqueNumber)(d);return d.set(a,(()=>{o(),n.close(),d.delete(a)})),{result:a}},disconnect:e=>{let{portId:t}=e;const r=d.get(t);if(void 0===r)throw c({portId:t.toString()});return r(),{result:null}},isSupported:async()=>{if(await new Promise((e=>{const t=new ArrayBuffer(0),{port1:r,port2:n}=new MessageChannel;r.onmessage=t=>{let{data:r}=t;return e(null!==r)},n.postMessage(t,[t])}))){const e=r();return{result:e instanceof Promise?await e:e}}return{result:!1}}}),p=function(e,t){let r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:()=>!0;const n=f(p,t,r),o=u(e,n);return e.addEventListener("message",o),()=>e.removeEventListener("message",o)},m=e=>{e.onmessage=null,e.close()},h=new WeakMap,g=new WeakMap,v=(e=>{const t=(r=e,{...r,connect:e=>{let{call:t}=e;return async()=>{const{port1:e,port2:r}=new MessageChannel,n=await t("connect",{port:e},[e]);return h.set(r,n),r}},disconnect:e=>{let{call:t}=e;return async e=>{const r=h.get(e);if(void 0===r)throw new Error("The given port is not connected.");await t("disconnect",{portId:r})}},isSupported:e=>{let{call:t}=e;return()=>t("isSupported")}});var r;return e=>{const r=(e=>{if(g.has(e))return g.get(e);const t=new Map;return g.set(e,t),t})(e);e.addEventListener("message",(e=>{let{data:t}=e;const{id:n}=t;if(null!==n&&r.has(n)){const{reject:e,resolve:o}=r.get(n);r.delete(n),void 0===t.error?o(t.result):e(new Error(t.error.message))}})),(e=>"function"==typeof e.start)(e)&&e.start();const n=function(t){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:null,o=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[];return new Promise(((a,s)=>{const i=(0,l.generateUniqueNumber)(r);r.set(i,{reject:s,resolve:a}),null===n?e.postMessage({id:i,method:t},o):e.postMessage({id:i,method:t,params:n},o)}))},o=function(t,r){let n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[];e.postMessage({id:null,method:t,params:r},n)};let a={};for(const[e,r]of Object.entries(t))a={...a,[e]:r({call:n,notify:o})};return{...a}}})({characterize:e=>{let{call:t}=e;return()=>t("characterize")},encode:e=>{let{call:t}=e;return(e,r)=>t("encode",{recordingId:e,timeslice:r})},record:e=>{let{call:t}=e;return async(e,r,n)=>{await t("record",{recordingId:e,sampleRate:r,typedArrays:n},n.map((e=>{let{buffer:t}=e;return t})))}}}),w=async(e,t)=>{const r=v(t),n=await r.characterize(),o=n.toString();if(e.has(o))throw new Error("There is already an encoder stored which handles exactly the same mime types.");return e.set(o,[n,r]),n},x=new Map,y=(e=>t=>{const r=e.get(t);if(void 0===r)throw new Error("There was no instance of an encoder stored with the given id.");return r})(x),M=((e,t)=>r=>{const n=t(r);return e.delete(r),n})(x,y),b=new Map,E=((e,t)=>r=>{const[n,o,a,s]=t(r);return a?new Promise((t=>{o.onmessage=a=>{let{data:i}=a;0===i.length?(e(o),t(n.encode(r,null))):n.record(r,s,i)}})):n.encode(r,null)})(m,M),A=(e=>t=>{for(const[r,n]of Array.from(e.values()))if(r.test(t))return n;throw new Error("There is no encoder registered which could handle the given mimeType.")})(b),_=((e,t,r)=>(n,o,a)=>{if(t.has(n))throw new Error('There is already an encoder registered with an id called "'.concat(n,'".'));const s=r(o),{port1:i,port2:c}=new MessageChannel,u=[s,i,!0,a];return t.set(n,u),i.onmessage=t=>{let{data:r}=t;0===r.length?(e(i),u[2]=!1):s.record(n,a,r)},c})(m,x,A),I=(e=>(t,r)=>{const[n]=e(t);return n.encode(t,r)})(y);p(self,{encode:async e=>{let{encoderId:t,timeslice:r}=e;const n=null===r?await E(t):await I(t,r);return{result:n,transferables:n}},instantiate:e=>{let{encoderId:t,mimeType:r,sampleRate:n}=e;const o=_(t,r,n);return{result:o,transferables:[o]}},register:async e=>{let{port:t}=e;return{result:await w(b,t)}}})})()})();`,fr=new Blob([dr],{type:"application/javascript; charset=utf-8"}),rn=URL.createObjectURL(fr),vt=lr(rn),Ue=vt.encode,on=vt.instantiate,hr=vt.register;URL.revokeObjectURL(rn);const pr=e=>(t,n)=>{if(e===null)throw new Error("A native BlobEvent could not be created.");return new e(t,n)},mr=(e,t)=>(n,r,o)=>{const s=[];let a=r,c=0;for(;c<n.byteLength;)if(a===null){const i=t(n,c);if(i===null)break;const{length:u,type:d}=i;a=d,c+=u}else{const i=e(n,c,a,o);if(i===null)break;const{content:u,length:d}=i;a=null,c+=d,u!==null&&s.push(u)}return{contents:s,currentElementType:a,offset:c}},gr=(e,t)=>class{constructor(r=null){this._listeners=new WeakMap,this._nativeEventTarget=r===null?e():r}addEventListener(r,o,s){if(o!==null){let a=this._listeners.get(o);a===void 0&&(a=t(this,o),typeof o=="function"&&this._listeners.set(o,a)),this._nativeEventTarget.addEventListener(r,a,s)}}dispatchEvent(r){return this._nativeEventTarget.dispatchEvent(r)}removeEventListener(r,o,s){const a=o===null?void 0:this._listeners.get(o);this._nativeEventTarget.removeEventListener(r,a===void 0?null:a,s)}},wr=e=>()=>{if(e===null)throw new Error("A native EventTarget could not be created.");return e.document.createElement("p")},_t=(e="")=>{try{return new DOMException(e,"InvalidModificationError")}catch(t){return t.code=13,t.message=e,t.name="InvalidModificationError",t}},vr=()=>{try{return new DOMException("","InvalidStateError")}catch(e){return e.code=11,e.name="InvalidStateError",e}},_r=e=>e!==null&&e.BlobEvent!==void 0&&e.MediaStream!==void 0&&(e.MediaRecorder===void 0||e.MediaRecorder.isTypeSupported!==void 0)?new Promise(t=>{if(e.MediaRecorder===void 0)return t(!0);const n=e.document.createElement("canvas");if(n.getContext("2d"),typeof n.captureStream!="function")return t(!1);const r=n.captureStream(),o="audio/webm";try{const s=new e.MediaRecorder(r,{mimeType:o});s.addEventListener("dataavailable",({data:a})=>t(a.type===o)),s.start(),setTimeout(()=>s.stop(),10)}catch(s){t(s.name==="NotSupportedError")}}):Promise.resolve(!1),yr=(e,t,n,r,o,s,a)=>class extends s{constructor(i,u={}){const{mimeType:d}=u;if(a!==null&&(d===void 0||a.isTypeSupported!==void 0&&a.isTypeSupported(d))){const l=e(a,i,u);super(l),this._internalMediaRecorder=l}else if(d!==void 0&&o.some(l=>l.test(d)))super(),a!==null&&a.isTypeSupported!==void 0&&a.isTypeSupported("audio/webm;codecs=pcm")?this._internalMediaRecorder=r(this,a,i,d):this._internalMediaRecorder=n(this,i,d);else throw a!==null&&e(a,i,u),t();this._ondataavailable=null,this._onerror=null,this._onpause=null,this._onresume=null,this._onstart=null,this._onstop=null}get mimeType(){return this._internalMediaRecorder.mimeType}get ondataavailable(){return this._ondataavailable===null?this._ondataavailable:this._ondataavailable[0]}set ondataavailable(i){if(this._ondataavailable!==null&&this.removeEventListener("dataavailable",this._ondataavailable[1]),typeof i=="function"){const u=i.bind(this);this.addEventListener("dataavailable",u),this._ondataavailable=[i,u]}else this._ondataavailable=null}get onerror(){return this._onerror===null?this._onerror:this._onerror[0]}set onerror(i){if(this._onerror!==null&&this.removeEventListener("error",this._onerror[1]),typeof i=="function"){const u=i.bind(this);this.addEventListener("error",u),this._onerror=[i,u]}else this._onerror=null}get onpause(){return this._onpause===null?this._onpause:this._onpause[0]}set onpause(i){if(this._onpause!==null&&this.removeEventListener("pause",this._onpause[1]),typeof i=="function"){const u=i.bind(this);this.addEventListener("pause",u),this._onpause=[i,u]}else this._onpause=null}get onresume(){return this._onresume===null?this._onresume:this._onresume[0]}set onresume(i){if(this._onresume!==null&&this.removeEventListener("resume",this._onresume[1]),typeof i=="function"){const u=i.bind(this);this.addEventListener("resume",u),this._onresume=[i,u]}else this._onresume=null}get onstart(){return this._onstart===null?this._onstart:this._onstart[0]}set onstart(i){if(this._onstart!==null&&this.removeEventListener("start",this._onstart[1]),typeof i=="function"){const u=i.bind(this);this.addEventListener("start",u),this._onstart=[i,u]}else this._onstart=null}get onstop(){return this._onstop===null?this._onstop:this._onstop[0]}set onstop(i){if(this._onstop!==null&&this.removeEventListener("stop",this._onstop[1]),typeof i=="function"){const u=i.bind(this);this.addEventListener("stop",u),this._onstop=[i,u]}else this._onstop=null}get state(){return this._internalMediaRecorder.state}pause(){return this._internalMediaRecorder.pause()}resume(){return this._internalMediaRecorder.resume()}start(i){return this._internalMediaRecorder.start(i)}stop(){return this._internalMediaRecorder.stop()}static isTypeSupported(i){return a!==null&&a.isTypeSupported!==void 0&&a.isTypeSupported(i)||o.some(u=>u.test(i))}},Er=e=>e!==null&&e.BlobEvent!==void 0?e.BlobEvent:null,Ar=(e,t)=>(n,r,o)=>{const s=[],a=new WeakMap,c=new WeakMap,i=new n(r,o),u=new WeakMap;let d=!0;return i.addEventListener=(l=>(h,m,w)=>{let f=m;return typeof m=="function"&&(h==="dataavailable"?(f=p=>{setTimeout(()=>{if(d&&i.state==="inactive")s.push(p.data);else{if(s.length>0){const g=p.data;Object.defineProperty(p,"data",{value:new Blob([...s,g],{type:g.type})}),s.length=0}m.call(i,p)}})},a.set(m,f)):h==="error"?(f=p=>{if(p.error===void 0)m.call(i,new ErrorEvent("error",{error:e()}));else if(p.error.name==="UnknownError"){const g=p.error.message;m.call(i,new ErrorEvent("error",{error:e(g)}))}else p instanceof ErrorEvent?m.call(i,p):m.call(i,new ErrorEvent("error",{error:p.error}))},c.set(m,f)):h==="stop"&&(f=p=>{d=!1,setTimeout(()=>m.call(i,p))},u.set(m,f))),l.call(i,h,f,w)})(i.addEventListener),i.dispatchEvent=(l=>h=>{let m;setTimeout(()=>{m=d,d=!1});const w=l.call(i,h);return setTimeout(()=>d=m),w})(i.dispatchEvent),i.removeEventListener=(l=>(h,m,w)=>{let f=m;if(typeof m=="function"){if(h==="dataavailable"){const p=a.get(m);p!==void 0&&(f=p)}else if(h==="error"){const p=c.get(m);p!==void 0&&(f=p)}else if(h==="stop"){const p=u.get(m);p!==void 0&&(f=p)}}return l.call(i,h,f,w)})(i.removeEventListener),i.start=(l=>h=>{if(o.mimeType!==void 0&&o.mimeType.startsWith("audio/")&&r.getVideoTracks().length>0)throw t();return d=h!==void 0,h===void 0?l.call(i):l.call(i,h)})(i.start),i},br=e=>e===null||e.MediaRecorder===void 0?null:e.MediaRecorder,$e=()=>{try{return new DOMException("","NotSupportedError")}catch(e){return e.code=9,e.name="NotSupportedError",e}},Cr=e=>(t,n,r,o=2)=>{const s=e(t,n);if(s===null)return s;const{length:a,value:c}=s;if(r==="master")return{content:null,length:a};if(n+a+c>t.byteLength)return null;if(r==="binary"){const i=(c/Float32Array.BYTES_PER_ELEMENT-1)/o,u=Array.from({length:o},()=>new Float32Array(i));for(let d=0;d<i;d+=1){const l=d*o+1;for(let h=0;h<o;h+=1)u[h][d]=t.getFloat32(n+a+(l+h)*Float32Array.BYTES_PER_ELEMENT,!0)}return{content:u,length:a+c}}return{content:null,length:a+c}},Tr=e=>(t,n)=>{const r=e(t,n);if(r===null)return r;const{length:o,value:s}=r;return s===35?{length:o,type:"binary"}:s===46||s===97||s===88713574||s===106212971||s===139690087||s===172351395||s===256095861?{length:o,type:"master"}:{length:o,type:"unknown"}},Nr=e=>(t,n)=>{const r=e(t,n);if(r===null)return r;const o=n+Math.floor((r-1)/8);if(o+r>t.byteLength)return null;let a=t.getUint8(o)&(1<<8-r%8)-1;for(let c=1;c<r;c+=1)a=(a<<8)+t.getUint8(o+c);return{length:r,value:a}},Ut=Symbol.observable||"@@observable";function Mr(e){return Symbol.observable||(typeof e=="function"&&e.prototype&&e.prototype[Symbol.observable]?(e.prototype[Ut]=e.prototype[Symbol.observable],delete e.prototype[Symbol.observable]):(e[Ut]=e[Symbol.observable],delete e[Symbol.observable])),e}const ke=()=>{},Bt=e=>{throw e};function Or(e){return e?e.next&&e.error&&e.complete?e:{complete:(e.complete??ke).bind(e),error:(e.error??Bt).bind(e),next:(e.next??ke).bind(e)}:{complete:ke,error:Bt,next:ke}}const Sr=e=>(t,n,r)=>e(o=>{const s=a=>o.next(a);return t.addEventListener(n,s,r),()=>t.removeEventListener(n,s,r)}),Rr=(e,t)=>{const n=()=>{},r=o=>typeof o[0]=="function";return o=>{const s=(...a)=>{const c=o(r(a)?t({next:a[0]}):t(...a));return c!==void 0?c:n};return s[Symbol.observable]=()=>({subscribe:(...a)=>({unsubscribe:s(...a)})}),e(s)}},Ir=Rr(Mr,Or),sn=Sr(Ir);/*!
2
- * dashify <https://github.com/jonschlinkert/dashify>
3
- *
4
- * Copyright (c) 2015-2017, Jon Schlinkert.
5
- * Released under the MIT License.
6
- */var kr=(e,t)=>{if(typeof e!="string")throw new TypeError("expected a string");return e.trim().replace(/([a-z])([A-Z])/g,"$1-$2").replace(/\W/g,n=>/[À-ž]/.test(n)?n:"-").replace(/^-+|-+$/g,"").replace(/-{2,}/g,n=>t&&t.condense?"-":n).toLowerCase()};const Lr=nn(kr);var an={exports:{}};(function(e){var t=function(n){var r,o,s=/\w+/.exec(n);if(s)o=s[0];else return"an";var a=o.toLowerCase(),c=["honest","hour","hono"];for(r in c)if(a.indexOf(c[r])==0)return"an";if(a.length==1)return"aedhilmnorsx".indexOf(a)>=0?"an":"a";if(o.match(/(?!FJO|[HLMNS]Y.|RY[EO]|SQU|(F[LR]?|[HL]|MN?|N|RH?|S[CHKLMNPTVW]?|X(YL)?)[AEIOU])[FHLMNRSX][A-Z]/))return"an";var i=[/^e[uw]/,/^onc?e\b/,/^uni([^nmd]|mo)/,/^u[bcfhjkqrst][aeiou]/];for(r=0;r<i.length;r++)if(a.match(i[r]))return"a";return o.match(/^U[NK][AIEO]/)?"a":o==o.toUpperCase()?"aedhilmnorsx".indexOf(a[0])>=0?"an":"a":"aeiou".indexOf(a[0])>=0||a.match(/^y(b[lor]|cl[ea]|fere|gg|p[ios]|rou|tt)/)?"an":"a"};e.exports=t})(an);var Pr=an.exports;const xr=nn(Pr),Dt=(e,t)=>t===void 0?e:t.reduce((n,r)=>{if(r==="capitalize"){const o=n.charAt(0).toUpperCase(),s=n.slice(1);return`${o}${s}`}return r==="dashify"?Lr(n):r==="prependIndefiniteArticle"?`${xr(n)} ${n}`:n},e),Ur=e=>{const t=e.name+e.modifiers.map(n=>`\\.${n}\\(\\)`).join("");return new RegExp(`\\$\\{${t}}`,"g")},Wt=(e,t)=>{const n=/\${([^.}]+)((\.[^(]+\(\))*)}/g,r=[];let o=n.exec(e);for(;o!==null;){const a={modifiers:[],name:o[1]};if(o[3]!==void 0){const c=/\.[^(]+\(\)/g;let i=c.exec(o[2]);for(;i!==null;)a.modifiers.push(i[0].slice(1,-2)),i=c.exec(o[2])}r.push(a),o=n.exec(e)}const s=r.reduce((a,c)=>a.map(i=>typeof i=="string"?i.split(Ur(c)).reduce((u,d,l)=>l===0?[d]:c.name in t?[...u,Dt(t[c.name],c.modifiers),d]:[...u,h=>Dt(h[c.name],c.modifiers),d],[]):[i]).reduce((i,u)=>[...i,...u],[]),[e]);return a=>s.reduce((c,i)=>typeof i=="string"?[...c,i]:[...c,i(a)],[]).join("")},Ge=(e,t={})=>{const n=e.code===void 0?void 0:Wt(e.code,t),r=e.message===void 0?void 0:Wt(e.message,t);function o(s={},a){const c=a===void 0&&(s instanceof Error||s.code!==void 0&&s.code.slice(-9)==="Exception"),{cause:i,missingParameters:u}=c?{cause:s,missingParameters:{}}:{cause:a,missingParameters:s},d=r===void 0?new Error:new Error(r(u));return i!==null&&(d.cause=i),n!==void 0&&(d.code=n(u)),e.status!==void 0&&(d.status=e.status),d}return o},ze={INTERNAL_ERROR:-32603,INVALID_PARAMS:-32602,METHOD_NOT_FOUND:-32601};Ge({message:'The requested method called "${method}" is not supported.',status:ze.METHOD_NOT_FOUND});Ge({message:'The handler of the method called "${method}" returned no required result.',status:ze.INTERNAL_ERROR});Ge({message:'The handler of the method called "${method}" returned an unexpected result.',status:ze.INTERNAL_ERROR});Ge({message:'The specified parameter called "portId" with the given value "${portId}" does not identify a port connected to this worker.',status:ze.INVALID_PARAMS});const Br=(e,t,n)=>async r=>{const o=new e([n],{type:"application/javascript; charset=utf-8"}),s=t.createObjectURL(o);try{await r(s)}finally{t.revokeObjectURL(s)}},Dr=e=>({data:t})=>{const{id:n}=t;if(n!==null){const r=e.get(n);if(r!==void 0){const{reject:o,resolve:s}=r;e.delete(n),t.error===void 0?s(t.result):o(new Error(t.error.message))}}},Wr=e=>(t,n)=>(r,o=[])=>new Promise((s,a)=>{const c=e(t);t.set(c,{reject:a,resolve:s}),n.postMessage({id:c,...r},o)}),Vr=(e,t,n,r)=>(o,s,a={})=>{const c=new o(s,"recorder-audio-worklet-processor",{...a,channelCountMode:"explicit",numberOfInputs:1,numberOfOutputs:0}),i=new Map,u=t(i,c.port),d=n(c.port,"message")(e(i));c.port.start();let l="inactive";return Object.defineProperties(c,{pause:{get(){return async()=>(r(["recording"],l),l="paused",u({method:"pause"}))}},port:{get(){throw new Error("The port of a RecorderAudioWorkletNode can't be accessed.")}},record:{get(){return async h=>(r(["inactive"],l),l="recording",u({method:"record",params:{encoderPort:h}},[h]))}},resume:{get(){return async()=>(r(["paused"],l),l="recording",u({method:"resume"}))}},stop:{get(){return async()=>{r(["paused","recording"],l),l="stopped";try{await u({method:"stop"})}finally{d()}}}}}),c},Fr=(e,t)=>{if(!e.includes(t))throw new Error(`Expected the state to be ${e.map(n=>`"${n}"`).join(" or ")} but it was "${t}".`)},jr='(()=>{"use strict";class e extends AudioWorkletProcessor{constructor(){super(),this._encoderPort=null,this._state="inactive",this.port.onmessage=e=>{let{data:t}=e;"pause"===t.method?"active"===this._state||"recording"===this._state?(this._state="paused",this._sendAcknowledgement(t.id)):this._sendUnexpectedStateError(t.id):"record"===t.method?"inactive"===this._state?(this._encoderPort=t.params.encoderPort,this._state="active",this._sendAcknowledgement(t.id)):this._sendUnexpectedStateError(t.id):"resume"===t.method?"paused"===this._state?(this._state="active",this._sendAcknowledgement(t.id)):this._sendUnexpectedStateError(t.id):"stop"===t.method?"active"!==this._state&&"paused"!==this._state&&"recording"!==this._state||null===this._encoderPort?this._sendUnexpectedStateError(t.id):(this._stop(this._encoderPort),this._sendAcknowledgement(t.id)):"number"==typeof t.id&&this.port.postMessage({error:{code:-32601,message:"The requested method is not supported."},id:t.id})}}process(e){let[t]=e;if("inactive"===this._state||"paused"===this._state)return!0;if("active"===this._state){if(void 0===t)throw new Error("No channelData was received for the first input.");if(0===t.length)return!0;this._state="recording"}if("recording"===this._state&&null!==this._encoderPort){if(void 0===t)throw new Error("No channelData was received for the first input.");if(0!==t.length)return this._encoderPort.postMessage(t,t.map((e=>{let{buffer:t}=e;return t}))),!0;this._stop(this._encoderPort)}return!1}_sendAcknowledgement(e){this.port.postMessage({id:e,result:null})}_sendUnexpectedStateError(e){this.port.postMessage({error:{code:-32603,message:"The internal state does not allow to process the given message."},id:e})}_stop(e){e.postMessage([]),e.close(),this._encoderPort=null,this._state="stopped"}}e.parameterDescriptors=[],registerProcessor("recorder-audio-worklet-processor",e)})();',$r=Br(Blob,URL,jr),Gr=Vr(Dr,Wr(cr),sn,Fr),Vt=(e,t,n)=>({endTime:t,insertTime:n,type:"exponentialRampToValue",value:e}),Ft=(e,t,n)=>({endTime:t,insertTime:n,type:"linearRampToValue",value:e}),at=(e,t)=>({startTime:t,type:"setValue",value:e}),cn=(e,t,n)=>({duration:n,startTime:t,type:"setValueCurve",values:e}),un=(e,t,{startTime:n,target:r,timeConstant:o})=>r+(t-r)*Math.exp((n-e)/o),ge=e=>e.type==="exponentialRampToValue",Be=e=>e.type==="linearRampToValue",oe=e=>ge(e)||Be(e),yt=e=>e.type==="setValue",te=e=>e.type==="setValueCurve",De=(e,t,n,r)=>{const o=e[t];return o===void 0?r:oe(o)||yt(o)?o.value:te(o)?o.values[o.values.length-1]:un(n,De(e,t-1,o.startTime,r),o)},jt=(e,t,n,r,o)=>n===void 0?[r.insertTime,o]:oe(n)?[n.endTime,n.value]:yt(n)?[n.startTime,n.value]:te(n)?[n.startTime+n.duration,n.values[n.values.length-1]]:[n.startTime,De(e,t-1,n.startTime,o)],it=e=>e.type==="cancelAndHold",ct=e=>e.type==="cancelScheduledValues",re=e=>it(e)||ct(e)?e.cancelTime:ge(e)||Be(e)?e.endTime:e.startTime,$t=(e,t,n,{endTime:r,value:o})=>n===o?o:0<n&&0<o||n<0&&o<0?n*(o/n)**((e-t)/(r-t)):0,Gt=(e,t,n,{endTime:r,value:o})=>n+(e-t)/(r-t)*(o-n),zr=(e,t)=>{const n=Math.floor(t),r=Math.ceil(t);return n===r?e[n]:(1-(t-n))*e[n]+(1-(r-t))*e[r]},qr=(e,{duration:t,startTime:n,values:r})=>{const o=(e-n)/t*(r.length-1);return zr(r,o)},Le=e=>e.type==="setTarget";class Hr{constructor(t){this._automationEvents=[],this._currenTime=0,this._defaultValue=t}[Symbol.iterator](){return this._automationEvents[Symbol.iterator]()}add(t){const n=re(t);if(it(t)||ct(t)){const r=this._automationEvents.findIndex(s=>ct(t)&&te(s)?s.startTime+s.duration>=n:re(s)>=n),o=this._automationEvents[r];if(r!==-1&&(this._automationEvents=this._automationEvents.slice(0,r)),it(t)){const s=this._automationEvents[this._automationEvents.length-1];if(o!==void 0&&oe(o)){if(Le(s))throw new Error("The internal list is malformed.");const a=te(s)?s.startTime+s.duration:re(s),c=te(s)?s.values[s.values.length-1]:s.value,i=ge(o)?$t(n,a,c,o):Gt(n,a,c,o),u=ge(o)?Vt(i,n,this._currenTime):Ft(i,n,this._currenTime);this._automationEvents.push(u)}s!==void 0&&Le(s)&&this._automationEvents.push(at(this.getValue(n),n)),s!==void 0&&te(s)&&s.startTime+s.duration>n&&(this._automationEvents[this._automationEvents.length-1]=cn(new Float32Array([6,7]),s.startTime,n-s.startTime))}}else{const r=this._automationEvents.findIndex(a=>re(a)>n),o=r===-1?this._automationEvents[this._automationEvents.length-1]:this._automationEvents[r-1];if(o!==void 0&&te(o)&&re(o)+o.duration>n)return!1;const s=ge(t)?Vt(t.value,t.endTime,this._currenTime):Be(t)?Ft(t.value,n,this._currenTime):t;if(r===-1)this._automationEvents.push(s);else{if(te(t)&&n+t.duration>re(this._automationEvents[r]))return!1;this._automationEvents.splice(r,0,s)}}return!0}flush(t){const n=this._automationEvents.findIndex(r=>re(r)>t);if(n>1){const r=this._automationEvents.slice(n-1),o=r[0];Le(o)&&r.unshift(at(De(this._automationEvents,n-2,o.startTime,this._defaultValue),o.startTime)),this._automationEvents=r}}getValue(t){if(this._automationEvents.length===0)return this._defaultValue;const n=this._automationEvents.findIndex(a=>re(a)>t),r=this._automationEvents[n],o=(n===-1?this._automationEvents.length:n)-1,s=this._automationEvents[o];if(s!==void 0&&Le(s)&&(r===void 0||!oe(r)||r.insertTime>t))return un(t,De(this._automationEvents,o-1,s.startTime,this._defaultValue),s);if(s!==void 0&&yt(s)&&(r===void 0||!oe(r)))return s.value;if(s!==void 0&&te(s)&&(r===void 0||!oe(r)||s.startTime+s.duration>t))return t<s.startTime+s.duration?qr(t,s):s.values[s.values.length-1];if(s!==void 0&&oe(s)&&(r===void 0||!oe(r)))return s.value;if(r!==void 0&&ge(r)){const[a,c]=jt(this._automationEvents,o,s,r,this._defaultValue);return $t(t,a,c,r)}if(r!==void 0&&Be(r)){const[a,c]=jt(this._automationEvents,o,s,r,this._defaultValue);return Gt(t,a,c,r)}return this._defaultValue}}const Yr=e=>({cancelTime:e,type:"cancelAndHold"}),Xr=e=>({cancelTime:e,type:"cancelScheduledValues"}),Zr=(e,t)=>({endTime:t,type:"exponentialRampToValue",value:e}),Kr=(e,t)=>({endTime:t,type:"linearRampToValue",value:e}),Jr=(e,t,n)=>({startTime:t,target:e,timeConstant:n,type:"setTarget"}),Qr=()=>new DOMException("","AbortError"),eo=e=>(t,n,[r,o,s],a)=>{e(t[o],[n,r,s],c=>c[0]===n&&c[1]===r,a)},to=e=>(t,n,r)=>{const o=[];for(let s=0;s<r.numberOfInputs;s+=1)o.push(new Set);e.set(t,{activeInputs:o,outputs:new Set,passiveInputs:new WeakMap,renderer:n})},no=e=>(t,n)=>{e.set(t,{activeInputs:new Set,passiveInputs:new WeakMap,renderer:n})},we=new WeakSet,ln=new WeakMap,dn=new WeakMap,fn=new WeakMap,hn=new WeakMap,pn=new WeakMap,mn=new WeakMap,ut=new WeakMap,lt=new WeakMap,dt=new WeakMap,gn={construct(){return gn}},ro=e=>{try{const t=new Proxy(e,gn);new t}catch{return!1}return!0},zt=/^import(?:(?:[\s]+[\w]+|(?:[\s]+[\w]+[\s]*,)?[\s]*\{[\s]*[\w]+(?:[\s]+as[\s]+[\w]+)?(?:[\s]*,[\s]*[\w]+(?:[\s]+as[\s]+[\w]+)?)*[\s]*}|(?:[\s]+[\w]+[\s]*,)?[\s]*\*[\s]+as[\s]+[\w]+)[\s]+from)?(?:[\s]*)("([^"\\]|\\.)+"|'([^'\\]|\\.)+')(?:[\s]*);?/,qt=(e,t)=>{const n=[];let r=e.replace(/^[\s]+/,""),o=r.match(zt);for(;o!==null;){const s=o[1].slice(1,-1),a=o[0].replace(/([\s]+)?;?$/,"").replace(s,new URL(s,t).toString());n.push(a),r=r.slice(o[0].length).replace(/^[\s]+/,""),o=r.match(zt)}return[n.join(";"),r]},Ht=e=>{if(e!==void 0&&!Array.isArray(e))throw new TypeError("The parameterDescriptors property of given value for processorCtor is not an array.")},Yt=e=>{if(!ro(e))throw new TypeError("The given value for processorCtor should be a constructor.");if(e.prototype===null||typeof e.prototype!="object")throw new TypeError("The given value for processorCtor should have a prototype.")},oo=(e,t,n,r,o,s,a,c,i,u,d,l,h)=>{let m=0;return(w,f,p={credentials:"omit"})=>{const g=d.get(w);if(g!==void 0&&g.has(f))return Promise.resolve();const v=u.get(w);if(v!==void 0){const _=v.get(f);if(_!==void 0)return _}const A=s(w),T=A.audioWorklet===void 0?o(f).then(([_,E])=>{const[y,C]=qt(_,E),M=`${y};((a,b)=>{(a[b]=a[b]||[]).push((AudioWorkletProcessor,global,registerProcessor,sampleRate,self,window)=>{${C}
7
- })})(window,'_AWGS')`;return n(M)}).then(()=>{const _=h._AWGS.pop();if(_===void 0)throw new SyntaxError;r(A.currentTime,A.sampleRate,()=>_(class{},void 0,(E,y)=>{if(E.trim()==="")throw t();const C=lt.get(A);if(C!==void 0){if(C.has(E))throw t();Yt(y),Ht(y.parameterDescriptors),C.set(E,y)}else Yt(y),Ht(y.parameterDescriptors),lt.set(A,new Map([[E,y]]))},A.sampleRate,void 0,void 0))}):Promise.all([o(f),Promise.resolve(e(l,l))]).then(([[_,E],y])=>{const C=m+1;m=C;const[M,I]=qt(_,E),B=`${M};((AudioWorkletProcessor,registerProcessor)=>{${I}
8
- })(${y?"AudioWorkletProcessor":"class extends AudioWorkletProcessor {__b=new WeakSet();constructor(){super();(p=>p.postMessage=(q=>(m,t)=>q.call(p,m,t?t.filter(u=>!this.__b.has(u)):t))(p.postMessage))(this.port)}}"},(n,p)=>registerProcessor(n,class extends p{${y?"":"__c = (a) => a.forEach(e=>this.__b.add(e.buffer));"}process(i,o,p){${y?"":"i.forEach(this.__c);o.forEach(this.__c);this.__c(Object.values(p));"}return super.process(i.map(j=>j.some(k=>k.length===0)?[]:j),o,p)}}));registerProcessor('__sac${C}',class extends AudioWorkletProcessor{process(){return !1}})`,U=new Blob([B],{type:"application/javascript; charset=utf-8"}),R=URL.createObjectURL(U);return A.audioWorklet.addModule(R,p).then(()=>{if(c(A))return A;const x=a(A);return x.audioWorklet.addModule(R,p).then(()=>x)}).then(x=>{if(i===null)throw new SyntaxError;try{new i(x,`__sac${C}`)}catch{throw new SyntaxError}}).finally(()=>URL.revokeObjectURL(R))});return v===void 0?u.set(w,new Map([[f,T]])):v.set(f,T),T.then(()=>{const _=d.get(w);_===void 0?d.set(w,new Set([f])):_.add(f)}).finally(()=>{const _=u.get(w);_!==void 0&&_.delete(f)}),T}},K=(e,t)=>{const n=e.get(t);if(n===void 0)throw new Error("A value with the given key could not be found.");return n},qe=(e,t)=>{const n=Array.from(e).filter(t);if(n.length>1)throw Error("More than one element was found.");if(n.length===0)throw Error("No element was found.");const[r]=n;return e.delete(r),r},wn=(e,t,n,r)=>{const o=K(e,t),s=qe(o,a=>a[0]===n&&a[1]===r);return o.size===0&&e.delete(t),s},be=e=>K(mn,e),ye=e=>{if(we.has(e))throw new Error("The AudioNode is already stored.");we.add(e),be(e).forEach(t=>t(!0))},vn=e=>"port"in e,He=e=>{if(!we.has(e))throw new Error("The AudioNode is not stored.");we.delete(e),be(e).forEach(t=>t(!1))},ft=(e,t)=>{!vn(e)&&t.every(n=>n.size===0)&&He(e)},so=(e,t,n,r,o,s,a,c,i,u,d,l,h)=>{const m=new WeakMap;return(w,f,p,g,v)=>{const{activeInputs:A,passiveInputs:T}=s(f),{outputs:_}=s(w),E=c(w),y=C=>{const M=i(f),I=i(w);if(C){const N=wn(T,w,p,g);e(A,w,N,!1),!v&&!l(w)&&n(I,M,p,g),h(f)&&ye(f)}else{const N=r(A,w,p,g);t(T,g,N,!1),!v&&!l(w)&&o(I,M,p,g);const P=a(f);if(P===0)d(f)&&ft(f,A);else{const k=m.get(f);k!==void 0&&clearTimeout(k),m.set(f,setTimeout(()=>{d(f)&&ft(f,A)},P*1e3))}}};return u(_,[f,p,g],C=>C[0]===f&&C[1]===p&&C[2]===g,!0)?(E.add(y),d(w)?e(A,w,[p,g,y],!0):t(T,g,[w,p,y],!0),!0):!1}},ao=e=>(t,n,[r,o,s],a)=>{const c=t.get(r);c===void 0?t.set(r,new Set([[o,n,s]])):e(c,[o,n,s],i=>i[0]===o&&i[1]===n,a)},io=e=>(t,n)=>{const r=e(t,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"discrete",gain:0});n.connect(r).connect(t.destination);const o=()=>{n.removeEventListener("ended",o),n.disconnect(r),r.disconnect()};n.addEventListener("ended",o)},co=e=>(t,n)=>{e(t).add(n)},Et=(e,t)=>e.context===t,ht=e=>{try{e.copyToChannel(new Float32Array(1),0,-1)}catch{return!1}return!0},ie=()=>new DOMException("","IndexSizeError"),_n=e=>{e.getChannelData=(t=>n=>{try{return t.call(e,n)}catch(r){throw r.code===12?ie():r}})(e.getChannelData)},uo={numberOfChannels:1},lo=(e,t,n,r,o,s,a,c)=>{let i=null;return class yn{constructor(d){if(o===null)throw new Error("Missing the native OfflineAudioContext constructor.");const{length:l,numberOfChannels:h,sampleRate:m}={...uo,...d};i===null&&(i=new o(1,1,44100));const w=r!==null&&t(s,s)?new r({length:l,numberOfChannels:h,sampleRate:m}):i.createBuffer(h,l,m);if(w.numberOfChannels===0)throw n();return typeof w.copyFromChannel!="function"?(a(w),_n(w)):t(ht,()=>ht(w))||c(w),e.add(w),w}static[Symbol.hasInstance](d){return d!==null&&typeof d=="object"&&Object.getPrototypeOf(d)===yn.prototype||e.has(d)}}},Ce=-34028234663852886e22,Ye=-Ce,se=e=>we.has(e),fo={buffer:null,channelCount:2,channelCountMode:"max",channelInterpretation:"speakers",loop:!1,loopEnd:0,loopStart:0,playbackRate:1},ho=(e,t,n,r,o,s,a,c)=>class extends e{constructor(u,d){const l=s(u),h={...fo,...d},m=o(l,h),w=a(l),f=w?t():null;super(u,!1,m,f),this._audioBufferSourceNodeRenderer=f,this._isBufferNullified=!1,this._isBufferSet=h.buffer!==null,this._nativeAudioBufferSourceNode=m,this._onended=null,this._playbackRate=n(this,w,m.playbackRate,Ye,Ce)}get buffer(){return this._isBufferNullified?null:this._nativeAudioBufferSourceNode.buffer}set buffer(u){if(this._nativeAudioBufferSourceNode.buffer=u,u!==null){if(this._isBufferSet)throw r();this._isBufferSet=!0}}get loop(){return this._nativeAudioBufferSourceNode.loop}set loop(u){this._nativeAudioBufferSourceNode.loop=u}get loopEnd(){return this._nativeAudioBufferSourceNode.loopEnd}set loopEnd(u){this._nativeAudioBufferSourceNode.loopEnd=u}get loopStart(){return this._nativeAudioBufferSourceNode.loopStart}set loopStart(u){this._nativeAudioBufferSourceNode.loopStart=u}get onended(){return this._onended}set onended(u){const d=typeof u=="function"?c(this,u):null;this._nativeAudioBufferSourceNode.onended=d;const l=this._nativeAudioBufferSourceNode.onended;this._onended=l!==null&&l===d?u:l}get playbackRate(){return this._playbackRate}start(u=0,d=0,l){if(this._nativeAudioBufferSourceNode.start(u,d,l),this._audioBufferSourceNodeRenderer!==null&&(this._audioBufferSourceNodeRenderer.start=l===void 0?[u,d]:[u,d,l]),this.context.state!=="closed"){ye(this);const h=()=>{this._nativeAudioBufferSourceNode.removeEventListener("ended",h),se(this)&&He(this)};this._nativeAudioBufferSourceNode.addEventListener("ended",h)}}stop(u=0){this._nativeAudioBufferSourceNode.stop(u),this._audioBufferSourceNodeRenderer!==null&&(this._audioBufferSourceNodeRenderer.stop=u)}},po=(e,t,n,r,o)=>()=>{const s=new WeakMap;let a=null,c=null;const i=async(u,d)=>{let l=n(u);const h=Et(l,d);if(!h){const m={buffer:l.buffer,channelCount:l.channelCount,channelCountMode:l.channelCountMode,channelInterpretation:l.channelInterpretation,loop:l.loop,loopEnd:l.loopEnd,loopStart:l.loopStart,playbackRate:l.playbackRate.value};l=t(d,m),a!==null&&l.start(...a),c!==null&&l.stop(c)}return s.set(d,l),h?await e(d,u.playbackRate,l.playbackRate):await r(d,u.playbackRate,l.playbackRate),await o(u,d,l),l};return{set start(u){a=u},set stop(u){c=u},render(u,d){const l=s.get(d);return l!==void 0?Promise.resolve(l):i(u,d)}}},mo=e=>"playbackRate"in e,go=e=>"frequency"in e&&"gain"in e,wo=e=>"offset"in e,vo=e=>!("frequency"in e)&&"gain"in e,_o=e=>"detune"in e&&"frequency"in e,yo=e=>"pan"in e,q=e=>K(ln,e),Te=e=>K(fn,e),pt=(e,t)=>{const{activeInputs:n}=q(e);n.forEach(o=>o.forEach(([s])=>{t.includes(e)||pt(s,[...t,e])}));const r=mo(e)?[e.playbackRate]:vn(e)?Array.from(e.parameters.values()):go(e)?[e.Q,e.detune,e.frequency,e.gain]:wo(e)?[e.offset]:vo(e)?[e.gain]:_o(e)?[e.detune,e.frequency]:yo(e)?[e.pan]:[];for(const o of r){const s=Te(o);s!==void 0&&s.activeInputs.forEach(([a])=>pt(a,t))}se(e)&&He(e)},Eo=e=>{pt(e.destination,[])},Ao=e=>e===void 0||typeof e=="number"||typeof e=="string"&&(e==="balanced"||e==="interactive"||e==="playback"),bo=(e,t,n,r,o,s,a,c)=>class extends e{constructor(u,d){const l=s(u),h=a(l),m=o(l,d,h),w=h?t(c):null;super(u,!1,m,w),this._isNodeOfNativeOfflineAudioContext=h,this._nativeAudioDestinationNode=m}get channelCount(){return this._nativeAudioDestinationNode.channelCount}set channelCount(u){if(this._isNodeOfNativeOfflineAudioContext)throw r();if(u>this._nativeAudioDestinationNode.maxChannelCount)throw n();this._nativeAudioDestinationNode.channelCount=u}get channelCountMode(){return this._nativeAudioDestinationNode.channelCountMode}set channelCountMode(u){if(this._isNodeOfNativeOfflineAudioContext)throw r();this._nativeAudioDestinationNode.channelCountMode=u}get maxChannelCount(){return this._nativeAudioDestinationNode.maxChannelCount}},Co=e=>{const t=new WeakMap,n=async(r,o)=>{const s=o.destination;return t.set(o,s),await e(r,o,s),s};return{render(r,o){const s=t.get(o);return s!==void 0?Promise.resolve(s):n(r,o)}}},To=(e,t,n,r,o,s,a,c)=>(i,u)=>{const d=u.listener,l=()=>{const _=new Float32Array(1),E=t(u,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"speakers",numberOfInputs:9}),y=a(u);let C=!1,M=[0,0,-1,0,1,0],I=[0,0,0];const N=()=>{if(C)return;C=!0;const U=r(u,256,9,0);U.onaudioprocess=({inputBuffer:R})=>{const x=[s(R,_,0),s(R,_,1),s(R,_,2),s(R,_,3),s(R,_,4),s(R,_,5)];x.some((O,L)=>O!==M[L])&&(d.setOrientation(...x),M=x);const D=[s(R,_,6),s(R,_,7),s(R,_,8)];D.some((O,L)=>O!==I[L])&&(d.setPosition(...D),I=D)},E.connect(U)},P=U=>R=>{R!==M[U]&&(M[U]=R,d.setOrientation(...M))},k=U=>R=>{R!==I[U]&&(I[U]=R,d.setPosition(...I))},B=(U,R,x)=>{const D=n(u,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"discrete",offset:R});D.connect(E,0,U),D.start(),Object.defineProperty(D.offset,"defaultValue",{get(){return R}});const O=e({context:i},y,D.offset,Ye,Ce);return c(O,"value",L=>()=>L.call(O),L=>W=>{try{L.call(O,W)}catch(G){if(G.code!==9)throw G}N(),y&&x(W)}),O.cancelAndHoldAtTime=(L=>y?()=>{throw o()}:(...W)=>{const G=L.apply(O,W);return N(),G})(O.cancelAndHoldAtTime),O.cancelScheduledValues=(L=>y?()=>{throw o()}:(...W)=>{const G=L.apply(O,W);return N(),G})(O.cancelScheduledValues),O.exponentialRampToValueAtTime=(L=>y?()=>{throw o()}:(...W)=>{const G=L.apply(O,W);return N(),G})(O.exponentialRampToValueAtTime),O.linearRampToValueAtTime=(L=>y?()=>{throw o()}:(...W)=>{const G=L.apply(O,W);return N(),G})(O.linearRampToValueAtTime),O.setTargetAtTime=(L=>y?()=>{throw o()}:(...W)=>{const G=L.apply(O,W);return N(),G})(O.setTargetAtTime),O.setValueAtTime=(L=>y?()=>{throw o()}:(...W)=>{const G=L.apply(O,W);return N(),G})(O.setValueAtTime),O.setValueCurveAtTime=(L=>y?()=>{throw o()}:(...W)=>{const G=L.apply(O,W);return N(),G})(O.setValueCurveAtTime),O};return{forwardX:B(0,0,P(0)),forwardY:B(1,0,P(1)),forwardZ:B(2,-1,P(2)),positionX:B(6,0,k(0)),positionY:B(7,0,k(1)),positionZ:B(8,0,k(2)),upX:B(3,0,P(3)),upY:B(4,1,P(4)),upZ:B(5,0,P(5))}},{forwardX:h,forwardY:m,forwardZ:w,positionX:f,positionY:p,positionZ:g,upX:v,upY:A,upZ:T}=d.forwardX===void 0?l():d;return{get forwardX(){return h},get forwardY(){return m},get forwardZ(){return w},get positionX(){return f},get positionY(){return p},get positionZ(){return g},get upX(){return v},get upY(){return A},get upZ(){return T}}},We=e=>"context"in e,Ne=e=>We(e[0]),le=(e,t,n,r)=>{for(const o of e)if(n(o)){if(r)return!1;throw Error("The set contains at least one similar element.")}return e.add(t),!0},Xt=(e,t,[n,r],o)=>{le(e,[t,n,r],s=>s[0]===t&&s[1]===n,o)},Zt=(e,[t,n,r],o)=>{const s=e.get(t);s===void 0?e.set(t,new Set([[n,r]])):le(s,[n,r],a=>a[0]===n,o)},En=e=>"inputs"in e,mt=(e,t,n,r)=>{if(En(t)){const o=t.inputs[r];return e.connect(o,n,0),[o,n,0]}return e.connect(t,n,r),[t,n,r]},An=(e,t,n)=>{for(const r of e)if(r[0]===t&&r[1]===n)return e.delete(r),r;return null},No=(e,t,n)=>qe(e,r=>r[0]===t&&r[1]===n),bn=(e,t)=>{if(!be(e).delete(t))throw new Error("Missing the expected event listener.")},Cn=(e,t,n)=>{const r=K(e,t),o=qe(r,s=>s[0]===n);return r.size===0&&e.delete(t),o},gt=(e,t,n,r)=>{En(t)?e.disconnect(t.inputs[r],n,0):e.disconnect(t,n,r)},X=e=>K(dn,e),Ee=e=>K(hn,e),ue=e=>ut.has(e),xe=e=>!we.has(e),Kt=(e,t)=>new Promise(n=>{if(t!==null)n(!0);else{const r=e.createScriptProcessor(256,1,1),o=e.createGain(),s=e.createBuffer(1,2,44100),a=s.getChannelData(0);a[0]=1,a[1]=1;const c=e.createBufferSource();c.buffer=s,c.loop=!0,c.connect(r).connect(e.destination),c.connect(o),c.disconnect(o),r.onaudioprocess=i=>{const u=i.inputBuffer.getChannelData(0);Array.prototype.some.call(u,d=>d===1)?n(!0):n(!1),c.stop(),r.onaudioprocess=null,c.disconnect(r),r.disconnect(e.destination)},c.start()}}),ot=(e,t)=>{const n=new Map;for(const r of e)for(const o of r){const s=n.get(o);n.set(o,s===void 0?1:s+1)}n.forEach((r,o)=>t(o,r))},Ve=e=>"context"in e,Mo=e=>{const t=new Map;e.connect=(n=>(r,o=0,s=0)=>{const a=Ve(r)?n(r,o,s):n(r,o),c=t.get(r);return c===void 0?t.set(r,[{input:s,output:o}]):c.every(i=>i.input!==s||i.output!==o)&&c.push({input:s,output:o}),a})(e.connect.bind(e)),e.disconnect=(n=>(r,o,s)=>{if(n.apply(e),r===void 0)t.clear();else if(typeof r=="number")for(const[a,c]of t){const i=c.filter(u=>u.output!==r);i.length===0?t.delete(a):t.set(a,i)}else if(t.has(r))if(o===void 0)t.delete(r);else{const a=t.get(r);if(a!==void 0){const c=a.filter(i=>i.output!==o&&(i.input!==s||s===void 0));c.length===0?t.delete(r):t.set(r,c)}}for(const[a,c]of t)c.forEach(i=>{Ve(a)?e.connect(a,i.output,i.input):e.connect(a,i.output)})})(e.disconnect)},Oo=(e,t,n,r)=>{const{activeInputs:o,passiveInputs:s}=Te(t),{outputs:a}=q(e),c=be(e),i=u=>{const d=X(e),l=Ee(t);if(u){const h=Cn(s,e,n);Xt(o,e,h,!1),!r&&!ue(e)&&d.connect(l,n)}else{const h=No(o,e,n);Zt(s,h,!1),!r&&!ue(e)&&d.disconnect(l,n)}};return le(a,[t,n],u=>u[0]===t&&u[1]===n,!0)?(c.add(i),se(e)?Xt(o,e,[n,i],!0):Zt(s,[e,n,i],!0),!0):!1},So=(e,t,n,r)=>{const{activeInputs:o,passiveInputs:s}=q(t),a=An(o[r],e,n);return a===null?[wn(s,e,n,r)[2],!1]:[a[2],!0]},Ro=(e,t,n)=>{const{activeInputs:r,passiveInputs:o}=Te(t),s=An(r,e,n);return s===null?[Cn(o,e,n)[1],!1]:[s[2],!0]},At=(e,t,n,r,o)=>{const[s,a]=So(e,n,r,o);if(s!==null&&(bn(e,s),a&&!t&&!ue(e)&&gt(X(e),X(n),r,o)),se(n)){const{activeInputs:c}=q(n);ft(n,c)}},bt=(e,t,n,r)=>{const[o,s]=Ro(e,n,r);o!==null&&(bn(e,o),s&&!t&&!ue(e)&&X(e).disconnect(Ee(n),r))},Io=(e,t)=>{const n=q(e),r=[];for(const o of n.outputs)Ne(o)?At(e,t,...o):bt(e,t,...o),r.push(o[0]);return n.outputs.clear(),r},ko=(e,t,n)=>{const r=q(e),o=[];for(const s of r.outputs)s[1]===n&&(Ne(s)?At(e,t,...s):bt(e,t,...s),o.push(s[0]),r.outputs.delete(s));return o},Lo=(e,t,n,r,o)=>{const s=q(e);return Array.from(s.outputs).filter(a=>a[0]===n&&(r===void 0||a[1]===r)&&(o===void 0||a[2]===o)).map(a=>(Ne(a)?At(e,t,...a):bt(e,t,...a),s.outputs.delete(a),a[0]))},Po=(e,t,n,r,o,s,a,c,i,u,d,l,h,m,w,f)=>class extends u{constructor(g,v,A,T){super(A),this._context=g,this._nativeAudioNode=A;const _=d(g);l(_)&&n(Kt,()=>Kt(_,f))!==!0&&Mo(A),dn.set(this,A),mn.set(this,new Set),g.state!=="closed"&&v&&ye(this),e(this,T,A)}get channelCount(){return this._nativeAudioNode.channelCount}set channelCount(g){this._nativeAudioNode.channelCount=g}get channelCountMode(){return this._nativeAudioNode.channelCountMode}set channelCountMode(g){this._nativeAudioNode.channelCountMode=g}get channelInterpretation(){return this._nativeAudioNode.channelInterpretation}set channelInterpretation(g){this._nativeAudioNode.channelInterpretation=g}get context(){return this._context}get numberOfInputs(){return this._nativeAudioNode.numberOfInputs}get numberOfOutputs(){return this._nativeAudioNode.numberOfOutputs}connect(g,v=0,A=0){if(v<0||v>=this._nativeAudioNode.numberOfOutputs)throw o();const T=d(this._context),_=w(T);if(h(g)||m(g))throw s();if(We(g)){const C=X(g);try{const I=mt(this._nativeAudioNode,C,v,A),N=xe(this);(_||N)&&this._nativeAudioNode.disconnect(...I),this.context.state!=="closed"&&!N&&xe(g)&&ye(g)}catch(I){throw I.code===12?s():I}if(t(this,g,v,A,_)){const I=i([this],g);ot(I,r(_))}return g}const E=Ee(g);if(E.name==="playbackRate"&&E.maxValue===1024)throw a();try{this._nativeAudioNode.connect(E,v),(_||xe(this))&&this._nativeAudioNode.disconnect(E,v)}catch(C){throw C.code===12?s():C}if(Oo(this,g,v,_)){const C=i([this],g);ot(C,r(_))}}disconnect(g,v,A){let T;const _=d(this._context),E=w(_);if(g===void 0)T=Io(this,E);else if(typeof g=="number"){if(g<0||g>=this.numberOfOutputs)throw o();T=ko(this,E,g)}else{if(v!==void 0&&(v<0||v>=this.numberOfOutputs)||We(g)&&A!==void 0&&(A<0||A>=g.numberOfInputs))throw o();if(T=Lo(this,E,g,v,A),T.length===0)throw s()}for(const y of T){const C=i([this],y);ot(C,c)}}},xo=(e,t,n,r,o,s,a,c,i,u,d,l,h)=>(m,w,f,p=null,g=null)=>{const v=new Hr(f.defaultValue),A=w?r(v):null,T={get defaultValue(){return f.defaultValue},get maxValue(){return p===null?f.maxValue:p},get minValue(){return g===null?f.minValue:g},get value(){return f.value},set value(_){f.value=_,T.setValueAtTime(_,m.context.currentTime)},cancelAndHoldAtTime(_){if(typeof f.cancelAndHoldAtTime=="function")A===null&&v.flush(m.context.currentTime),v.add(o(_)),f.cancelAndHoldAtTime(_);else{const E=Array.from(v).pop();A===null&&v.flush(m.context.currentTime),v.add(o(_));const y=Array.from(v).pop();f.cancelScheduledValues(_),E!==y&&y!==void 0&&(y.type==="exponentialRampToValue"?f.exponentialRampToValueAtTime(y.value,y.endTime):y.type==="linearRampToValue"?f.linearRampToValueAtTime(y.value,y.endTime):y.type==="setValue"?f.setValueAtTime(y.value,y.startTime):y.type==="setValueCurve"&&f.setValueCurveAtTime(y.values,y.startTime,y.duration))}return T},cancelScheduledValues(_){return A===null&&v.flush(m.context.currentTime),v.add(s(_)),f.cancelScheduledValues(_),T},exponentialRampToValueAtTime(_,E){if(_===0)throw new RangeError;if(!Number.isFinite(E)||E<0)throw new RangeError;return A===null&&v.flush(m.context.currentTime),v.add(a(_,E)),f.exponentialRampToValueAtTime(_,E),T},linearRampToValueAtTime(_,E){return A===null&&v.flush(m.context.currentTime),v.add(c(_,E)),f.linearRampToValueAtTime(_,E),T},setTargetAtTime(_,E,y){return A===null&&v.flush(m.context.currentTime),v.add(i(_,E,y)),f.setTargetAtTime(_,E,y),T},setValueAtTime(_,E){return A===null&&v.flush(m.context.currentTime),v.add(u(_,E)),f.setValueAtTime(_,E),T},setValueCurveAtTime(_,E,y){const C=_ instanceof Float32Array?_:new Float32Array(_);if(l!==null&&l.name==="webkitAudioContext"){const M=E+y,I=m.context.sampleRate,N=Math.ceil(E*I),P=Math.floor(M*I),k=P-N,B=new Float32Array(k);for(let R=0;R<k;R+=1){const x=(C.length-1)/y*((N+R)/I-E),D=Math.floor(x),O=Math.ceil(x);B[R]=D===O?C[D]:(1-(x-D))*C[D]+(1-(O-x))*C[O]}A===null&&v.flush(m.context.currentTime),v.add(d(B,E,y)),f.setValueCurveAtTime(B,E,y);const U=P/I;U<M&&h(T,B[B.length-1],U),h(T,C[C.length-1],M)}else A===null&&v.flush(m.context.currentTime),v.add(d(C,E,y)),f.setValueCurveAtTime(C,E,y);return T}};return n.set(T,f),t.set(T,m),e(T,A),T},Uo=e=>({replay(t){for(const n of e)if(n.type==="exponentialRampToValue"){const{endTime:r,value:o}=n;t.exponentialRampToValueAtTime(o,r)}else if(n.type==="linearRampToValue"){const{endTime:r,value:o}=n;t.linearRampToValueAtTime(o,r)}else if(n.type==="setTarget"){const{startTime:r,target:o,timeConstant:s}=n;t.setTargetAtTime(o,r,s)}else if(n.type==="setValue"){const{startTime:r,value:o}=n;t.setValueAtTime(o,r)}else if(n.type==="setValueCurve"){const{duration:r,startTime:o,values:s}=n;t.setValueCurveAtTime(s,o,r)}else throw new Error("Can't apply an unknown automation.")}});class Tn{constructor(t){this._map=new Map(t)}get size(){return this._map.size}entries(){return this._map.entries()}forEach(t,n=null){return this._map.forEach((r,o)=>t.call(n,r,o,this))}get(t){return this._map.get(t)}has(t){return this._map.has(t)}keys(){return this._map.keys()}values(){return this._map.values()}}const Bo={channelCount:2,channelCountMode:"explicit",channelInterpretation:"speakers",numberOfInputs:1,numberOfOutputs:1,parameterData:{},processorOptions:{}},Do=(e,t,n,r,o,s,a,c,i,u,d,l,h,m)=>class extends t{constructor(f,p,g){var v;const A=c(f),T=i(A),_=d({...Bo,...g});h(_);const E=lt.get(A),y=E?.get(p),C=T||A.state!=="closed"?A:(v=a(A))!==null&&v!==void 0?v:A,M=o(C,T?null:f.baseLatency,u,p,y,_),I=T?r(p,_,y):null;super(f,!0,M,I);const N=[];M.parameters.forEach((k,B)=>{const U=n(this,T,k);N.push([B,U])}),this._nativeAudioWorkletNode=M,this._onprocessorerror=null,this._parameters=new Tn(N),T&&e(A,this);const{activeInputs:P}=s(this);l(M,P)}get onprocessorerror(){return this._onprocessorerror}set onprocessorerror(f){const p=typeof f=="function"?m(this,f):null;this._nativeAudioWorkletNode.onprocessorerror=p;const g=this._nativeAudioWorkletNode.onprocessorerror;this._onprocessorerror=g!==null&&g===p?f:g}get parameters(){return this._parameters===null?this._nativeAudioWorkletNode.parameters:this._parameters}get port(){return this._nativeAudioWorkletNode.port}};function Fe(e,t,n,r,o){if(typeof e.copyFromChannel=="function")t[n].byteLength===0&&(t[n]=new Float32Array(128)),e.copyFromChannel(t[n],r,o);else{const s=e.getChannelData(r);if(t[n].byteLength===0)t[n]=s.slice(o,o+128);else{const a=new Float32Array(s.buffer,o*Float32Array.BYTES_PER_ELEMENT,128);t[n].set(a)}}}const Nn=(e,t,n,r,o)=>{typeof e.copyToChannel=="function"?t[n].byteLength!==0&&e.copyToChannel(t[n],r,o):t[n].byteLength!==0&&e.getChannelData(r).set(t[n],o)},je=(e,t)=>{const n=[];for(let r=0;r<e;r+=1){const o=[],s=typeof t=="number"?t:t[r];for(let a=0;a<s;a+=1)o.push(new Float32Array(128));n.push(o)}return n},Wo=(e,t)=>{const n=K(dt,e),r=X(t);return K(n,r)},Vo=async(e,t,n,r,o,s,a)=>{const c=t===null?Math.ceil(e.context.length/128)*128:t.length,i=r.channelCount*r.numberOfInputs,u=o.reduce((p,g)=>p+g,0),d=u===0?null:n.createBuffer(u,c,n.sampleRate);if(s===void 0)throw new Error("Missing the processor constructor.");const l=q(e),h=await Wo(n,e),m=je(r.numberOfInputs,r.channelCount),w=je(r.numberOfOutputs,o),f=Array.from(e.parameters.keys()).reduce((p,g)=>({...p,[g]:new Float32Array(128)}),{});for(let p=0;p<c;p+=128){if(r.numberOfInputs>0&&t!==null)for(let g=0;g<r.numberOfInputs;g+=1)for(let v=0;v<r.channelCount;v+=1)Fe(t,m[g],v,v,p);s.parameterDescriptors!==void 0&&t!==null&&s.parameterDescriptors.forEach(({name:g},v)=>{Fe(t,f,g,i+v,p)});for(let g=0;g<r.numberOfInputs;g+=1)for(let v=0;v<o[g];v+=1)w[g][v].byteLength===0&&(w[g][v]=new Float32Array(128));try{const g=m.map((A,T)=>l.activeInputs[T].size===0?[]:A),v=a(p/n.sampleRate,n.sampleRate,()=>h.process(g,w,f));if(d!==null)for(let A=0,T=0;A<r.numberOfOutputs;A+=1){for(let _=0;_<o[A];_+=1)Nn(d,w[A],_,T+_,p);T+=o[A]}if(!v)break}catch(g){e.dispatchEvent(new ErrorEvent("processorerror",{colno:g.colno,filename:g.filename,lineno:g.lineno,message:g.message}));break}}return d},Fo=(e,t,n,r,o,s,a,c,i,u,d,l,h,m,w,f)=>(p,g,v)=>{const A=new WeakMap;let T=null;const _=async(E,y)=>{let C=d(E),M=null;const I=Et(C,y),N=Array.isArray(g.outputChannelCount)?g.outputChannelCount:Array.from(g.outputChannelCount);if(l===null){const P=N.reduce((R,x)=>R+x,0),k=o(y,{channelCount:Math.max(1,P),channelCountMode:"explicit",channelInterpretation:"discrete",numberOfOutputs:Math.max(1,P)}),B=[];for(let R=0;R<E.numberOfOutputs;R+=1)B.push(r(y,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"speakers",numberOfInputs:N[R]}));const U=a(y,{channelCount:g.channelCount,channelCountMode:g.channelCountMode,channelInterpretation:g.channelInterpretation,gain:1});U.connect=t.bind(null,B),U.disconnect=i.bind(null,B),M=[k,B,U]}else I||(C=new l(y,p));if(A.set(y,M===null?C:M[2]),M!==null){if(T===null){if(v===void 0)throw new Error("Missing the processor constructor.");if(h===null)throw new Error("Missing the native OfflineAudioContext constructor.");const x=E.channelCount*E.numberOfInputs,D=v.parameterDescriptors===void 0?0:v.parameterDescriptors.length,O=x+D;T=Vo(E,O===0?null:await(async()=>{const W=new h(O,Math.ceil(E.context.length/128)*128,y.sampleRate),G=[],he=[];for(let j=0;j<g.numberOfInputs;j+=1)G.push(a(W,{channelCount:g.channelCount,channelCountMode:g.channelCountMode,channelInterpretation:g.channelInterpretation,gain:1})),he.push(o(W,{channelCount:g.channelCount,channelCountMode:"explicit",channelInterpretation:"discrete",numberOfOutputs:g.channelCount}));const pe=await Promise.all(Array.from(E.parameters.values()).map(async j=>{const H=s(W,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"discrete",offset:j.value});return await m(W,j,H.offset),H})),me=r(W,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"speakers",numberOfInputs:Math.max(1,x+D)});for(let j=0;j<g.numberOfInputs;j+=1){G[j].connect(he[j]);for(let H=0;H<g.channelCount;H+=1)he[j].connect(me,H,j*g.channelCount+H)}for(const[j,H]of pe.entries())H.connect(me,0,x+j),H.start(0);return me.connect(W.destination),await Promise.all(G.map(j=>w(E,W,j))),f(W)})(),y,g,N,v,u)}const P=await T,k=n(y,{buffer:null,channelCount:2,channelCountMode:"max",channelInterpretation:"speakers",loop:!1,loopEnd:0,loopStart:0,playbackRate:1}),[B,U,R]=M;P!==null&&(k.buffer=P,k.start(0)),k.connect(B);for(let x=0,D=0;x<E.numberOfOutputs;x+=1){const O=U[x];for(let L=0;L<N[x];L+=1)B.connect(O,D+L,L);D+=N[x]}return R}if(I)for(const[P,k]of E.parameters.entries())await e(y,k,C.parameters.get(P));else for(const[P,k]of E.parameters.entries())await m(y,k,C.parameters.get(P));return await w(E,y,C),C};return{render(E,y){c(y,E);const C=A.get(y);return C!==void 0?Promise.resolve(C):_(E,y)}}},jo=(e,t)=>(n,r)=>{const o=t.get(n);if(o!==void 0)return o;const s=e.get(n);if(s!==void 0)return s;try{const a=r();return a instanceof Promise?(e.set(n,a),a.catch(()=>!1).then(c=>(e.delete(n),t.set(n,c),c))):(t.set(n,a),a)}catch{return t.set(n,!1),!1}},$o=e=>(t,n,r)=>e(n,t,r),Go=e=>(t,n,r=0,o=0)=>{const s=t[r];if(s===void 0)throw e();return Ve(n)?s.connect(n,0,o):s.connect(n,0)},zo={channelCount:2,channelCountMode:"max",channelInterpretation:"speakers",offset:1},qo=(e,t,n,r,o,s,a)=>class extends e{constructor(i,u){const d=o(i),l={...zo,...u},h=r(d,l),m=s(d),w=m?n():null;super(i,!1,h,w),this._constantSourceNodeRenderer=w,this._nativeConstantSourceNode=h,this._offset=t(this,m,h.offset,Ye,Ce),this._onended=null}get offset(){return this._offset}get onended(){return this._onended}set onended(i){const u=typeof i=="function"?a(this,i):null;this._nativeConstantSourceNode.onended=u;const d=this._nativeConstantSourceNode.onended;this._onended=d!==null&&d===u?i:d}start(i=0){if(this._nativeConstantSourceNode.start(i),this._constantSourceNodeRenderer!==null&&(this._constantSourceNodeRenderer.start=i),this.context.state!=="closed"){ye(this);const u=()=>{this._nativeConstantSourceNode.removeEventListener("ended",u),se(this)&&He(this)};this._nativeConstantSourceNode.addEventListener("ended",u)}}stop(i=0){this._nativeConstantSourceNode.stop(i),this._constantSourceNodeRenderer!==null&&(this._constantSourceNodeRenderer.stop=i)}},Ho=(e,t,n,r,o)=>()=>{const s=new WeakMap;let a=null,c=null;const i=async(u,d)=>{let l=n(u);const h=Et(l,d);if(!h){const m={channelCount:l.channelCount,channelCountMode:l.channelCountMode,channelInterpretation:l.channelInterpretation,offset:l.offset.value};l=t(d,m),a!==null&&l.start(a),c!==null&&l.stop(c)}return s.set(d,l),h?await e(d,u.offset,l.offset):await r(d,u.offset,l.offset),await o(u,d,l),l};return{set start(u){a=u},set stop(u){c=u},render(u,d){const l=s.get(d);return l!==void 0?Promise.resolve(l):i(u,d)}}},Yo=e=>t=>(e[0]=t,e[0]),Xo=()=>new DOMException("","DataCloneError"),Jt=e=>{const{port1:t,port2:n}=new MessageChannel;return new Promise(r=>{const o=()=>{n.onmessage=null,t.close(),n.close(),r()};n.onmessage=()=>o();try{t.postMessage(e,[e])}finally{o()}})},Zo=(e,t,n,r,o,s,a,c,i,u,d)=>(l,h)=>{const m=a(l)?l:s(l);if(o.has(h)){const w=n();return Promise.reject(w)}try{o.add(h)}catch{}return t(i,()=>i(m))?m.decodeAudioData(h).then(w=>(Jt(h).catch(()=>{}),t(c,()=>c(w))||d(w),e.add(w),w)):new Promise((w,f)=>{const p=async()=>{try{await Jt(h)}catch{}},g=v=>{f(v),p()};try{m.decodeAudioData(h,v=>{typeof v.copyFromChannel!="function"&&(u(v),_n(v)),e.add(v),p().then(()=>w(v))},v=>{g(v===null?r():v)})}catch(v){g(v)}})},Ko=(e,t,n,r,o,s,a,c)=>(i,u)=>{const d=t.get(i);if(d===void 0)throw new Error("Missing the expected cycle count.");const l=s(i.context),h=c(l);if(d===u){if(t.delete(i),!h&&a(i)){const m=r(i),{outputs:w}=n(i);for(const f of w)if(Ne(f)){const p=r(f[0]);e(m,p,f[1],f[2])}else{const p=o(f[0]);m.connect(p,f[1])}}}else t.set(i,d-u)},Jo=e=>(t,n,r,o)=>e(t[o],s=>s[0]===n&&s[1]===r),Qo=e=>(t,n)=>{e(t).delete(n)},es=e=>"delayTime"in e,ts=(e,t,n)=>function r(o,s){const a=We(s)?s:n(e,s);if(es(a))return[];if(o[0]===a)return[o];if(o.includes(a))return[];const{outputs:c}=t(a);return Array.from(c).map(i=>r([...o,a],i[0])).reduce((i,u)=>i.concat(u),[])},Pe=(e,t,n)=>{const r=t[n];if(r===void 0)throw e();return r},ns=e=>(t,n=void 0,r=void 0,o=0)=>n===void 0?t.forEach(s=>s.disconnect()):typeof n=="number"?Pe(e,t,n).disconnect():Ve(n)?r===void 0?t.forEach(s=>s.disconnect(n)):o===void 0?Pe(e,t,r).disconnect(n,0):Pe(e,t,r).disconnect(n,0,o):r===void 0?t.forEach(s=>s.disconnect(n)):Pe(e,t,r).disconnect(n,0),rs=()=>new DOMException("","EncodingError"),os=e=>t=>new Promise((n,r)=>{if(e===null){r(new SyntaxError);return}const o=e.document.head;if(o===null)r(new SyntaxError);else{const s=e.document.createElement("script"),a=new Blob([t],{type:"application/javascript"}),c=URL.createObjectURL(a),i=e.onerror,u=()=>{e.onerror=i,URL.revokeObjectURL(c)};e.onerror=(d,l,h,m,w)=>{if(l===c||l===e.location.href&&h===1&&m===1)return u(),r(w),!1;if(i!==null)return i(d,l,h,m,w)},s.onerror=()=>{u(),r(new SyntaxError)},s.onload=()=>{u(),n()},s.src=c,s.type="module",o.appendChild(s)}}),ss=e=>class{constructor(n){this._nativeEventTarget=n,this._listeners=new WeakMap}addEventListener(n,r,o){if(r!==null){let s=this._listeners.get(r);s===void 0&&(s=e(this,r),typeof r=="function"&&this._listeners.set(r,s)),this._nativeEventTarget.addEventListener(n,s,o)}}dispatchEvent(n){return this._nativeEventTarget.dispatchEvent(n)}removeEventListener(n,r,o){const s=r===null?void 0:this._listeners.get(r);this._nativeEventTarget.removeEventListener(n,s===void 0?null:s,o)}},as=e=>(t,n,r)=>{Object.defineProperties(e,{currentFrame:{configurable:!0,get(){return Math.round(t*n)}},currentTime:{configurable:!0,get(){return t}}});try{return r()}finally{e!==null&&(delete e.currentFrame,delete e.currentTime)}},is=e=>async t=>{try{const n=await fetch(t);if(n.ok)return[await n.text(),n.url]}catch{}throw e()},cs=(e,t)=>n=>t(e,n),us=e=>t=>{const n=e(t);if(n.renderer===null)throw new Error("Missing the renderer of the given AudioNode in the audio graph.");return n.renderer},ls=e=>t=>{var n;return(n=e.get(t))!==null&&n!==void 0?n:0},ds=e=>t=>{const n=e(t);if(n.renderer===null)throw new Error("Missing the renderer of the given AudioParam in the audio graph.");return n.renderer},fs=e=>t=>e.get(t),Z=()=>new DOMException("","InvalidStateError"),hs=e=>t=>{const n=e.get(t);if(n===void 0)throw Z();return n},ps=(e,t)=>n=>{let r=e.get(n);if(r!==void 0)return r;if(t===null)throw new Error("Missing the native OfflineAudioContext constructor.");return r=new t(1,1,44100),e.set(n,r),r},ms=e=>t=>{const n=e.get(t);if(n===void 0)throw new Error("The context has no set of AudioWorkletNodes.");return n},gs=()=>new DOMException("","InvalidAccessError"),ws=(e,t,n,r,o,s)=>a=>(c,i)=>{const u=e.get(c);if(u===void 0){if(!a&&s(c)){const d=r(c),{outputs:l}=n(c);for(const h of l)if(Ne(h)){const m=r(h[0]);t(d,m,h[1],h[2])}else{const m=o(h[0]);d.disconnect(m,h[1])}}e.set(c,i)}else e.set(c,u+i)},vs=e=>t=>e!==null&&t instanceof e,_s=e=>t=>e!==null&&typeof e.AudioNode=="function"&&t instanceof e.AudioNode,ys=e=>t=>e!==null&&typeof e.AudioParam=="function"&&t instanceof e.AudioParam,Es=(e,t)=>n=>e(n)||t(n),As=e=>t=>e!==null&&t instanceof e,bs=e=>e!==null&&e.isSecureContext,Cs=(e,t,n,r)=>class extends e{constructor(s,a){const c=n(s),i=t(c,a);if(r(c))throw new TypeError;super(s,!0,i,null),this._nativeMediaStreamAudioSourceNode=i}get mediaStream(){return this._nativeMediaStreamAudioSourceNode.mediaStream}},Ts=(e,t,n,r,o)=>class extends r{constructor(a={}){if(o===null)throw new Error("Missing the native AudioContext constructor.");let c;try{c=new o(a)}catch(d){throw d.code===12&&d.message==="sampleRate is not in range"?t():d}if(c===null)throw n();if(!Ao(a.latencyHint))throw new TypeError(`The provided value '${a.latencyHint}' is not a valid enum value of type AudioContextLatencyCategory.`);if(a.sampleRate!==void 0&&c.sampleRate!==a.sampleRate)throw t();super(c,2);const{latencyHint:i}=a,{sampleRate:u}=c;if(this._baseLatency=typeof c.baseLatency=="number"?c.baseLatency:i==="balanced"?512/u:i==="interactive"||i===void 0?256/u:i==="playback"?1024/u:Math.max(2,Math.min(128,Math.round(i*u/128)))*128/u,this._nativeAudioContext=c,o.name==="webkitAudioContext"?(this._nativeGainNode=c.createGain(),this._nativeOscillatorNode=c.createOscillator(),this._nativeGainNode.gain.value=1e-37,this._nativeOscillatorNode.connect(this._nativeGainNode).connect(c.destination),this._nativeOscillatorNode.start()):(this._nativeGainNode=null,this._nativeOscillatorNode=null),this._state=null,c.state==="running"){this._state="suspended";const d=()=>{this._state==="suspended"&&(this._state=null),c.removeEventListener("statechange",d)};c.addEventListener("statechange",d)}}get baseLatency(){return this._baseLatency}get state(){return this._state!==null?this._state:this._nativeAudioContext.state}close(){return this.state==="closed"?this._nativeAudioContext.close().then(()=>{throw e()}):(this._state==="suspended"&&(this._state=null),this._nativeAudioContext.close().then(()=>{this._nativeGainNode!==null&&this._nativeOscillatorNode!==null&&(this._nativeOscillatorNode.stop(),this._nativeGainNode.disconnect(),this._nativeOscillatorNode.disconnect()),Eo(this)}))}resume(){return this._state==="suspended"?new Promise((a,c)=>{const i=()=>{this._nativeAudioContext.removeEventListener("statechange",i),this._nativeAudioContext.state==="running"?a():this.resume().then(a,c)};this._nativeAudioContext.addEventListener("statechange",i)}):this._nativeAudioContext.resume().catch(a=>{throw a===void 0||a.code===15?e():a})}suspend(){return this._nativeAudioContext.suspend().catch(a=>{throw a===void 0?e():a})}},Ns=(e,t,n,r,o,s)=>class extends n{constructor(c,i){super(c),this._nativeContext=c,pn.set(this,c),r(c)&&o.set(c,new Set),this._destination=new e(this,i),this._listener=t(this,c),this._onstatechange=null}get currentTime(){return this._nativeContext.currentTime}get destination(){return this._destination}get listener(){return this._listener}get onstatechange(){return this._onstatechange}set onstatechange(c){const i=typeof c=="function"?s(this,c):null;this._nativeContext.onstatechange=i;const u=this._nativeContext.onstatechange;this._onstatechange=u!==null&&u===i?c:u}get sampleRate(){return this._nativeContext.sampleRate}get state(){return this._nativeContext.state}},wt=e=>{const t=new Uint32Array([1179011410,40,1163280727,544501094,16,131073,44100,176400,1048580,1635017060,4,0]);try{const n=e.decodeAudioData(t.buffer,()=>{});return n===void 0?!1:(n.catch(()=>{}),!0)}catch{}return!1},Ms=(e,t)=>(n,r,o)=>{const s=new Set;return n.connect=(a=>(c,i=0,u=0)=>{const d=s.size===0;if(t(c))return a.call(n,c,i,u),e(s,[c,i,u],l=>l[0]===c&&l[1]===i&&l[2]===u,!0),d&&r(),c;a.call(n,c,i),e(s,[c,i],l=>l[0]===c&&l[1]===i,!0),d&&r()})(n.connect),n.disconnect=(a=>(c,i,u)=>{const d=s.size>0;if(c===void 0)a.apply(n),s.clear();else if(typeof c=="number"){a.call(n,c);for(const h of s)h[1]===c&&s.delete(h)}else{t(c)?a.call(n,c,i,u):a.call(n,c,i);for(const h of s)h[0]===c&&(i===void 0||h[1]===i)&&(u===void 0||h[2]===u)&&s.delete(h)}const l=s.size===0;d&&l&&o()})(n.disconnect),n},ce=(e,t,n)=>{const r=t[n];r!==void 0&&r!==e[n]&&(e[n]=r)},Me=(e,t)=>{ce(e,t,"channelCount"),ce(e,t,"channelCountMode"),ce(e,t,"channelInterpretation")},Os=e=>e===null?null:e.hasOwnProperty("AudioBuffer")?e.AudioBuffer:null,Ct=(e,t,n)=>{const r=t[n];r!==void 0&&r!==e[n].value&&(e[n].value=r)},Ss=e=>{e.start=(t=>{let n=!1;return(r=0,o=0,s)=>{if(n)throw Z();t.call(e,r,o,s),n=!0}})(e.start)},Mn=e=>{e.start=(t=>(n=0,r=0,o)=>{if(typeof o=="number"&&o<0||r<0||n<0)throw new RangeError("The parameters can't be negative.");t.call(e,n,r,o)})(e.start)},On=e=>{e.stop=(t=>(n=0)=>{if(n<0)throw new RangeError("The parameter can't be negative.");t.call(e,n)})(e.stop)},Rs=(e,t,n,r,o,s,a,c,i,u,d)=>(l,h)=>{const m=l.createBufferSource();return Me(m,h),Ct(m,h,"playbackRate"),ce(m,h,"buffer"),ce(m,h,"loop"),ce(m,h,"loopEnd"),ce(m,h,"loopStart"),t(n,()=>n(l))||Ss(m),t(r,()=>r(l))||i(m),t(o,()=>o(l))||u(m,l),t(s,()=>s(l))||Mn(m),t(a,()=>a(l))||d(m,l),t(c,()=>c(l))||On(m),e(l,m),m},Is=e=>e===null?null:e.hasOwnProperty("AudioContext")?e.AudioContext:e.hasOwnProperty("webkitAudioContext")?e.webkitAudioContext:null,ks=(e,t)=>(n,r,o)=>{const s=n.destination;if(s.channelCount!==r)try{s.channelCount=r}catch{}o&&s.channelCountMode!=="explicit"&&(s.channelCountMode="explicit"),s.maxChannelCount===0&&Object.defineProperty(s,"maxChannelCount",{value:r});const a=e(n,{channelCount:r,channelCountMode:s.channelCountMode,channelInterpretation:s.channelInterpretation,gain:1});return t(a,"channelCount",c=>()=>c.call(a),c=>i=>{c.call(a,i);try{s.channelCount=i}catch(u){if(i>s.maxChannelCount)throw u}}),t(a,"channelCountMode",c=>()=>c.call(a),c=>i=>{c.call(a,i),s.channelCountMode=i}),t(a,"channelInterpretation",c=>()=>c.call(a),c=>i=>{c.call(a,i),s.channelInterpretation=i}),Object.defineProperty(a,"maxChannelCount",{get:()=>s.maxChannelCount}),a.connect(s),a},Ls=e=>e===null?null:e.hasOwnProperty("AudioWorkletNode")?e.AudioWorkletNode:null,Ps=e=>{const{port1:t}=new MessageChannel;try{t.postMessage(e)}finally{t.close()}},xs=(e,t,n,r,o)=>(s,a,c,i,u,d)=>{if(c!==null)try{const l=new c(s,i,d),h=new Map;let m=null;if(Object.defineProperties(l,{channelCount:{get:()=>d.channelCount,set:()=>{throw e()}},channelCountMode:{get:()=>"explicit",set:()=>{throw e()}},onprocessorerror:{get:()=>m,set:w=>{typeof m=="function"&&l.removeEventListener("processorerror",m),m=typeof w=="function"?w:null,typeof m=="function"&&l.addEventListener("processorerror",m)}}}),l.addEventListener=(w=>(...f)=>{if(f[0]==="processorerror"){const p=typeof f[1]=="function"?f[1]:typeof f[1]=="object"&&f[1]!==null&&typeof f[1].handleEvent=="function"?f[1].handleEvent:null;if(p!==null){const g=h.get(f[1]);g!==void 0?f[1]=g:(f[1]=v=>{v.type==="error"?(Object.defineProperties(v,{type:{value:"processorerror"}}),p(v)):p(new ErrorEvent(f[0],{...v}))},h.set(p,f[1]))}}return w.call(l,"error",f[1],f[2]),w.call(l,...f)})(l.addEventListener),l.removeEventListener=(w=>(...f)=>{if(f[0]==="processorerror"){const p=h.get(f[1]);p!==void 0&&(h.delete(f[1]),f[1]=p)}return w.call(l,"error",f[1],f[2]),w.call(l,f[0],f[1],f[2])})(l.removeEventListener),d.numberOfOutputs!==0){const w=n(s,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"discrete",gain:0});return l.connect(w).connect(s.destination),o(l,()=>w.disconnect(),()=>w.connect(s.destination))}return l}catch(l){throw l.code===11?r():l}if(u===void 0)throw r();return Ps(d),t(s,a,u,d)},Us=(e,t)=>e===null?512:Math.max(512,Math.min(16384,Math.pow(2,Math.round(Math.log2(e*t))))),Bs=e=>new Promise((t,n)=>{const{port1:r,port2:o}=new MessageChannel;r.onmessage=({data:s})=>{r.close(),o.close(),t(s)},r.onmessageerror=({data:s})=>{r.close(),o.close(),n(s)},o.postMessage(e)}),Ds=async(e,t)=>{const n=await Bs(t);return new e(n)},Ws=(e,t,n,r)=>{let o=dt.get(e);o===void 0&&(o=new WeakMap,dt.set(e,o));const s=Ds(n,r);return o.set(t,s),s},Vs=(e,t,n,r,o,s,a,c,i,u,d,l,h)=>(m,w,f,p)=>{if(p.numberOfInputs===0&&p.numberOfOutputs===0)throw i();const g=Array.isArray(p.outputChannelCount)?p.outputChannelCount:Array.from(p.outputChannelCount);if(g.some(b=>b<1))throw i();if(g.length!==p.numberOfOutputs)throw t();if(p.channelCountMode!=="explicit")throw i();const v=p.channelCount*p.numberOfInputs,A=g.reduce((b,S)=>b+S,0),T=f.parameterDescriptors===void 0?0:f.parameterDescriptors.length;if(v+T>6||A>6)throw i();const _=new MessageChannel,E=[],y=[];for(let b=0;b<p.numberOfInputs;b+=1)E.push(a(m,{channelCount:p.channelCount,channelCountMode:p.channelCountMode,channelInterpretation:p.channelInterpretation,gain:1})),y.push(o(m,{channelCount:p.channelCount,channelCountMode:"explicit",channelInterpretation:"discrete",numberOfOutputs:p.channelCount}));const C=[];if(f.parameterDescriptors!==void 0)for(const{defaultValue:b,maxValue:S,minValue:z,name:F}of f.parameterDescriptors){const V=s(m,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"discrete",offset:p.parameterData[F]!==void 0?p.parameterData[F]:b===void 0?0:b});Object.defineProperties(V.offset,{defaultValue:{get:()=>b===void 0?0:b},maxValue:{get:()=>S===void 0?Ye:S},minValue:{get:()=>z===void 0?Ce:z}}),C.push(V)}const M=r(m,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"speakers",numberOfInputs:Math.max(1,v+T)}),I=Us(w,m.sampleRate),N=c(m,I,v+T,Math.max(1,A)),P=o(m,{channelCount:Math.max(1,A),channelCountMode:"explicit",channelInterpretation:"discrete",numberOfOutputs:Math.max(1,A)}),k=[];for(let b=0;b<p.numberOfOutputs;b+=1)k.push(r(m,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"speakers",numberOfInputs:g[b]}));for(let b=0;b<p.numberOfInputs;b+=1){E[b].connect(y[b]);for(let S=0;S<p.channelCount;S+=1)y[b].connect(M,S,b*p.channelCount+S)}const B=new Tn(f.parameterDescriptors===void 0?[]:f.parameterDescriptors.map(({name:b},S)=>{const z=C[S];return z.connect(M,0,v+S),z.start(0),[b,z.offset]}));M.connect(N);let U=p.channelInterpretation,R=null;const x=p.numberOfOutputs===0?[N]:k,D={get bufferSize(){return I},get channelCount(){return p.channelCount},set channelCount(b){throw n()},get channelCountMode(){return p.channelCountMode},set channelCountMode(b){throw n()},get channelInterpretation(){return U},set channelInterpretation(b){for(const S of E)S.channelInterpretation=b;U=b},get context(){return N.context},get inputs(){return E},get numberOfInputs(){return p.numberOfInputs},get numberOfOutputs(){return p.numberOfOutputs},get onprocessorerror(){return R},set onprocessorerror(b){typeof R=="function"&&D.removeEventListener("processorerror",R),R=typeof b=="function"?b:null,typeof R=="function"&&D.addEventListener("processorerror",R)},get parameters(){return B},get port(){return _.port2},addEventListener(...b){return N.addEventListener(b[0],b[1],b[2])},connect:e.bind(null,x),disconnect:u.bind(null,x),dispatchEvent(...b){return N.dispatchEvent(b[0])},removeEventListener(...b){return N.removeEventListener(b[0],b[1],b[2])}},O=new Map;_.port1.addEventListener=(b=>(...S)=>{if(S[0]==="message"){const z=typeof S[1]=="function"?S[1]:typeof S[1]=="object"&&S[1]!==null&&typeof S[1].handleEvent=="function"?S[1].handleEvent:null;if(z!==null){const F=O.get(S[1]);F!==void 0?S[1]=F:(S[1]=V=>{d(m.currentTime,m.sampleRate,()=>z(V))},O.set(z,S[1]))}}return b.call(_.port1,S[0],S[1],S[2])})(_.port1.addEventListener),_.port1.removeEventListener=(b=>(...S)=>{if(S[0]==="message"){const z=O.get(S[1]);z!==void 0&&(O.delete(S[1]),S[1]=z)}return b.call(_.port1,S[0],S[1],S[2])})(_.port1.removeEventListener);let L=null;Object.defineProperty(_.port1,"onmessage",{get:()=>L,set:b=>{typeof L=="function"&&_.port1.removeEventListener("message",L),L=typeof b=="function"?b:null,typeof L=="function"&&(_.port1.addEventListener("message",L),_.port1.start())}}),f.prototype.port=_.port1;let W=null;Ws(m,D,f,p).then(b=>W=b);const he=je(p.numberOfInputs,p.channelCount),pe=je(p.numberOfOutputs,g),me=f.parameterDescriptors===void 0?[]:f.parameterDescriptors.reduce((b,{name:S})=>({...b,[S]:new Float32Array(128)}),{});let j=!0;const H=()=>{p.numberOfOutputs>0&&N.disconnect(P);for(let b=0,S=0;b<p.numberOfOutputs;b+=1){const z=k[b];for(let F=0;F<g[b];F+=1)P.disconnect(z,S+F,F);S+=g[b]}},Ie=new Map;N.onaudioprocess=({inputBuffer:b,outputBuffer:S})=>{if(W!==null){const z=l(D);for(let F=0;F<I;F+=128){for(let V=0;V<p.numberOfInputs;V+=1)for(let $=0;$<p.channelCount;$+=1)Fe(b,he[V],$,$,F);f.parameterDescriptors!==void 0&&f.parameterDescriptors.forEach(({name:V},$)=>{Fe(b,me,V,v+$,F)});for(let V=0;V<p.numberOfInputs;V+=1)for(let $=0;$<g[V];$+=1)pe[V][$].byteLength===0&&(pe[V][$]=new Float32Array(128));try{const V=he.map((Y,ne)=>{if(z[ne].size>0)return Ie.set(ne,I/128),Y;const rt=Ie.get(ne);return rt===void 0?[]:(Y.every(or=>or.every(sr=>sr===0))&&(rt===1?Ie.delete(ne):Ie.set(ne,rt-1)),Y)});j=d(m.currentTime+F/m.sampleRate,m.sampleRate,()=>W.process(V,pe,me));for(let Y=0,ne=0;Y<p.numberOfOutputs;Y+=1){for(let _e=0;_e<g[Y];_e+=1)Nn(S,pe[Y],_e,ne+_e,F);ne+=g[Y]}}catch(V){j=!1,D.dispatchEvent(new ErrorEvent("processorerror",{colno:V.colno,filename:V.filename,lineno:V.lineno,message:V.message}))}if(!j){for(let V=0;V<p.numberOfInputs;V+=1){E[V].disconnect(y[V]);for(let $=0;$<p.channelCount;$+=1)y[F].disconnect(M,$,V*p.channelCount+$)}if(f.parameterDescriptors!==void 0){const V=f.parameterDescriptors.length;for(let $=0;$<V;$+=1){const Y=C[$];Y.disconnect(M,0,v+$),Y.stop()}}M.disconnect(N),N.onaudioprocess=null,tt?H():Pt();break}}}};let tt=!1;const nt=a(m,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"discrete",gain:0}),Lt=()=>N.connect(nt).connect(m.destination),Pt=()=>{N.disconnect(nt),nt.disconnect()},nr=()=>{if(j){Pt(),p.numberOfOutputs>0&&N.connect(P);for(let b=0,S=0;b<p.numberOfOutputs;b+=1){const z=k[b];for(let F=0;F<g[b];F+=1)P.connect(z,S+F,F);S+=g[b]}}tt=!0},rr=()=>{j&&(Lt(),H()),tt=!1};return Lt(),h(D,nr,rr)},Fs=(e,t)=>(n,r)=>{const o=n.createChannelMerger(r.numberOfInputs);return e!==null&&e.name==="webkitAudioContext"&&t(n,o),Me(o,r),o},js=e=>{const t=e.numberOfOutputs;Object.defineProperty(e,"channelCount",{get:()=>t,set:n=>{if(n!==t)throw Z()}}),Object.defineProperty(e,"channelCountMode",{get:()=>"explicit",set:n=>{if(n!=="explicit")throw Z()}}),Object.defineProperty(e,"channelInterpretation",{get:()=>"discrete",set:n=>{if(n!=="discrete")throw Z()}})},Sn=(e,t)=>{const n=e.createChannelSplitter(t.numberOfOutputs);return Me(n,t),js(n),n},$s=(e,t,n,r,o)=>(s,a)=>{if(s.createConstantSource===void 0)return n(s,a);const c=s.createConstantSource();return Me(c,a),Ct(c,a,"offset"),t(r,()=>r(s))||Mn(c),t(o,()=>o(s))||On(c),e(s,c),c},Rn=(e,t)=>(e.connect=t.connect.bind(t),e.disconnect=t.disconnect.bind(t),e),Gs=(e,t,n,r)=>(o,{offset:s,...a})=>{const c=o.createBuffer(1,2,44100),i=t(o,{buffer:null,channelCount:2,channelCountMode:"max",channelInterpretation:"speakers",loop:!1,loopEnd:0,loopStart:0,playbackRate:1}),u=n(o,{...a,gain:s}),d=c.getChannelData(0);d[0]=1,d[1]=1,i.buffer=c,i.loop=!0;const l={get bufferSize(){},get channelCount(){return u.channelCount},set channelCount(w){u.channelCount=w},get channelCountMode(){return u.channelCountMode},set channelCountMode(w){u.channelCountMode=w},get channelInterpretation(){return u.channelInterpretation},set channelInterpretation(w){u.channelInterpretation=w},get context(){return u.context},get inputs(){return[]},get numberOfInputs(){return i.numberOfInputs},get numberOfOutputs(){return u.numberOfOutputs},get offset(){return u.gain},get onended(){return i.onended},set onended(w){i.onended=w},addEventListener(...w){return i.addEventListener(w[0],w[1],w[2])},dispatchEvent(...w){return i.dispatchEvent(w[0])},removeEventListener(...w){return i.removeEventListener(w[0],w[1],w[2])},start(w=0){i.start.call(i,w)},stop(w=0){i.stop.call(i,w)}},h=()=>i.connect(u),m=()=>i.disconnect(u);return e(o,i),r(Rn(l,u),h,m)},ae=(e,t)=>{const n=e.createGain();return Me(n,t),Ct(n,t,"gain"),n},zs=(e,{mediaStream:t})=>{const n=t.getAudioTracks();n.sort((s,a)=>s.id<a.id?-1:s.id>a.id?1:0);const r=n.slice(0,1),o=e.createMediaStreamSource(new MediaStream(r));return Object.defineProperty(o,"mediaStream",{value:t}),o},qs=e=>e===null?null:e.hasOwnProperty("OfflineAudioContext")?e.OfflineAudioContext:e.hasOwnProperty("webkitOfflineAudioContext")?e.webkitOfflineAudioContext:null,Hs=e=>(t,{disableNormalization:n,imag:r,real:o})=>{const s=r instanceof Float32Array?r:new Float32Array(r),a=o instanceof Float32Array?o:new Float32Array(o),c=t.createPeriodicWave(a,s,{disableNormalization:n});if(Array.from(r).length<2)throw e();return c},Tt=(e,t,n,r)=>e.createScriptProcessor(t,n,r),de=()=>new DOMException("","NotSupportedError"),Ys={disableNormalization:!1},Xs=(e,t,n,r)=>class In{constructor(s,a){const c=t(s),i=r({...Ys,...a}),u=e(c,i);return n.add(u),u}static[Symbol.hasInstance](s){return s!==null&&typeof s=="object"&&Object.getPrototypeOf(s)===In.prototype||n.has(s)}},Zs=(e,t)=>(n,r,o)=>(e(r).replay(o),t(r,n,o)),Ks=(e,t,n)=>async(r,o,s)=>{const a=e(r);await Promise.all(a.activeInputs.map((c,i)=>Array.from(c).map(async([u,d])=>{const h=await t(u).render(u,o),m=r.context.destination;!n(u)&&(r!==m||!n(r))&&h.connect(s,d,i)})).reduce((c,i)=>[...c,...i],[]))},Js=(e,t,n)=>async(r,o,s)=>{const a=t(r);await Promise.all(Array.from(a.activeInputs).map(async([c,i])=>{const d=await e(c).render(c,o);n(c)||d.connect(s,i)}))},Qs=(e,t,n,r)=>o=>e(wt,()=>wt(o))?Promise.resolve(e(r,r)).then(s=>{if(!s){const a=n(o,512,0,1);o.oncomplete=()=>{a.onaudioprocess=null,a.disconnect()},a.onaudioprocess=()=>o.currentTime,a.connect(o.destination)}return o.startRendering()}):new Promise(s=>{const a=t(o,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"discrete",gain:0});o.oncomplete=c=>{a.disconnect(),s(c.renderedBuffer)},a.connect(o.destination),o.startRendering()}),ea=e=>(t,n)=>{e.set(t,n)},ta=e=>()=>{if(e===null)return!1;try{new e({length:1,sampleRate:44100})}catch{return!1}return!0},na=(e,t)=>async()=>{if(e===null)return!0;if(t===null)return!1;const n=new Blob(['class A extends AudioWorkletProcessor{process(i){this.port.postMessage(i,[i[0][0].buffer])}}registerProcessor("a",A)'],{type:"application/javascript; charset=utf-8"}),r=new t(1,128,44100),o=URL.createObjectURL(n);let s=!1,a=!1;try{await r.audioWorklet.addModule(o);const c=new e(r,"a",{numberOfOutputs:0}),i=r.createOscillator();c.port.onmessage=()=>s=!0,c.onprocessorerror=()=>a=!0,i.connect(c),i.start(0),await r.startRendering()}catch{}finally{URL.revokeObjectURL(o)}return s&&!a},ra=(e,t)=>()=>{if(t===null)return Promise.resolve(!1);const n=new t(1,1,44100),r=e(n,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"discrete",gain:0});return new Promise(o=>{n.oncomplete=()=>{r.disconnect(),o(n.currentTime!==0)},n.startRendering()})},oa=()=>new DOMException("","UnknownError"),sa=()=>typeof window>"u"?null:window,aa=(e,t)=>n=>{n.copyFromChannel=(r,o,s=0)=>{const a=e(s),c=e(o);if(c>=n.numberOfChannels)throw t();const i=n.length,u=n.getChannelData(c),d=r.length;for(let l=a<0?-a:0;l+a<i&&l<d;l+=1)r[l]=u[l+a]},n.copyToChannel=(r,o,s=0)=>{const a=e(s),c=e(o);if(c>=n.numberOfChannels)throw t();const i=n.length,u=n.getChannelData(c),d=r.length;for(let l=a<0?-a:0;l+a<i&&l<d;l+=1)u[l+a]=r[l]}},ia=e=>t=>{t.copyFromChannel=(n=>(r,o,s=0)=>{const a=e(s),c=e(o);if(a<t.length)return n.call(t,r,c,a)})(t.copyFromChannel),t.copyToChannel=(n=>(r,o,s=0)=>{const a=e(s),c=e(o);if(a<t.length)return n.call(t,r,c,a)})(t.copyToChannel)},ca=e=>(t,n)=>{const r=n.createBuffer(1,1,44100);t.buffer===null&&(t.buffer=r),e(t,"buffer",o=>()=>{const s=o.call(t);return s===r?null:s},o=>s=>o.call(t,s===null?r:s))},ua=(e,t)=>(n,r)=>{r.channelCount=1,r.channelCountMode="explicit",Object.defineProperty(r,"channelCount",{get:()=>1,set:()=>{throw e()}}),Object.defineProperty(r,"channelCountMode",{get:()=>"explicit",set:()=>{throw e()}});const o=n.createBufferSource();t(r,()=>{const c=r.numberOfInputs;for(let i=0;i<c;i+=1)o.connect(r,0,i)},()=>o.disconnect(r))},la=(e,t,n)=>e.copyFromChannel===void 0?e.getChannelData(n)[0]:(e.copyFromChannel(t,n),t[0]),Nt=(e,t,n,r)=>{let o=e;for(;!o.hasOwnProperty(t);)o=Object.getPrototypeOf(o);const{get:s,set:a}=Object.getOwnPropertyDescriptor(o,t);Object.defineProperty(e,t,{get:n(s),set:r(a)})},da=e=>({...e,outputChannelCount:e.outputChannelCount!==void 0?e.outputChannelCount:e.numberOfInputs===1&&e.numberOfOutputs===1?[e.channelCount]:Array.from({length:e.numberOfOutputs},()=>1)}),fa=e=>{const{imag:t,real:n}=e;return t===void 0?n===void 0?{...e,imag:[0,0],real:[0,0]}:{...e,imag:Array.from(n,()=>0),real:n}:n===void 0?{...e,imag:t,real:Array.from(t,()=>0)}:{...e,imag:t,real:n}},kn=(e,t,n)=>{try{e.setValueAtTime(t,n)}catch(r){if(r.code!==9)throw r;kn(e,t,n+1e-7)}},ha=e=>{const t=e.createBufferSource();t.start();try{t.start()}catch{return!0}return!1},pa=e=>{const t=e.createBufferSource(),n=e.createBuffer(1,1,44100);t.buffer=n;try{t.start(0,1)}catch{return!1}return!0},ma=e=>{const t=e.createBufferSource();t.start();try{t.stop()}catch{return!1}return!0},Ln=e=>{const t=e.createOscillator();try{t.start(-1)}catch(n){return n instanceof RangeError}return!1},ga=e=>{const t=e.createBuffer(1,1,44100),n=e.createBufferSource();n.buffer=t,n.start(),n.stop();try{return n.stop(),!0}catch{return!1}},Pn=e=>{const t=e.createOscillator();try{t.stop(-1)}catch(n){return n instanceof RangeError}return!1},wa=e=>{const{port1:t,port2:n}=new MessageChannel;try{t.postMessage(e)}finally{t.close(),n.close()}},va=e=>{e.start=(t=>(n=0,r=0,o)=>{const s=e.buffer,a=s===null?r:Math.min(s.duration,r);s!==null&&a>s.duration-.5/e.context.sampleRate?t.call(e,n,0,0):t.call(e,n,a,o)})(e.start)},_a=(e,t)=>{const n=t.createGain();e.connect(n);const r=(o=>()=>{o.call(e,n),e.removeEventListener("ended",r)})(e.disconnect);e.addEventListener("ended",r),Rn(e,n),e.stop=(o=>{let s=!1;return(a=0)=>{if(s)try{o.call(e,a)}catch{n.gain.setValueAtTime(0,a)}else o.call(e,a),s=!0}})(e.stop)},Oe=(e,t)=>n=>{const r={value:e};return Object.defineProperties(n,{currentTarget:r,target:r}),typeof t=="function"?t.call(e,n):t.handleEvent.call(e,n)},ya=eo(le),Ea=ao(le),Aa=Jo(qe),ba=new WeakMap,Ca=ls(ba),fe=jo(new Map,new WeakMap),Q=sa(),xn=us(q),Xe=Ks(q,xn,ue),ee=hs(pn),ve=qs(Q),J=As(ve),Un=new WeakMap,Bn=ss(Oe),Ze=Is(Q),Dn=vs(Ze),Wn=_s(Q),Ta=ys(Q),Ae=Ls(Q),Se=Po(to(ln),so(ya,Ea,mt,Aa,gt,q,Ca,be,X,le,se,ue,xe),fe,ws(ut,gt,q,X,Ee,se),ie,gs,de,Ko(mt,ut,q,X,Ee,ee,se,J),ts(Un,q,K),Bn,ee,Dn,Wn,Ta,J,Ae),Vn=new WeakSet,Qt=Os(Q),Fn=Yo(new Uint32Array(1)),jn=aa(Fn,ie),$n=ia(Fn),Na=lo(Vn,fe,de,Qt,ve,ta(Qt),jn,$n),Mt=io(ae),Gn=Js(xn,Te,ue),Ot=$o(Gn),Ke=Rs(Mt,fe,ha,pa,ma,Ln,ga,Pn,va,ca(Nt),_a),St=Zs(ds(Te),Gn),Ma=po(Ot,Ke,X,St,Xe),Je=xo(no(fn),Un,hn,Uo,Yr,Xr,Zr,Kr,Jr,at,cn,Ze,kn),Oa=ho(Se,Ma,Je,Z,Ke,ee,J,Oe),Sa=bo(Se,Co,ie,Z,ks(ae,Nt),ee,J,Xe),Qe=Ms(le,Wn),Ra=ua(Z,Qe),Rt=Fs(Ze,Ra),Ia=Gs(Mt,Ke,ae,Qe),Re=$s(Mt,fe,Ia,Ln,Pn),ka=Ho(Ot,Re,X,St,Xe),La=qo(Se,Je,ka,Re,ee,J,Oe),Pa=Qs(fe,ae,Tt,ra(ae,ve)),xa=To(Je,Rt,Re,Tt,de,la,J,Nt),zn=new WeakMap,Ua=Ns(Sa,xa,Bn,J,zn,Oe),Ba=Hs(ie);Xs(Ba,ee,new WeakSet,fa);const qn=bs(Q),It=as(Q),Hn=new WeakMap,Da=ps(Hn,ve),en=qn?oo(fe,de,os(Q),It,is(Qr),ee,Da,J,Ae,new WeakMap,new WeakMap,na(Ae,ve),Q):void 0,Wa=Es(Dn,J);Zo(Vn,fe,Xo,rs,new WeakSet,ee,Wa,ht,wt,jn,$n);const Va=Cs(Se,zs,ee,J),Yn=ms(zn),Fa=co(Yn),Xn=Go(ie),ja=Qo(Yn),Zn=ns(ie),Kn=new WeakMap,$a=cs(Kn,K),Ga=Vs(Xn,ie,Z,Rt,Sn,Re,ae,Tt,de,Zn,It,$a,Qe),za=xs(Z,Ga,ae,de,Qe),qa=Fo(Ot,Xn,Ke,Rt,Sn,Re,ae,ja,Zn,It,X,Ae,ve,St,Xe,Pa),Ha=fs(Hn),Ya=ea(Kn),tn=qn?Do(Fa,Se,Je,qa,za,q,Ha,ee,J,Ae,da,Ya,wa,Oe):void 0,Xa=Ts(Z,de,oa,Ua,Ze),Jn="Missing AudioWorklet support. Maybe this is not running in a secure context.",Za=async(e,t,n,r,o)=>{const{encoderId:s,port:a}=await on(o,t.sampleRate);if(tn===void 0)throw new Error(Jn);const c=new Oa(t,{buffer:e}),i=new Va(t,{mediaStream:r}),u=Gr(tn,t,{channelCount:n});return{audioBufferSourceNode:c,encoderId:s,mediaStreamAudioSourceNode:i,port:a,recorderAudioWorkletNode:u}},Ka=(e,t,n,r)=>(o,s,a)=>{var c;const i=(c=s.getAudioTracks()[0])===null||c===void 0?void 0:c.getSettings().sampleRate,u=new Xa({latencyHint:"playback",sampleRate:i}),d=Math.max(1024,Math.ceil(u.baseLatency*u.sampleRate)),l=new Na({length:d,sampleRate:u.sampleRate}),h=[],m=$r(C=>{if(en===void 0)throw new Error(Jn);return en(u,C)});let w=null,f=null,p=null,g=null,v=!0;const A=C=>{o.dispatchEvent(e("dataavailable",{data:new Blob(C,{type:a})}))},T=async(C,M)=>{const I=await Ue(C,M);p===null?h.push(...I):(A(I),g=T(C,M))},_=()=>(v=!0,u.resume()),E=()=>{p!==null&&(w!==null&&(s.removeEventListener("addtrack",w),s.removeEventListener("removetrack",w)),f!==null&&clearTimeout(f),p.then(async({constantSourceNode:C,encoderId:M,mediaStreamAudioSourceNode:I,recorderAudioWorkletNode:N})=>{g!==null&&(g.catch(()=>{}),g=null),await N.stop(),I.disconnect(N),C.stop();const P=await Ue(M,null);p===null&&await y(),A([...h,...P]),h.length=0,o.dispatchEvent(new Event("stop"))}),p=null)},y=()=>(v=!1,u.suspend());return y(),{get mimeType(){return a},get state(){return p===null?"inactive":v?"recording":"paused"},pause(){if(p===null)throw n();v&&(y(),o.dispatchEvent(new Event("pause")))},resume(){if(p===null)throw n();v||(_(),o.dispatchEvent(new Event("resume")))},start(C){var M;if(p!==null)throw n();if(s.getVideoTracks().length>0)throw r();o.dispatchEvent(new Event("start"));const I=s.getAudioTracks(),N=I.length===0?2:(M=I[0].getSettings().channelCount)!==null&&M!==void 0?M:2;p=Promise.all([_(),m.then(()=>Za(l,u,N,s,a))]).then(async([,{audioBufferSourceNode:k,encoderId:B,mediaStreamAudioSourceNode:U,port:R,recorderAudioWorkletNode:x}])=>{U.connect(x),await new Promise(O=>{k.onended=O,k.connect(x),k.start(u.currentTime+d/u.sampleRate)}),k.disconnect(x);const D=new La(u,{offset:0});return D.onended=()=>D.disconnect(),D.connect(u.destination),D.start(),await x.record(R),C!==void 0&&(g=T(B,C)),{constantSourceNode:D,encoderId:B,mediaStreamAudioSourceNode:U,recorderAudioWorkletNode:x}});const P=s.getTracks();w=()=>{E(),o.dispatchEvent(new ErrorEvent("error",{error:t()}))},s.addEventListener("addtrack",w),s.addEventListener("removetrack",w),f=setInterval(()=>{const k=s.getTracks();(k.length!==P.length||k.some((B,U)=>B!==P[U]))&&w!==null&&w()},1e3)},stop:E}};class st{constructor(t,n=0,r){if(n<0||r!==void 0&&r<0)throw new RangeError;const o=t.reduce((d,l)=>d+l.byteLength,0);if(n>o||r!==void 0&&n+r>o)throw new RangeError;const s=[],a=r===void 0?o-n:r,c=[];let i=0,u=n;for(const d of t)if(c.length===0)if(d.byteLength>u){i=d.byteLength-u;const l=i>a?a:i;s.push(new DataView(d,u,l)),c.push(d)}else u-=d.byteLength;else if(i<a){i+=d.byteLength;const l=i>a?d.byteLength-i+a:d.byteLength;s.push(new DataView(d,0,l)),c.push(d)}this._buffers=c,this._byteLength=a,this._byteOffset=u,this._dataViews=s,this._internalBuffer=new DataView(new ArrayBuffer(8))}get buffers(){return this._buffers}get byteLength(){return this._byteLength}get byteOffset(){return this._byteOffset}getFloat32(t,n){return this._internalBuffer.setUint8(0,this.getUint8(t+0)),this._internalBuffer.setUint8(1,this.getUint8(t+1)),this._internalBuffer.setUint8(2,this.getUint8(t+2)),this._internalBuffer.setUint8(3,this.getUint8(t+3)),this._internalBuffer.getFloat32(0,n)}getFloat64(t,n){return this._internalBuffer.setUint8(0,this.getUint8(t+0)),this._internalBuffer.setUint8(1,this.getUint8(t+1)),this._internalBuffer.setUint8(2,this.getUint8(t+2)),this._internalBuffer.setUint8(3,this.getUint8(t+3)),this._internalBuffer.setUint8(4,this.getUint8(t+4)),this._internalBuffer.setUint8(5,this.getUint8(t+5)),this._internalBuffer.setUint8(6,this.getUint8(t+6)),this._internalBuffer.setUint8(7,this.getUint8(t+7)),this._internalBuffer.getFloat64(0,n)}getInt16(t,n){return this._internalBuffer.setUint8(0,this.getUint8(t+0)),this._internalBuffer.setUint8(1,this.getUint8(t+1)),this._internalBuffer.getInt16(0,n)}getInt32(t,n){return this._internalBuffer.setUint8(0,this.getUint8(t+0)),this._internalBuffer.setUint8(1,this.getUint8(t+1)),this._internalBuffer.setUint8(2,this.getUint8(t+2)),this._internalBuffer.setUint8(3,this.getUint8(t+3)),this._internalBuffer.getInt32(0,n)}getInt8(t){const[n,r]=this._findDataViewWithOffset(t);return n.getInt8(t-r)}getUint16(t,n){return this._internalBuffer.setUint8(0,this.getUint8(t+0)),this._internalBuffer.setUint8(1,this.getUint8(t+1)),this._internalBuffer.getUint16(0,n)}getUint32(t,n){return this._internalBuffer.setUint8(0,this.getUint8(t+0)),this._internalBuffer.setUint8(1,this.getUint8(t+1)),this._internalBuffer.setUint8(2,this.getUint8(t+2)),this._internalBuffer.setUint8(3,this.getUint8(t+3)),this._internalBuffer.getUint32(0,n)}getUint8(t){const[n,r]=this._findDataViewWithOffset(t);return n.getUint8(t-r)}setFloat32(t,n,r){this._internalBuffer.setFloat32(0,n,r),this.setUint8(t,this._internalBuffer.getUint8(0)),this.setUint8(t+1,this._internalBuffer.getUint8(1)),this.setUint8(t+2,this._internalBuffer.getUint8(2)),this.setUint8(t+3,this._internalBuffer.getUint8(3))}setFloat64(t,n,r){this._internalBuffer.setFloat64(0,n,r),this.setUint8(t,this._internalBuffer.getUint8(0)),this.setUint8(t+1,this._internalBuffer.getUint8(1)),this.setUint8(t+2,this._internalBuffer.getUint8(2)),this.setUint8(t+3,this._internalBuffer.getUint8(3)),this.setUint8(t+4,this._internalBuffer.getUint8(4)),this.setUint8(t+5,this._internalBuffer.getUint8(5)),this.setUint8(t+6,this._internalBuffer.getUint8(6)),this.setUint8(t+7,this._internalBuffer.getUint8(7))}setInt16(t,n,r){this._internalBuffer.setInt16(0,n,r),this.setUint8(t,this._internalBuffer.getUint8(0)),this.setUint8(t+1,this._internalBuffer.getUint8(1))}setInt32(t,n,r){this._internalBuffer.setInt32(0,n,r),this.setUint8(t,this._internalBuffer.getUint8(0)),this.setUint8(t+1,this._internalBuffer.getUint8(1)),this.setUint8(t+2,this._internalBuffer.getUint8(2)),this.setUint8(t+3,this._internalBuffer.getUint8(3))}setInt8(t,n){const[r,o]=this._findDataViewWithOffset(t);r.setInt8(t-o,n)}setUint16(t,n,r){this._internalBuffer.setUint16(0,n,r),this.setUint8(t,this._internalBuffer.getUint8(0)),this.setUint8(t+1,this._internalBuffer.getUint8(1))}setUint32(t,n,r){this._internalBuffer.setUint32(0,n,r),this.setUint8(t,this._internalBuffer.getUint8(0)),this.setUint8(t+1,this._internalBuffer.getUint8(1)),this.setUint8(t+2,this._internalBuffer.getUint8(2)),this.setUint8(t+3,this._internalBuffer.getUint8(3))}setUint8(t,n){const[r,o]=this._findDataViewWithOffset(t);r.setUint8(t-o,n)}_findDataViewWithOffset(t){let n=0;for(const r of this._dataViews){const o=n+r.byteLength;if(t>=n&&t<o)return[r,n];n=o}throw new RangeError}}const Ja=(e,t,n,r,o)=>(s,a,c,i)=>{const u=c.getAudioTracks(),d=[],l=u.length===0?void 0:u[0].getSettings().channelCount,h=new a(c,{mimeType:"audio/webm;codecs=pcm"}),m=u.length===0?void 0:u[0].getSettings().sampleRate;let w=null,f=()=>{};const p=A=>{s.dispatchEvent(e("dataavailable",{data:new Blob(A,{type:i})}))},g=async(A,T)=>{const _=await Ue(A,T);h.state==="inactive"?d.push(..._):(p(_),w=g(A,T))},v=()=>{h.state!=="inactive"&&(w!==null&&(w.catch(()=>{}),w=null),f(),f=()=>{},h.stop())};return h.addEventListener("error",()=>{v(),s.dispatchEvent(new ErrorEvent("error",{error:t()}))}),h.addEventListener("start",()=>s.dispatchEvent(new Event("start"))),{get mimeType(){return i},get state(){return h.state},pause(){return h.pause()},resume(){return h.resume()},start(A){if(c.getVideoTracks().length>0)throw n();if(h.state==="inactive"){if(m===void 0)throw new Error("The sampleRate is not defined.");let T=!1,_=!1,E=0,y=on(i,m);f=()=>{_=!0};const C=sn(h,"dataavailable")(({data:M})=>{E+=1,y=y.then(async({dataView:I=null,elementType:N=null,encoderId:P,port:k})=>{const B=await M.arrayBuffer();E-=1;const U=I===null?new st([B]):new st([...I.buffers,B],I.byteOffset);if(!T&&h.state==="recording"&&!_){const L=o(U,0);if(L===null)return{dataView:U,elementType:N,encoderId:P,port:k};const{value:W}=L;if(W!==172351395)return{dataView:I,elementType:N,encoderId:P,port:k};T=!0}const{currentElementType:R,offset:x,contents:D}=r(U,N,l),O=x<U.byteLength?new st(U.buffers,U.byteOffset+x):null;return D.forEach(L=>k.postMessage(L,L.map(({buffer:W})=>W))),E===0&&(h.state==="inactive"||_)&&(Ue(P,null).then(L=>{p([...d,...L]),d.length=0,s.dispatchEvent(new Event("stop"))}),k.postMessage([]),k.close(),C()),{dataView:O,elementType:R,encoderId:P,port:k}})});A!==void 0&&y.then(({encoderId:M})=>w=g(M,A))}h.start(100)},stop:v}},Qa=()=>typeof window>"u"?null:window,Qn=(e,t)=>{if(t>=e.byteLength)return null;const n=e.getUint8(t);if(n>127)return 1;if(n>63)return 2;if(n>31)return 3;if(n>15)return 4;if(n>7)return 5;if(n>3)return 6;if(n>1)return 7;if(n>0)return 8;const r=Qn(e,t+1);return r===null?null:r+8},ei=(e,t)=>n=>{const r={value:e};return Object.defineProperties(n,{currentTarget:r,target:r}),typeof t=="function"?t.call(e,n):t.handleEvent.call(e,n)},er=[],et=Qa(),ti=Er(et),tr=pr(ti),ni=Ka(tr,_t,vr,$e),kt=Nr(Qn),ri=Cr(kt),oi=Tr(kt),si=mr(ri,oi),ai=Ja(tr,_t,$e,si,kt),ii=wr(et),ci=br(et),ui=Ar(_t,$e),Ci=yr(ui,$e,ni,ai,er,gr(ii,ei),ci),Ti=()=>_r(et),Ni=async e=>{er.push(await hr(e))};export{Ci as MediaRecorder,Ti as isSupported,Ni as register};
9
- //# sourceMappingURL=module-447425fe.js.map