parquet-converter committed on
Commit
c41fc7b
·
1 Parent(s): 8720dd0

Update parquet files (step 91 of 249)

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Enscape 2.7 Full Crack Bagas31 for Free The Good the Bad and the Ugly.md +0 -24
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/H force keygen autocad 2010 The ultimate solution for Autocad activation.md +0 -157
  3. spaces/1gistliPinn/ChatGPT4/Examples/Artcut 2009 !!EXCLUSIVE!! Full Crack.md +0 -68
  4. spaces/1gistliPinn/ChatGPT4/Examples/Cineasset Doremi Crack.md +0 -19
  5. spaces/1line/AutoGPT/autogpt/memory/pinecone.py +0 -75
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Black Commando Special Ops MOD APK The Best Offline FPS Shooting Game of 2023.md +0 -61
  7. spaces/1phancelerku/anime-remove-background/Bingo Crush - The Ultimate Bingo Experience on Android APK.md +0 -95
  8. spaces/1phancelerku/anime-remove-background/Chess Universe Mod APK - The Ultimate Chess Experience for Android Users.md +0 -108
  9. spaces/1toTree/lora_test/ppdiffusers/pipelines/repaint/pipeline_repaint.py +0 -172
  10. spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +0 -469
  11. spaces/AI-Naga/Roof_Element_Identification/app.py +0 -68
  12. spaces/AIGC-Audio/AudioGPT/audio_detection/audio_infer/utils/plot_for_paper.py +0 -565
  13. spaces/AIGC-Audio/AudioGPT/sound_extraction/model/LASSNet.py +0 -25
  14. spaces/AISuperheroes/02GR-ASR-Memory/app.py +0 -196
  15. spaces/ATang0729/Forecast4Muses/Model/Model6/extensions/vis_pred_save.py +0 -209
  16. spaces/AchyuthGamer/text-to-speech-client/assets/worker-7f2d1abe.js +0 -0
  17. spaces/ActivatedOne/JorisCos-ConvTasNet_Libri1Mix_enhsingle_16k/README.md +0 -12
  18. spaces/Adapter/T2I-Adapter/ldm/modules/diffusionmodules/model.py +0 -852
  19. spaces/Adieudale/Adieudale/Dockerfile +0 -34
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/pngappender.d.ts +0 -9
  21. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/flip/Factory.js +0 -11
  22. spaces/AlexWortega/t5_predict_activity/README.md +0 -12
  23. spaces/Alichuan/VITS-Umamusume-voice-synthesizer/ONNXVITS_infer.py +0 -201
  24. spaces/Alpaca233/SadTalker/src/face3d/data/image_folder.py +0 -66
  25. spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/utils/plot.py +0 -72
  26. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/deepfloyd_if/watermark.py +0 -46
  27. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +0 -755
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stochastic_karras_ve/pipeline_stochastic_karras_ve.py +0 -128
  29. spaces/Andy1621/uniformer_image_detection/configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py +0 -5
  30. spaces/Andy1621/uniformer_image_detection/mmdet/core/post_processing/merge_augs.py +0 -150
  31. spaces/Andy1621/uniformer_image_segmentation/configs/point_rend/pointrend_r101_512x512_160k_ade20k.py +0 -2
  32. spaces/AnimaLab/bias-test-gpt-pairs/mgr_cookies.py +0 -64
  33. spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/scripts/image_nll.py +0 -96
  34. spaces/ArkanDash/rvc-models-new/lib/infer_pack/models_dml.py +0 -1124
  35. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/extern/__init__.py +0 -76
  36. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/roi_heads/__init__.py +0 -29
  37. spaces/B-patents/patent-bert/app.py +0 -351
  38. spaces/Banbri/zcvzcv/src/app/queries/predict.ts +0 -9
  39. spaces/Banjoo/What_The_Bun/README.md +0 -13
  40. spaces/Benson/text-generation/Examples/Apk Club Gacha Para Porttil.md +0 -106
  41. spaces/Benson/text-generation/Examples/Combate Areo En Lnea Mod Apk.md +0 -99
  42. spaces/Benson/text-generation/Examples/Descargar Geometra Dash Meltdown Versin Completa Apk.md +0 -53
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/index/sources.py +0 -223
  44. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/dep_util.py +0 -25
  45. spaces/Bokanovskii/Image-to-music/shred_model.py +0 -109
  46. spaces/BorisovMaksim/denoising/denoisers/SpectralGating.py +0 -24
  47. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/checkpoint/__init__.py +0 -10
  48. spaces/CVPR/Dual-Key_Backdoor_Attacks/full_inference.py +0 -277
  49. spaces/CVPR/LIVE/thrust/internal/benchmark/combine_benchmark_results.py +0 -817
  50. spaces/CVPR/LIVE/thrust/thrust/iterator/detail/iterator_adaptor_base.h +0 -111
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Enscape 2.7 Full Crack Bagas31 for Free The Good the Bad and the Ugly.md DELETED
@@ -1,24 +0,0 @@
1
- <br />
2
- <h1>How to Download Enscape 2.7 Full Crack Bagas31 for Free</h1>
3
- <p>If you are looking for a way to download Enscape 2.7 full crack bagas31 for free, you have come to the right place. Enscape is a powerful and easy-to-use 3D rendering software that works with popular design tools like SketchUp, Revit, Rhino, and ArchiCAD. With Enscape, you can create stunning photorealistic images, animations, and virtual reality experiences in minutes.</p>
4
- <h2>download enscape 2.7 full crack bagas31</h2><br /><p><b><b>Download</b> &#10042; <a href="https://byltly.com/2uKvpF">https://byltly.com/2uKvpF</a></b></p><br /><br />
5
- <p>However, Enscape is not a cheap software. The official price for a monthly subscription is $58.99, and the annual license costs $469.00. That's why many people are searching for a way to download Enscape 2.7 full crack bagas31 for free. Bagas31 is a popular website that offers cracked software and games for download.</p>
6
- <p>But before you rush to download Enscape 2.7 full crack bagas31 for free, you should know the risks and consequences of using cracked software. In this article, we will explain why you should avoid downloading Enscape 2.7 full crack bagas31 for free, and what are the best alternatives to get Enscape legally and safely.</p>
7
- <h2>Why You Should Avoid Downloading Enscape 2.7 Full Crack Bagas31 for Free</h2>
8
- <p>Downloading Enscape 2.7 full crack bagas31 for free may seem like a good idea at first, but it can actually cause you more trouble than you think. Here are some of the reasons why you should avoid downloading Enscape 2.7 full crack bagas31 for free:</p>
9
- <ul>
10
- <li><b>It is illegal.</b> Downloading and using cracked software is a violation of the intellectual property rights of the software developers. You can face legal actions and penalties if you are caught using cracked software.</li>
11
- <li><b>It is unsafe.</b> Downloading cracked software from unknown sources can expose your computer to viruses, malware, spyware, ransomware, and other malicious programs that can harm your data and system. You can also compromise your personal and financial information if you use cracked software online.</li>
12
- <li><b>It is unreliable.</b> Cracked software often has bugs, errors, glitches, and compatibility issues that can affect your work and productivity. You may also miss out on important updates, features, and support from the official developers.</li>
13
- <li><b>It is unethical.</b> Downloading and using cracked software is unfair to the software developers who spend time, money, and effort to create quality products. You are also hurting the software industry and the innovation that it brings.</li>
14
- </ul>
15
- <p>As you can see, downloading Enscape 2.7 full crack bagas31 for free is not worth the risk and hassle. You are better off using the official version of Enscape that is legal, safe, reliable, and ethical.</p>
16
- <p></p>
17
- <h2>How to Get Enscape Legally and Safely</h2>
18
- <p>If you want to use Enscape without breaking the law or compromising your security, you have several options to get it legally and safely. Here are some of the best ways to get Enscape legally and safely:</p>
19
- <ul>
20
- <li><b>Try the free trial.</b> Enscape offers a 14-day free trial that allows you to test all the features and functions of the software without any limitations. You can download the free trial from the official website of Enscape: <a href="https://enscape3d.com/downloads/">https://enscape3d.com/downloads/</a>. You will need to register with your email address and verify your account before you can start the trial.</li>
21
- <li><b>Buy the subscription or license.</b> If you like Enscape after trying the free trial, you can buy the subscription or license that suits your needs and budget. You can choose between monthly or annual plans, or buy a perpetual license that gives you lifetime access to the software. You can also get discounts if you are a student, educator, or non-profit organization. You can buy Enscape from the official website of Enscape: <a href="https://enscape3d.com/pricing/">https://enscape3d.com/pricing/</a>.</li>
22
- <li><b>Use alternative software.</b> If you find Enscape too expensive or not suitable for your needs, you can also use alternative software that offers similar or better features and functions than Enscape. Some of the best alternative software</p> ddb901b051<br />
23
- <br />
24
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/H force keygen autocad 2010 The ultimate solution for Autocad activation.md DELETED
@@ -1,157 +0,0 @@
1
- <br />
2
- <h1>How to Use H Force Keygen Autocad 2010</h1>
3
- <p>Autocad is one of the most popular and powerful software for designing and drafting in various fields such as architecture, engineering, construction, and more. However, it is also quite expensive and requires online activation to use it fully. If you want to use Autocad 2010 without paying or activating it online, you might be interested in using h force keygen autocad 2010.</p>
4
- <p>H force keygen autocad 2010 is a tool that can generate activation codes for any Autodesk product, including Autocad 2010. It can also bypass the online activation process and let you use Autocad 2010 offline. In this article, we will show you what h force keygen autocad 2010 is, why you might want to use it, how to download it, how to install and activate Autocad 2010 with it, and some tips and warnings for using it.</p>
5
- <h2>h force keygen autocad 2010</h2><br /><p><b><b>Download</b> &#8230; <a href="https://byltly.com/2uKwzF">https://byltly.com/2uKwzF</a></b></p><br /><br />
6
- <h2>What is H Force Keygen Autocad 2010?</h2>
7
- <h3>A tool to generate activation codes for Autodesk products</h3>
8
- <p>H force keygen autocad 2010 is a software that can create serial numbers and activation codes for any Autodesk product, such as Autocad, Revit, Maya, Inventor, etc. It works by using an algorithm that can crack the encryption of Autodesk's licensing system. By using h force keygen autocad 2010, you can get a valid activation code for your Autodesk product without having to pay or register online.</p>
9
- <h3>A way to bypass the online activation process</h3>
10
- <p>Another feature of h force keygen autocad 2010 is that it can bypass the online activation process of Autodesk products. Normally, when you install an Autodesk product, you have to enter a serial number and a product key, and then activate it online by connecting to Autodesk's servers. However, with h force keygen autocad 2010, you can skip this step and activate your Autodesk product offline. This way, you can use your Autodesk product without having to connect to the internet or worry about being detected by Autodesk.</p>
11
- <p>xforce keygen autocad 2010 32 bit & 64 bit<br />
12
- how to activate key autocad 2010 with xforce<br />
13
- xforce keygen for all autodesk products 2010<br />
14
- download xforce keygen 2010 32 bit & 64 bit<br />
15
- civilmdc autodesk 2010 xforce keygenerator<br />
16
- how to use xforce keygen for autodesk 2010<br />
17
- disable antivirus before using xforce keygen<br />
18
- xforce keygen request code and activation code<br />
19
- download x force 2009 crack for autocad 2010<br />
20
- download x force 2010 crack for autocad 2010<br />
21
- download x force 2011 crack for autocad 2010<br />
22
- download x force 2012 crack for autocad 2010<br />
23
- rewanh h force keygen autocad 2010 free download<br />
24
- h force keygen autocad 2010 design and shape the world<br />
25
- patronway h force keygen autocad 2010 hot<br />
26
- h force keygen autocad 2010 versioning of a library<br />
27
- h force keygen autocad 2010 put your own name on it<br />
28
- h force keygen autocad 2010 it's here just here yes<br />
29
- h force keygen autocad 2010 lt v2010<br />
30
- h force keygen autocad 2010 civil engineering software<br />
31
- autodesk autocad 2010 product key and serial number<br />
32
- autodesk autocad 2010 system requirements and compatibility<br />
33
- autodesk autocad 2010 features and enhancements<br />
34
- autodesk autocad 2010 tutorials and learning resources<br />
35
- autodesk autocad 2010 trial version and free download<br />
36
- autodesk autocad 2010 service pack and update releases<br />
37
- autodesk autocad 2010 support and troubleshooting<br />
38
- autodesk autocad 2010 forums and community<br />
39
- autodesk autocad 2010 subscription and benefits<br />
40
- autodesk autocad 2010 license and activation issues<br />
41
- how to install and run autodesk autocad 2010 on windows<br />
42
- how to install and run autodesk autocad 2010 on mac os x<br />
43
- how to install and run autodesk autocad 2010 on linux<br />
44
- how to uninstall and remove autodesk autocad 2010 completely<br />
45
- how to migrate and transfer autodesk autocad 2010 settings and data<br />
46
- how to customize and optimize autodesk autocad 2010 performance and appearance<br />
47
- how to create and edit drawings in autodesk autocad 2010<br />
48
- how to annotate and dimension drawings in autodesk autocad 2010<br />
49
- how to print and plot drawings in autodesk autocad 2010<br />
50
- how to share and collaborate drawings in autodesk autocad 2010<br />
51
- how to import and export drawings in autodesk autocad 2010<br />
52
- how to use blocks and attributes in autodesk autocad 2010<br />
53
- how to use layers and layouts in autodesk autocad 2010<br />
54
- how to use commands and shortcuts in autodesk autocad 2010<br />
55
- how to use tools and palettes in autodesk autocad 2010<br />
56
- how to use hatch and gradient in autodesk autocad 2010<br />
57
- how to use text and tables in autodesk autocad 2010<br />
58
- how to use dimensions and leaders in autodesk autocad 2010<br />
59
- how to use dynamic blocks and parameters in autodesk autocad 2010</p>
60
- <h2>Why Use H Force Keygen Autocad 2010?</h2>
61
- <h3>To save money and time</h3>
62
- <p>One of the main reasons why you might want to use h force keygen autocad 2010 is to save money and time. As you may know, Autocad 2010 is not a cheap software. It costs hundreds or even thousands of dollars depending on the version and the license type. Moreover, it requires online activation every time you install it on a new computer or change your hardware configuration. This can be inconvenient and time-consuming if you have multiple computers or devices that you want to use Autocad on. By using h force keygen autocad 2010, you can avoid these costs and hassles and use Autocad 2010 on as many computers as you want.</p>
63
- <h3>To access all the features and updates of Autocad 2010</h3>
64
- <p>Another reason why you might want to use h force keygen autocad 2010 is to access all the features and updates of Autocad 2010. Autocad 2010 is a powerful software that has many features and functions that can help you create amazing designs and drawings. However, some of these features are only available for registered users or subscribers who have paid for a license. For example, some of the advanced tools, commands, options, settings, libraries, templates, etc. are only accessible for licensed users. Moreover, licensed users can also get regular updates and patches from Autodesk that can improve the performance and security of Autocad. By using h force keygen autocad 2010, you can unlock all these features and updates without having to pay or subscribe.</p>
65
- <h2>How to Download H Force Keygen Autocad 2010?</h2>
66
- <h3>Find a reliable source online</h3>
67
- <p>The first step to use h force keygen autocad 2010 is to find a reliable source online where you can download it safely. Since h force keygen autocad 2010 is an illegal software that violates Autodesk's terms of service and intellectual property rights, it is not available on official websites or platforms. Instead, you have to look for it on unofficial websites or platforms that host pirated software or cracks. However, not all of these websites or platforms are trustworthy or secure. Some of them may contain malware or viruses that can harm your computer or steal your personal information. Therefore, you have to be careful when choosing where to download h force keygen autocad 2010 from.</p>
68
- <p>One way to find a reliable source online is to use a search engine such as Google or Bing . You can type keywords such as "h force keygen autocad 2010", "h force keygen autocad", "xforce keygen autocad", etc. You will get many results that link to different websites or platforms that offer h force keygen autocad 2010 for download. However, before clicking on any of these links, you should check their credibility and reputation by looking at their domain name, reviews, ratings, comments, etc. You should avoid websites or platforms that have suspicious domain names (such as .ru .cn .tk .biz .info etc.), negative reviews or ratings (such as low stars or thumbs down), no comments or feedback (or only positive ones), etc.</p>
69
- <p>Another way to find a reliable source online is to use a peer-to-peer network such as BitTorrent or uTorrent . These are applications that allow users to share files with each other directly without using a central server. You can use these applications to download h force keygen autocad 2010 from other users who have already downloaded it before. However, before downloading any file from these applications, you should check its size (it should be around few MBs), name (it should match with h force keygen autocad), extension (it should be .exe .rar .zip etc.), seeders (the more the better), leechers (the less the better), etc.</p>
70
- <h3>Extract the zip file and run the keygen</h3>
71
- <p>The second step to use h force keygen autocad 2010 is to extract the zip file and run the keygen. After you have downloaded h force keygen autocad 2010 from a reliable source online, you will get a zip file that contains the keygen and some instructions. You need to extract this zip file using a software such as WinRAR or 7-Zip. You will get a folder that contains the keygen and some files. You need to run the keygen by double-clicking on it or right-clicking on it and choosing Run as administrator. You will see a window that looks like this:</p>
72
- <code>
73
- +---------------------------------------------------+ | X-FORCE KEYGEN | | Autodesk 2010 Products | | | | Serial: [Enter any serial number] | | Product Key: [Enter the product key for Autocad] | | Request Code: [Enter the request code from Autocad]| | | | [Patch] [Generate] [Copy] | +---------------------------------------------------+ </code>
74
- <p>You need to enter some information in the keygen to generate an activation code for Autocad 2010. Here is what you need to do:</p>
75
- <ul>
76
- <li>Enter any serial number in the Serial field. You can use any combination of numbers and letters, such as 123-4567890 or ABCD-EFGH-IJKL.</li>
77
- <li>Enter the product key for Autocad 2010 in the Product Key field. You can find the product key for Autocad 2010 on the official website of Autodesk or on the installation CD or DVD. The product key for Autocad 2010 is 001B1.</li>
78
- <li>Enter the request code from Autocad 2010 in the Request Code field. You can get the request code from Autocad 2010 by installing it and launching it for the first time. You will see an activation screen that asks you to enter a serial number and a product key. After you enter them, you will see another screen that shows you a request code. You need to copy this request code and paste it into the keygen.</li>
79
- <li>Click on Patch in the keygen. You will see a message that says "Successfully patched". This means that the keygen has modified some files in your Autodesk product to bypass the online activation process.</li>
80
- <li>Click on Generate in the keygen. You will see an activation code in the keygen. You need to copy this activation code and paste it into the activation screen of Autocad 2010.</li>
81
- </ul>
82
- <h2>How to Install and Activate Autocad 2010 with H Force Keygen?</h2>
83
- <h3>Install Autocad 2010 and restart your computer</h3>
84
- <p>The third step to use h force keygen autocad 2010 is to install Autocad 2010 and restart your computer. Before you can use h force keygen autocad 2010, you need to have Autocad 2010 installed on your computer. You can install Autocad 2010 from a CD or DVD that you have bought or downloaded from an unofficial website or platform. To install Autocad 2010, you need to follow these steps:</p>
85
- <ol>
86
- <li>Insert the CD or DVD of Autocad 2010 into your computer's drive or mount the ISO file if you have downloaded it.</li>
87
- <li>Run the setup.exe file or autorun.exe file from the CD or DVD or ISO file.</li>
88
- <li>Follow the instructions on the screen to choose your language, accept the license agreement, enter your name and organization, etc.</li>
89
- <li>Choose the type of installation you want: Typical (recommended), Custom (advanced), or Network (for multiple users).</li>
90
- <li>Choose the components you want to install: Autocad 2010, Autodesk Design Review, Autodesk DWG TrueView, etc.</li>
91
- <li>Choose the location where you want to install Autocad 2010 on your computer's hard drive.</li>
92
- <li>Click on Install and wait for the installation process to complete.</li>
93
- <li>Click on Finish and restart your computer.</li>
94
- </ol>
95
- <h3>Disable your internet connection and antivirus</h3>
96
- <p>The fourth step to use h force keygen autocad 2010 is to disable your internet connection and antivirus. Before you can activate Autocad 2010 with h force keygen autocad 2010, you need to disconnect your computer from the internet and disable your antivirus software. This is because h force keygen autocad 2010 is an illegal software that can be detected and blocked by Autodesk's servers or your antivirus software. To disable your internet connection and antivirus, you need to follow these steps:</p>
97
- <ul>
98
- <li>To disable your internet connection, you can unplug your ethernet cable, turn off your Wi-Fi adapter, or disable your network adapter from your computer's settings.</li>
99
- <li>To disable your antivirus software, you can right-click on its icon in your system tray and choose Disable, Exit, Quit, or Turn off. Alternatively, you can open its main window and look for an option to disable it temporarily or permanently.</li>
100
- </ul>
101
- <h3>Run the keygen and click on Patch</h3>
102
- <p>The fifth step to use h force keygen autocad 2010 is to run the keygen and click on Patch. After you have disabled your internet connection and antivirus, you need to run h force keygen autocad 2010 and patch your Autodesk product. To do this, you need to follow these steps:</p>
103
- <ul>
104
- <li>Navigate to the folder where you have extracted h force keygen autocad 2010 and double-click on the keygen.exe file or right-click on it and choose Run as administrator.</li>
105
- <li>Enter any serial number in the Serial field. You can use any combination of numbers and letters, such as 123-4567890 or ABCD-EFGH-IJKL.</li>
106
- <li>Enter the product key for Autocad 2010 in the Product Key field. You can find the product key for Autocad 2010 on the official website of Autodesk or on the installation CD or DVD. The product key for Autocad 2010 is 001B1.</li>
107
- <li>Click on Patch in the keygen. You will see a message that says "Successfully patched". This means that the keygen has modified some files in your Autodesk product to bypass the online activation process.</li>
108
- </ul>
109
- <h3>Copy the request code and paste it into the keygen</h3>
110
- <p>The sixth step to use h force keygen autocad 2010 is to copy the request code and paste it into the keygen. After you have patched your Autodesk product with h force keygen autocad 2010, you need to copy the request code from Autocad 2010 and paste it into the keygen. To do this, you need to follow these steps:</p>
111
- <ul>
112
- <li>Launch Autocad 2010 by clicking on its icon on your desktop or start menu.</li>
113
- <li>You will see an activation screen that asks you to enter a serial number and a product key. Enter the same serial number and product key that you have entered in the keygen.</li>
114
- <li>You will see another screen that shows you a request code. This is a code that is generated by Autocad 2010 based on your computer's hardware configuration and software installation.</li>
115
- <li>Copy this request code by selecting it with your mouse and pressing Ctrl+C or by clicking on Copy.</li>
116
- <li>Go back to the keygen and paste the request code into the Request Code field by pressing Ctrl+V or by clicking on Paste.</li>
117
- </ul>
118
- <h3>Copy the activation code and paste it into the activation screen</h3>
119
- <p>The seventh and final step to use h force keygen autocad 2010 is to copy the activation code and paste it into the activation screen. After you have entered the request code into the keygen, you need to copy the activation code from the keygen and paste it into the activation screen of Autocad 2010. To do this, you need to follow these steps:</p>
120
- <ul>
121
- <li>Click on Generate in the keygen. You will see an activation code in the keygen. This is a code that is generated by the keygen based on your request code.</li>
122
- <li>Copy this activation code by selecting it with your mouse and pressing Ctrl+C or by clicking on Copy.</li>
123
- <li>Go back to the activation screen of Autocad 2010 and paste the activation code into the Activation Code field by pressing Ctrl+V or by clicking on Paste.</li>
124
- <li>Click on Next in the activation screen. You will see a message that says "Thank you for activating your Autodesk product". This means that you have successfully activated Autocad 2010 with h force keygen autocad 2010.</li>
125
- <li>Click on Finish and enjoy using Autocad 2010 with all its features and updates.</li>
126
- </ul>
127
- <h2>Tips and Warnings for Using H Force Keygen Autocad 2010</h2>
128
- <h3>Use it at your own risk</h3>
129
- <p>While using h force keygen autocad 2010 can save you money and time and give you access to all the features and updates of Autocad 2010, it also comes with some risks and drawbacks. Using h force keygen autocad 2010 is illegal and unethical, as it violates Autodesk's terms of service and intellectual property rights. By using h force keygen autocad 2010, you are not supporting Autodesk's development and innovation, and you are depriving them of their rightful revenue. Moreover, using h force keygen autocad 2010 can expose you to legal consequences, such as fines, lawsuits, or even criminal charges, if Autodesk finds out that you are using their software without a valid license. Therefore, use h force keygen autocad 2010 at your own risk and discretion.</p>
130
- <h3>Be careful of malware and viruses</h3>
131
- <p>Another risk of using h force keygen autocad 2010 is that it can contain malware or viruses that can harm your computer or steal your personal information. Since h force keygen autocad 2010 is an unofficial software that is not verified or approved by Autodesk or any other authority, it can be infected with malicious code that can damage your system files, corrupt your data, slow down your performance, spy on your activities, or even take control of your computer. Therefore, be careful of where you download h force keygen autocad 2010 from and what files you run on your computer. Always scan h force keygen autocad 2010 with a reputable antivirus software before using it, and never open any suspicious files or links that come with it.</p>
132
- <h3>Do not update Autocad 2010 online</h3>
133
- <p>A final tip for using h force keygen autocad 2010 is to avoid updating Autocad 2010 online. While h force keygen autocad 2010 can give you access to all the features and updates of Autocad 2010, it can also make your software vulnerable to detection and deactivation by Autodesk's servers. If you update Autocad 2010 online, you may lose your activation status and get a message that says "Your license is invalid" or "Your license has expired". Moreover, updating Autocad 2010 online may also overwrite some files that were patched by h force keygen autocad 2010, making it unusable or unstable. Therefore, do not update Autocad 2010 online if you have activated it with h force keygen autocad 2010. Instead, you can use offline updates or patches that are compatible with h force keygen autocad 2010.</p> <h2>Conclusion</h2>
134
- <p>In conclusion, h force keygen autocad 2010 is a tool that can generate activation codes for Autocad 2010 and bypass the online activation process. It can help you save money and time and access all the features and updates of Autocad 2010. However, it also comes with some risks and drawbacks, such as being illegal and unethical, containing malware or viruses, and exposing you to legal consequences. Therefore, you should use h force keygen autocad 2010 at your own risk and discretion, and be careful of where you download it from and what files you run on your computer. Moreover, you should avoid updating Autocad 2010 online if you have activated it with h force keygen autocad 2010.</p>
135
- <h2>FAQs</h2>
136
- <h3>What is Autocad 2010?</h3>
137
- <p>Autocad 2010 is a software for designing and drafting in various fields such as architecture, engineering, construction, and more. It is developed by Autodesk, a leading company in software and technology. Autocad 2010 has many features and functions that can help you create amazing designs and drawings.</p>
138
- <h3>What is h force keygen autocad 2010?</h3>
139
- <p>H force keygen autocad 2010 is a tool that can generate activation codes for Autocad 2010 and bypass the online activation process. It can help you use Autocad 2010 without paying or activating it online.</p>
140
- <h3>How to use h force keygen autocad 2010?</h3>
141
- <p>To use h force keygen autocad 2010, you need to follow these steps:</p>
142
- <ol>
143
- <li>Find a reliable source online where you can download h force keygen autocad 2010 safely.</li>
144
- <li>Extract the zip file and run the keygen.</li>
145
- <li>Install Autocad 2010 and restart your computer.</li>
146
- <li>Disable your internet connection and antivirus.</li>
147
- <li>Run the keygen and click on Patch.</li>
148
- <li>Copy the request code from Autocad 2010 and paste it into the keygen.</li>
149
- <li>Copy the activation code from the keygen and paste it into the activation screen of Autocad 2010.</li>
150
- </ol>
151
- <h3>Is h force keygen autocad 2010 safe?</h3>
152
- <p>H force keygen autocad 2010 is not safe, as it is an illegal software that violates Autodesk's terms of service and intellectual property rights. It can also contain malware or viruses that can harm your computer or steal your personal information. Moreover, it can expose you to legal consequences if Autodesk finds out that you are using their software without a valid license.</p>
153
- <h3>Is h force keygen autocad 2010 legal?</h3>
154
- <p>H force keygen autocad 2010 is not legal, as it violates Autodesk's terms of service and intellectual property rights. By using h force keygen autocad 2010, you are not supporting Autodesk's development and innovation, and you are depriving them of their rightful revenue. Moreover, you are breaking the law and risking fines, lawsuits, or even criminal charges if Autodesk finds out that you are using their software without a valid license.</p>
155
- </p> 0a6ba089eb<br />
156
- <br />
157
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Artcut 2009 !!EXCLUSIVE!! Full Crack.md DELETED
@@ -1,68 +0,0 @@
1
-
2
- <h1>Artcut 2009: A Comprehensive Review</h1>
3
- <p>Artcut 2009 is a software that allows you to cut vinyl and plastic with ease. It is designed for cutting plotters and supports EPS files for cutting. In this article, we will give you a comprehensive review of Artcut 2009, its features, benefits, and drawbacks.</p>
4
- <h2>Artcut 2009 Full Crack</h2><br /><p><b><b>DOWNLOAD</b> &#10040; <a href="https://imgfil.com/2uy0qH">https://imgfil.com/2uy0qH</a></b></p><br /><br />
5
- <h2>What is Artcut 2009?</h2>
6
- <p>Artcut 2009 is a software developed by WENTAI Technology Corporation, a Chinese company that specializes in cutting plotters. It is a 3D and 2D graphics programming software that can be used for 5-axis laser cutting with LT-FREE. It has a step-by-step guide to the entire programming process and allows you to modify axis paths independently to avoid collisions with other components or devices.</p>
7
- <p>Artcut 2009 is a user-friendly software that does not require any special skills to operate. It has a straight cut interface that lets you interact with others easily. It works in the length of included fashion where pictures can only point to the bitmap and are in style mode. It also has a color confirmation feature that corrects sharp images linearly.</p>
8
- <h2>What are the features of Artcut 2009?</h2>
9
- <p>Artcut 2009 has many features that make it a powerful software for cutting vinyl and plastic. Some of the main features are:</p>
10
- <ul>
11
- <li>Built-in assistance for reducing plotters identified by manufacturers.</li>
12
- <li>Ability to produce plastic decreasing and nesting up to produce plastic slicing.</li>
13
- <li>Support for EPS files for cutting out.</li>
14
- <li>Numeric user's files parameters.</li>
15
- <li>Phase compensation, corner adjustment, and purchase steps for the initial phase.</li>
16
- <li>Rounding corners which are the basics of the artcut.</li>
17
- </ul>
18
- <h2>What are the benefits of Artcut 2009?</h2>
19
- <p>Artcut 2009 has many benefits that make it a popular software among users. Some of the main benefits are:</p>
20
- <p></p>
21
- <ul>
22
- <li>It is free to use for computer clients.</li>
23
- <li>It is safe and secure to use for Windows.</li>
24
- <li>It can cut all the Chinese plotters.</li>
25
- <li>It has improved features and functionalities for the users.</li>
26
- <li>It can help beginners to fulfill their target in their working field.</li>
27
- <li>It can create and edit 3D and 2D graphics with ease.</li>
28
- </ul>
29
- <h2>What are the drawbacks of Artcut 2009?</h2>
30
- <p>Artcut 2009 is not a perfect software and it has some drawbacks that you should be aware of. Some of the main drawbacks are:</p>
31
- <ul>
32
- <li>It only supports EPS files for cutting out.</li>
33
- <li>It does not have a lot of updates or support from the developer.</li>
34
- <li>It may not be compatible with some newer cutting plotters or devices.</li>
35
- <li>It may have some bugs or errors that affect its performance.</li>
36
- </ul>
37
- <h2>How to download and install Artcut 2009?</h2>
38
- <p>If you want to download and install Artcut 2009 for free on your Windows computer, you can follow these steps:</p>
39
- <ol>
40
- <li>Go to this link: https://isoriver.com/download-artcut-2009/</li>
41
- <li>Click on the download button and wait for the file to be downloaded.</li>
42
- <li>Extract the file using WinRAR or any other software.</li>
43
- <li>Run the setup file and follow the instructions on the screen.</li>
44
- <li>You may need to install some drivers for your cutting plotter or device. You can find them here: https://rahim-soft.com/artcut-2009/</li>
45
- <li>Enjoy using Artcut 2009 for your cutting projects.</li>
46
- </ol></p>
47
- <h2>How to use Artcut 2009?</h2>
48
- <p>Artcut 2009 is easy to use and has a simple interface. To use Artcut 2009, you need to follow these steps:</p>
49
- <ol>
50
- <li>Launch the software and select the cutting plotter you want to use.</li>
51
- <li>Import or create the graphics you want to cut. You can use bitmap or EPS files for your graphics.</li>
52
- <li>Adjust the size, position, rotation, and other parameters of your graphics according to your needs.</li>
53
- <li>Preview the cutting path and make sure it is correct.</li>
54
- <li>Send the cutting command to the plotter and wait for the cutting process to finish.</li>
55
- </ol>
56
- <p>Artcut 2009 also has some advanced features that you can use to enhance your cutting projects. For example, you can use the color confirmation feature to correct the colors of your graphics. You can also use the phase compensation feature to adjust the cutting speed and pressure. You can also use the corner adjustment feature to smooth the corners of your graphics.</p>
57
- <h2>What are the alternatives to Artcut 2009?</h2>
58
- <p>Artcut 2009 is not the only software that you can use for cutting vinyl and plastic. There are some alternatives that you can try if you want to explore other options. Some of the alternatives are:</p>
59
- <ul>
60
- <li>CorelDRAW: This is a popular vector graphics software that can also be used for cutting plotters. It has more features and tools than Artcut 2009 and supports more file formats. However, it is also more expensive and complex to use.</li>
61
- <li>Inkscape: This is a free and open-source vector graphics software that can also be used for cutting plotters. It has a similar interface and functionality as CorelDRAW but with fewer features and tools. It supports SVG files for cutting.</li>
62
- <li>SignCut Pro: This is a dedicated software for cutting plotters that can work with any plotter model. It has a simple interface and a powerful cutting engine. It supports EPS, PDF, AI, DXF, PLT, and other file formats for cutting.</li>
63
- </ul>
64
- <h2>Conclusion</h2>
65
- <p>Artcut 2009 is a software that allows you to cut vinyl and plastic with ease. It is designed for cutting plotters and supports EPS files for cutting. It has many features and benefits that make it a popular software among users. However, it also has some drawbacks and limitations that you should be aware of. If you want to download and install Artcut 2009 for free on your Windows computer, you can follow the steps in this article. You can also try some alternatives to Artcut 2009 if you want to explore other options.</p>
66
- <p>Artcut 2009 is a software that allows you to cut vinyl and plastic with ease. It is designed for cutting plotters and supports EPS files for cutting. It has many features and benefits that make it a popular software among users. However, it also has some drawbacks and limitations that you should be aware of. If you want to download and install Artcut 2009 for free on your Windows computer, you can follow the steps in this article. You can also try some alternatives to Artcut 2009 if you want to explore other options.</p> 3cee63e6c2<br />
67
- <br />
68
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Cineasset Doremi Crack.md DELETED
@@ -1,19 +0,0 @@
1
-
2
- <h1>What is Dolby CineAsset and How Does It Work?</h1>
3
- <p>Dolby CineAsset is a software suite that can create and play back digital cinema packages (DCPs) from virtually any source. DCPs are the standard format for distributing and projecting movies in digital cinemas around the world. Dolby CineAsset can also generate encrypted DCPs and key delivery messages (KDMs) for content protection and management.</p>
4
- <p>Dolby CineAsset is developed by Dolby Laboratories, a leading company in audio and video technologies for cinema, home entertainment, and mobile devices. Dolby CineAsset was formerly known as CineAsset Doremi, before Dolby acquired Doremi Labs in 2014.</p>
5
- <h2>cineasset doremi crack</h2><br /><p><b><b>Download</b> &#8250; <a href="https://imgfil.com/2uxZiy">https://imgfil.com/2uxZiy</a></b></p><br /><br />
6
- <p>Dolby CineAsset has two versions: Dolby CineAsset Player and Dolby CineAsset Pro. The Player version is a simple and cost-effective solution for playing back DCPs on a PC or Mac. The Pro version has more advanced features, such as creating DCPs from any file format, encoding audio and video in Dolby formats, adding subtitles and metadata, and generating KDMs for encrypted DCPs.</p>
7
- <p>Dolby CineAsset is compatible with most digital cinema servers and projectors, as well as Dolby Cinema processors and sound systems. Dolby CineAsset can also support high dynamic range (HDR), high frame rate (HFR), 3D, and immersive audio formats, such as Dolby Atmos.</p>
8
- <p>Dolby CineAsset is a powerful and versatile tool for content creation and playback in digital cinemas. It can help filmmakers, distributors, exhibitors, and post-production facilities to deliver high-quality and secure cinematic experiences to audiences worldwide.</p>
9
-
10
- <h2>How to Use Dolby CineAsset</h2>
11
- <p>To use Dolby CineAsset, you need to install the software on a PC or Mac that meets the minimum system requirements. You also need to have a license key that matches the version and features of the software you want to use. You can purchase a license key from Dolby or from an authorized reseller.</p>
12
- <p>Once you have installed and activated the software, you can launch it from the Start menu (Windows) or the Applications folder (Mac). You will see the main window of Dolby CineAsset, which has four tabs: Playlist, Ingest, Encode, and KDM.</p>
13
- <p>The Playlist tab allows you to create and edit playlists of DCPs and other media files. You can drag and drop files from your computer or network to the playlist, or use the Add button to browse for files. You can also adjust the playback settings, such as audio channels, subtitles, and aspect ratio. To play back a playlist, you need to connect your computer to a digital cinema server and projector via Ethernet or HDMI.</p>
14
- <p>The Ingest tab allows you to import media files and convert them to DCPs. You can select the source files from your computer or network, or use the Capture button to record video and audio from an external device. You can also specify the output settings, such as resolution, frame rate, color space, compression, encryption, and metadata. To create a DCP, you need to click on the Ingest button and wait for the process to finish.</p>
15
- <p></p>
16
- <p>The Encode tab allows you to encode audio and video files in Dolby formats, such as Dolby Digital Plus, Dolby TrueHD, Dolby E, and Dolby Vision. You can select the source files from your computer or network, or use the Capture button to record video and audio from an external device. You can also choose the output settings, such as bit rate, sample rate, channel layout, dynamic range control, and metadata. To encode a file, you need to click on the Encode button and wait for the process to finish.</p>
17
- <p>The KDM tab allows you to generate KDMs for encrypted DCPs. You can select the DCPs from your computer or network, or use the Browse button to locate them. You can also enter the recipient information, such as name, email address, certificate file, and validity period. To generate a KDM, you need to click on the Generate button and wait for the process to finish.</p> d5da3c52bf<br />
18
- <br />
19
- <br />
spaces/1line/AutoGPT/autogpt/memory/pinecone.py DELETED
@@ -1,75 +0,0 @@
1
- import pinecone
2
- from colorama import Fore, Style
3
-
4
- from autogpt.llm_utils import create_embedding_with_ada
5
- from autogpt.logs import logger
6
- from autogpt.memory.base import MemoryProviderSingleton
7
-
8
-
9
- class PineconeMemory(MemoryProviderSingleton):
10
- def __init__(self, cfg):
11
- pinecone_api_key = cfg.pinecone_api_key
12
- pinecone_region = cfg.pinecone_region
13
- pinecone.init(api_key=pinecone_api_key, environment=pinecone_region)
14
- dimension = 1536
15
- metric = "cosine"
16
- pod_type = "p1"
17
- table_name = "auto-gpt"
18
- # this assumes we don't start with memory.
19
- # for now this works.
20
- # we'll need a more complicated and robust system if we want to start with
21
- # memory.
22
- self.vec_num = 0
23
-
24
- try:
25
- pinecone.whoami()
26
- except Exception as e:
27
- logger.typewriter_log(
28
- "FAILED TO CONNECT TO PINECONE",
29
- Fore.RED,
30
- Style.BRIGHT + str(e) + Style.RESET_ALL,
31
- )
32
- logger.double_check(
33
- "Please ensure you have setup and configured Pinecone properly for use."
34
- + f"You can check out {Fore.CYAN + Style.BRIGHT}"
35
- "https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup"
36
- f"{Style.RESET_ALL} to ensure you've set up everything correctly."
37
- )
38
- exit(1)
39
-
40
- if table_name not in pinecone.list_indexes():
41
- pinecone.create_index(
42
- table_name, dimension=dimension, metric=metric, pod_type=pod_type
43
- )
44
- self.index = pinecone.Index(table_name)
45
-
46
- def add(self, data):
47
- vector = create_embedding_with_ada(data)
48
- # no metadata here. We may wish to change that long term.
49
- self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})])
50
- _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}"
51
- self.vec_num += 1
52
- return _text
53
-
54
- def get(self, data):
55
- return self.get_relevant(data, 1)
56
-
57
- def clear(self):
58
- self.index.delete(deleteAll=True)
59
- return "Obliviated"
60
-
61
- def get_relevant(self, data, num_relevant=5):
62
- """
63
- Returns all the data in the memory that is relevant to the given data.
64
- :param data: The data to compare to.
65
- :param num_relevant: The number of relevant data to return. Defaults to 5
66
- """
67
- query_embedding = create_embedding_with_ada(data)
68
- results = self.index.query(
69
- query_embedding, top_k=num_relevant, include_metadata=True
70
- )
71
- sorted_results = sorted(results.matches, key=lambda x: x.score)
72
- return [str(item["metadata"]["raw_text"]) for item in sorted_results]
73
-
74
- def get_stats(self):
75
- return self.index.describe_index_stats()
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Black Commando Special Ops MOD APK The Best Offline FPS Shooting Game of 2023.md DELETED
@@ -1,61 +0,0 @@
1
-
2
- <h1>Download Black Commando Special Ops Mod APK: A Thrilling Action Game for Android</h1>
3
- <p>If you are looking for a challenging and exciting action game for your Android device, you should definitely check out Black Commando Special Ops. This is a first-person shooter game that puts you in the role of a black commando who has to complete various missions in different locations. You will face enemies, obstacles, and traps as you try to survive and accomplish your objectives.</p>
4
- <p>But what if you want to enjoy the game with more features and benefits? Well, there is a way to do that. You can download the mod apk version of Black Commando Special Ops, which gives you access to unlimited money, ammo, health, and more. With this mod apk, you can unlock all the weapons, items, and upgrades in the game and have more fun and satisfaction.</p>
5
- <h2>download black commando special ops mod apk</h2><br /><p><b><b>Download Zip</b> &bull;&bull;&bull; <a href="https://urlin.us/2uSY7n">https://urlin.us/2uSY7n</a></b></p><br /><br />
6
- <p>In this article, we will tell you more about the features of Black Commando Special Ops mod apk, how to download and install it on your device, and some tips and tricks to improve your gameplay. So, let's get started!</p>
7
- <h2>Features of Black Commando Special Ops Mod APK</h2>
8
- <p>Black Commando Special Ops is a great game by itself, but with the mod apk version, you can enjoy it even more. Here are some of the main features of the mod apk that you can benefit from:</p>
9
- <ul>
10
- <li><b>Unlimited money:</b> You can get unlimited money in the game, which you can use to buy weapons, items, and upgrades. You can also customize your character with different outfits and accessories.</li>
11
- <li><b>Unlimited ammo:</b> You don't have to worry about running out of bullets or grenades in the game. You can shoot as much as you want and blast your enemies away.</li>
12
- <li><b>Unlimited health:</b> You can survive any damage or injury in the game. You don't have to worry about dying or losing your progress. You can also heal yourself instantly with medkits.</li>
13
- <li><b>No ads:</b> You can play the game without any interruptions or distractions from annoying ads. You can focus on your missions and enjoy the game fully.</li>
14
- <li><b>No root required:</b> You don't need to root your device to install or use the mod apk. It is compatible with any Android device and does not require any special permissions or access.</li>
15
- </ul>
16
- <h2>How to Download and Install Black Commando Special Ops Mod APK</h2>
17
- <p>Downloading and installing Black Commando Special Ops mod apk is very easy and simple. Just follow these steps:</p>
18
- <p>How to download black commando special ops mod apk for free<br />
19
- Black commando special ops mod apk unlimited money and gems<br />
20
- Black commando special ops mod apk latest version 2023<br />
21
- Black commando special ops mod apk offline mode<br />
22
- Black commando special ops mod apk hack and cheats<br />
23
- Download black commando special ops mod apk for android<br />
24
- Download black commando special ops mod apk for pc<br />
25
- Download black commando special ops mod apk for ios<br />
26
- Download black commando special ops mod apk for windows 10<br />
27
- Download black commando special ops mod apk for mac<br />
28
- Black commando special ops mod apk gameplay and review<br />
29
- Black commando special ops mod apk features and benefits<br />
30
- Black commando special ops mod apk tips and tricks<br />
31
- Black commando special ops mod apk best weapons and skills<br />
32
- Black commando special ops mod apk missions and challenges<br />
33
- Black commando special ops mod apk download link and instructions<br />
34
- Black commando special ops mod apk no root required<br />
35
- Black commando special ops mod apk safe and secure<br />
36
- Black commando special ops mod apk virus free and malware free<br />
37
- Black commando special ops mod apk compatible with all devices<br />
38
- Black commando special ops mod apk online multiplayer mode<br />
39
- Black commando special ops mod apk realistic graphics and sound effects<br />
40
- Black commando special ops mod apk high quality performance and optimization<br />
41
- Black commando special ops mod apk easy to install and use<br />
42
- Black commando special ops mod apk user friendly interface and controls<br />
43
- Black commando special ops mod apk fun and addictive gameplay<br />
44
- Black commando special ops mod apk action packed and thrilling adventure<br />
45
- Black commando special ops mod apk strategic and tactical combat<br />
46
- Black commando special ops mod apk secret and covert missions<br />
47
- Black commando special ops mod apk diverse and dynamic environments</p>
48
- <ol>
49
- <li>Click on this link to download the mod apk file of Black Commando Special Ops.</li>
50
- <li>Once the download is complete, go to your device's settings and enable the installation of apps from unknown sources.</li>
51
- <li>Locate the downloaded file in your device's file manager and tap on it to start the installation process.</li>
52
- <li>Follow the instructions on the screen and wait for the installation to finish.</li>
53
- <li>Launch the game from your app drawer and enjoy!</li>
54
- </ol>
55
- <h2>Tips and Tricks for Black Commando Special Ops</h2>
56
- <p>Now that you have installed Black Commando Special Ops mod apk on your device, you are ready to play the game. But before you do that, here are some tips and tricks that can help you improve your gameplay and performance:</p>
57
- <ul>
58
- <li><b>Choose your weapons wisely:</b> There are many weapons available in the game, such as pistols, rifles, shotguns, snipers, rocket launchers, etc. Each weapon has its own advantages and disadvantages, such as range, accuracy, damage, fire rate, etc. You should choose your weapons according to your mission objectives and preferences. For example, if you need to snipe enemies from a distance, use with other players online?</h3>
59
- <p>A: Yes, you can play Black Commando Special Ops mod apk with other players online. You can join or create rooms and invite your friends or random players to join you. You can also chat with them and cooperate to complete the missions.</p> 197e85843d<br />
60
- <br />
61
- <br />
spaces/1phancelerku/anime-remove-background/Bingo Crush - The Ultimate Bingo Experience on Android APK.md DELETED
@@ -1,95 +0,0 @@
1
-
2
- <h1>Bingo Crush APK: A Fun and Engaging Bingo Game for Android</h1>
3
- <p>If you love playing bingo games, you might want to try Bingo Crush APK, a fun and engaging bingo game for Android devices. Bingo Crush APK is a game of chance where you can play bingo with players from around the world, enjoy different themes and rooms, and collect bonuses and rewards. In this article, we will tell you what Bingo Crush APK is, how to download and install it, and what are its pros and cons.</p>
4
- <h2>bingo crush apk</h2><br /><p><b><b>Download</b> &raquo;&raquo;&raquo; <a href="https://jinyurl.com/2uNUvX">https://jinyurl.com/2uNUvX</a></b></p><br /><br />
5
- <h2>What is Bingo Crush APK?</h2>
6
- <p>Bingo Crush APK is an Android game that lets you play bingo in a casino-like environment. You can choose from various rooms that are designed like different countries, such as Christmas, San Francisco, Magical World, etc. You can also compete with other players in special tournaments and events. The game is free to play, but you can also purchase in-game items and coins to enhance your experience.</p>
7
- <h3>Features of Bingo Crush APK</h3>
8
- <h4>Play with players from around the world</h4>
9
- <p>One of the best features of Bingo Crush APK is that you can play with players from around the world in real-time. You can chat with them, send them gifts, and make new friends. You can also join a club or create your own to play with your buddies. The game has a global leaderboard where you can see your ranking and achievements.</p>
10
- <h4>Enjoy different themes and rooms</h4>
11
- <p>Bingo Crush APK has a variety of themes and rooms that will keep you entertained. You can play bingo in rooms that are decorated like different countries, such as Christmas, San Francisco, Magical World, etc. Each room has its own rules, prizes, and challenges. You can also unlock new rooms as you progress in the game.</p>
12
- <h4>Collect bonuses and rewards</h4>
13
- <p>Bingo Crush APK rewards you with bonuses and rewards for playing the game. You can get free coins every day by logging in, spinning the wheel, completing tasks, etc. You can also get free tickets by watching ads or inviting friends. You can use these coins and tickets to play more bingo games and win more prizes.</p>
14
- <h3>How to download and install Bingo Crush APK?</h3>
15
- <h4>Download the APK file from a trusted source</h4>
16
 - <p>To download Bingo Crush APK, you need to find a trusted source that offers the latest version of the game. You can search the web for websites that provide the APK file; for example, you can download the game from mob.org.</p>
17
- <p>bingo crush fun game apk<br />
18
- bingo crush free download apk<br />
19
- bingo crush android game apk<br />
20
- bingo crush latest version apk<br />
21
- bingo crush mod apk unlimited money<br />
22
- bingo crush online game apk<br />
23
- bingo crush offline game apk<br />
24
- bingo crush hack apk download<br />
25
- bingo crush cheats apk free<br />
26
- bingo crush update apk 2023<br />
27
- bingo tribe crush apk download<br />
28
- bingo tribe crush game apk<br />
29
- bingo tribe crush mod apk<br />
30
- bingo tribe crush hack apk<br />
31
- bingo tribe crush cheats apk<br />
32
- bingo tribe crush free coins apk<br />
33
- bingo tribe crush online apk<br />
34
- bingo tribe crush offline apk<br />
35
- bingo tribe crush latest apk<br />
36
- bingo tribe crush new version apk<br />
37
- bingo crush win real money apk<br />
38
- bingo crush cash game apk<br />
39
- bingo crush rewards apk download<br />
40
- bingo crush prizes apk free<br />
41
- bingo crush gift cards apk 2023<br />
42
- bingo crush paypal apk mod<br />
43
- bingo crush amazon apk hack<br />
44
- bingo crush google play apk cheat<br />
45
- bingo crush walmart apk update<br />
46
- bingo crush target apk new<br />
47
- bingo pop candy crush saga apk<br />
48
- bingo pop candy crush game apk<br />
49
- bingo pop candy crush mod apk<br />
50
- bingo pop candy crush hack apk<br />
51
- bingo pop candy crush cheats apk<br />
52
- bingo pop candy crush free boosters apk<br />
53
- bingo pop candy crush online apk<br />
54
- bingo pop candy crush offline apk<br />
55
- bingo pop candy crush latest apk<br />
56
- bingo pop candy crush new version apk</p>
57
- <h4>Enable unknown sources on your device</h4>
58
- <p>Before you install Bingo Crush APK, you need to enable unknown sources on your device. This will allow you to install apps that are not from the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.</p>
59
- <h4>Install the APK file and launch the game</h4>
60
- <p>After you download the APK file, locate it on your device and tap on it to install it. Follow the instructions on the screen to complete the installation. Once done, launch the game from your app drawer or home screen and enjoy playing bingo.</p>
61
- <h3>Pros and cons of Bingo Crush APK</h3>
62
- <h4>Pros: fun, free, easy, social, diverse</h4>
63
- <p>Bingo Crush APK has many advantages that make it a fun and engaging bingo game for Android users. Some of them are:</p>
64
- <ul>
65
 - <li>It is fun to play bingo with players from around the world and enjoy different themes and rooms.</li> <li>It is free to play, but you can also purchase in-game items and coins to enhance your experience.</li> <li>It is easy to play, as you just need to tap on the numbers that match the ones on your card.</li> <li>It is social, as you can chat with other players, send them gifts, and make new friends.</li> <li>It is diverse, as it has a variety of rooms, rules, prizes, and challenges.</li></ul>
66
- <h4>Cons: ads, bugs, compatibility issues, internet required</h4>
67
- <p>Bingo Crush APK also has some disadvantages that might affect your enjoyment of the game. Some of them are:</p>
68
- <ul>
69
 - <li>It has ads that might interrupt your gameplay or consume your data.</li> <li>It has bugs that might cause the game to crash or freeze.</li> <li>It has compatibility issues that might prevent the game from running smoothly on some devices.</li> <li>It requires an internet connection to play, which might limit your access or increase your costs.</li></ul>
70
- <h2>Conclusion</h2>
71
- <p>Bingo Crush APK is a fun and engaging bingo game for Android devices that lets you play bingo with players from around the world, enjoy different themes and rooms, and collect bonuses and rewards. It is free to play, but you can also purchase in-game items and coins to enhance your experience. It is easy to play, social, and diverse. However, it also has some drawbacks, such as ads, bugs, compatibility issues, and internet requirement. If you want to try Bingo Crush APK, you can download it from a trusted source and install it on your device. You might find it a great way to pass the time and have fun.</p>
72
- <h3>Why you should try Bingo Crush APK?</h3>
73
- <p>You should try Bingo Crush APK if you:</p>
74
- <ul>
75
 - <li>Love playing bingo games</li> <li>Want to play with players from around the world</li> <li>Enjoy different themes and rooms</li> <li>Like collecting bonuses and rewards</li> <li>Don't mind ads, bugs, compatibility issues, and internet requirement</li></ul>
76
- <p>If you have any questions about Bingo Crush APK, you can check out the FAQs below or contact the developer for more information.</p>
77
- <h2>FAQs</h2>
78
- <h4>What is the difference between Bingo Crush APK and Bingo Crush from Google Play Store?</h4>
79
- <p>Bingo Crush APK is an Android application package (APK) file that you can download and install from a third-party source. Bingo Crush from Google Play Store is the official version of the game that you can download and install from the Google Play Store. Both versions are similar in terms of gameplay and features, but they might have some differences in terms of updates, security, and performance.</p>
80
- <h4>Is Bingo Crush APK safe to download and install?</h4>
81
- <p>Bingo Crush APK is generally safe to download and install if you get it from a trusted source. However, you should always be careful when downloading and installing APK files from unknown sources, as they might contain malware or viruses that could harm your device or steal your data. You should also enable unknown sources on your device before installing Bingo Crush APK, which might expose your device to potential risks.</p>
82
- <h4>How can I get more coins and tickets in Bingo Crush APK?</h4>
83
- <p>You can get more coins and tickets in Bingo Crush APK by:</p>
84
- <ul>
85
 - <li>Logging in every day</li> <li>Spinning the wheel</li> <li>Completing tasks</li> <li>Watching ads</li> <li>Inviting friends</li> <li>Purchasing in-game items and coins</li></ul>
86
- <h4>How can I join or create a club in Bingo Crush APK?</h4>
87
- <p>You can join or create a club in Bingo Crush APK by:</p>
88
- <ul>
89
 - <li>Tapping on the club icon on the main screen</li> <li>Choosing to join an existing club or create your own</li> <li>Following the instructions on the screen</li></ul>
90
- <h4>How can I contact the developer of Bingo Crush APK?</h4>
91
- <p>You can contact the developer of Bingo Crush APK by:</p>
92
- <ul>
93
 - <li>Tapping on the settings icon on the main screen</li> <li>Tapping on the feedback option</li> <li>Filling out the form and submitting it</li></ul><br />
94
- <br />
95
- <br />
 
spaces/1phancelerku/anime-remove-background/Chess Universe Mod APK - The Ultimate Chess Experience for Android Users.md DELETED
@@ -1,108 +0,0 @@
1
-
2
 - <h1>Chess Universe Mod APK Apkpure: A Review</h1>
3
- <p>If you are a fan of chess and looking for a fun and engaging chess app to play on your Android device, you might have heard of Chess Universe. It is a popular chess app that offers a lot of features and game modes to suit your preferences and skill level. But what if you want to enjoy all the premium features and resources of the app without spending any money? That's where Chess Universe Mod APK Apkpure comes in.</p>
4
- <p>In this article, we will review Chess Universe Mod APK Apkpure and tell you what it offers, how to download and install it on your device, some tips and tricks for playing the game, and some alternatives to try out. Let's get started!</p>
5
- <h2>chess universe mod apk apkpure</h2><br /><p><b><b>DOWNLOAD</b> ->>> <a href="https://jinyurl.com/2uNLA3">https://jinyurl.com/2uNLA3</a></b></p><br /><br />
6
- <h2>Chess Universe: A Fun and Engaging Chess App</h2>
7
- <p>Chess Universe is a chess app that is designed to motivate and guide chess players to improve their skills from beginner to master. It has a unique design and exciting gameplay that makes chess more fun and accessible for everyone.</p>
8
- <p>Some of the features of Chess Universe are:</p>
9
- <ul>
10
- <li><b>Online and offline chess games:</b> You can play unlimited chess games online with your friends or other players from all over the world. You can also play offline against AI opponents with different difficulty levels.</li>
11
 - <li><b>Different game modes:</b> You can choose from various game modes such as blitz chess, bullet chess, rapid chess or easy mode. You can also customize your own game settings such as time control, board size, and chess pieces.</li>
12
- <li><b>Daily chess challenges:</b> You can test your chess skills and knowledge with daily puzzles, quizzes, and mini-games. You can also earn rewards and achievements for completing them.</li>
13
- <li><b>Chess with friends:</b> You can invite your friends to play chess with you or chat with them using emojis and stickers. You can also join or create chess clubs and participate in club events and tournaments.</li>
14
- <li><b>Chess academy:</b> You can learn from the best chess coaches and masters in the world. You can access hundreds of interactive lessons, videos, and articles that cover all aspects of chess. You can also track your progress and get feedback on your performance.</li>
15
- </ul>
16
- <p>Chess Universe is a free app that you can download from the Google Play Store or the App Store. However, some of the features and resources of the app require in-app purchases or subscriptions. For example, you need to spend gems, coins, keys, or chests to unlock new chessboards, chess sets, emojis, special effects, academy towers, characters, or pets. You also need to watch ads to get hints or undo moves. If you want to avoid these limitations and enjoy the full potential of the app, you might want to try Chess Universe Mod APK Apkpure.</p>
17
- <h2>Chess Universe Mod APK: What Does It Offer?</h2>
18
- <p>A mod apk is a modified version of an original app that gives you access to all the premium features and resources of the app for free. Chess Universe Mod APK Apkpure is one such mod apk that you can download from the apkpure website, which is a reliable source for mod apks.</p>
19
- <p>Some of the benefits of Chess Universe Mod APK Apkpure are:</p>
20
- <ul>
21
- <li><b>Unlimited gems, coins, keys, chests, hints, and undo moves:</b> You can use these resources to unlock and use any chessboard, chess set, emoji, special effect, academy tower, character, or pet you want. You can also get hints or undo moves without watching ads.</li>
22
- <li><b>All features unlocked:</b> You can access all the features of the app without any restrictions or subscriptions. You can play online and offline chess games, choose any game mode, complete daily challenges, learn from the chess academy, and join or create chess clubs.</li>
23
- <li><b>No ads:</b> You can enjoy the app without any annoying or disruptive ads.</li>
24
- </ul>
25
- <p>Chess Universe Mod APK Apkpure is a great way to enhance your chess experience and have more fun with the app. However, you should be aware of some of the risks and drawbacks of using mod apks from third-party sources. For example, you might face compatibility issues with your device or the original app. You might also violate the terms and conditions of the app developer or expose your device to malware or viruses. Therefore, you should always scan the mod apk file before installing it on your device and use it at your own risk.</p>
26
- <p>chess universe online mod apk download<br />
27
- chess universe mod apk unlimited money<br />
28
- chess universe online chess apk<br />
29
- chess universe mod apk latest version<br />
30
- chess universe online mod apk free<br />
31
- chess universe mod apk android 1<br />
32
- chess universe online chess app<br />
33
- chess universe mod apk premium<br />
34
- chess universe online mod apk hack<br />
35
- chess universe mod apk revdl<br />
36
- chess universe online chess game<br />
37
- chess universe mod apk pro<br />
38
- chess universe online mod apk unlocked<br />
39
- chess universe mod apk rexdl<br />
40
- chess universe online chess multiplayer<br />
41
- chess universe mod apk vip<br />
42
- chess universe online mod apk no ads<br />
43
- chess universe mod apk happymod<br />
44
- chess universe online chess puzzles<br />
45
- chess universe mod apk full<br />
46
- chess universe online mod apk offline<br />
47
- chess universe mod apk cracked<br />
48
- chess universe online chess lessons<br />
49
- chess universe mod apk paid<br />
50
- chess universe online mod apk 2021<br />
51
- chess universe mod apk old version<br />
52
- chess universe online mod apk update<br />
53
- chess universe mod apk obb<br />
54
- chess universe online mod apk 2020<br />
55
- chess universe mod apk original<br />
56
- chess universe online mod apk android<br />
57
- chess universe mod apk data<br />
58
- chess universe online mod apk ios<br />
59
- chess universe mod apk pure<br />
60
- chess universe online mod apk pc<br />
61
- chess universe mod apk mirror<br />
62
- chess universe online mod apk windows<br />
63
- chess universe mod apk uptodown<br />
64
- chess universe online mod apk mac<br />
65
- chess universe mod apk mob.org<br />
66
- chess universe online mod apk laptop<br />
67
- chess universe mod apk xda<br />
68
- chess universe online mod apk reddit<br />
69
- chess universe mod apk youtube<br />
70
- chess universe online mod apk quora<br />
71
- chess universe mod apk facebook<br />
72
- chess universe online mod apk telegram<br />
73
- chess universe mod apk twitter</p>
74
- <h2>Apkpure: A Reliable Source for Mod APKs</h2>
75
- <p>If you decide to download Chess Universe Mod APK Apkpure, you need to find a reliable source for it. Apkpure is one of the most popular websites that offer mod apks for various apps and games. It has a large collection of mod apks that are updated regularly and verified for safety and quality.</p>
76
- <p>Some of the advantages of using apkpure are:</p>
77
- <ul>
78
- <li><b>Safe and secure downloads:</b> Apkpure scans all the mod apks for viruses and malware before uploading them on their website. They also use SSL encryption to protect your data and privacy.</li>
79
- <li><b>Fast and easy installation:</b> Apkpure allows you to download mod apks directly from their website without any registration or verification. You just need to enable unknown sources in your device settings and install the apk file on your device.</li>
80
- <li><b>Updated versions:</b> Apkpure keeps track of the latest versions of the original apps and games and updates their mod apks accordingly. You can also check the version history and changelog of each mod apk on their website.</li>
81
- <li><b>No registration required:</b> Apkpure does not require you to create an account or sign in to use their services. You can download as many mod apks as you want without any hassle.</li>
82
- </ul>
83
- <p>Apkpure is a trusted and reputable website that has millions of users worldwide. However, you should still be careful when downloading mod apks from any third-party source and always backup your data before installing them on your device.</p>
84
- <h2>How to Download and Install Chess Universe Mod APK from Apkpure</h2>
85
- <p>If you want to download and install Chess Universe Mod APK from Apkpure, you need to follow these steps:</p>
86
- <ol>
87
 - <li><b>Visit the apkpure website:</b> Go to <a href="https://apkpure.com">https://apkpure.com</a> on your browser and search for Chess Universe Mod APK. You can also use this direct link: <a href="https://apkpure.com/chess-universe-play-learn-online-with-friends/com.kingofchess.chessuniverse">https://apkpure.com/chess-universe-play-learn-online-with-friends/com.kingofchess.chessuniverse</a>.</li>
88
- <li><b>Click on download:</b> On the mod apk page, you will see a green download button. Click on it and wait for the download to start. The file size is about 130 MB, so make sure you have enough space and a stable internet connection.</li>
89
- <li><b>Enable unknown sources:</b> Before you can install the mod apk on your device, you need to enable unknown sources in your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
90
- <li><b>Install the apk file:</b> Once the download is complete, locate the apk file in your device storage and tap on it. You will see a prompt asking you to confirm the installation. Tap on Install and wait for the process to finish.</li>
91
- <li><b>Enjoy the game:</b> After the installation is done, you can launch the game from your app drawer or home screen. You will see that you have unlimited resources and all features unlocked. You can now play chess with friends or online with other players without any limitations.</li>
92
- </ol>
93
- <p>Congratulations! You have successfully downloaded and installed Chess Universe Mod APK from Apkpure. Now you can enjoy the game and improve your chess skills.</p>
94
- <h2>Tips and Tricks for Playing Chess Universe</h2>
95
- <p>Chess Universe is a fun and engaging chess app that can help you learn and master chess. However, if you want to get better at the game and win more matches, you need to practice and apply some tips and tricks. Here are some suggestions that can help you improve your chess performance:</p>
96
- <ul>
97
- <li><b>Learn the basic rules and moves of chess:</b> If you are new to chess or need a refresher, you should learn the basic rules and moves of the game. You should know how each piece moves, how to capture, check, and checkmate, how to castle, en passant, and promote pawns, and how to avoid stalemate and draw situations. You can use the Chess Academy feature of the app to learn these basics.</li>
98
- <li><b>Practice with puzzles and AI opponents:</b> One of the best ways to improve your chess skills is to practice with puzzles and AI opponents. Puzzles can help you train your tactical vision, calculation, and problem-solving skills. AI opponents can help you practice your opening, middlegame, and endgame skills. You can use the Daily Challenges feature of the app to access puzzles and quizzes. You can also use the Offline Mode feature of the app to play against AI opponents with different difficulty levels.</li>
99
- <li><b>Analyze your games and learn from your mistakes:</b> Another important way to improve your chess skills is to analyze your games and learn from your mistakes. You should review your moves and identify where you went wrong or missed opportunities. You should also study the moves of your opponents and see what they did right or wrong. You can use the Analysis feature of the app to replay your games and get feedback on your moves.</li>
100
- <li><b>Try different game modes and strategies:</b> Chess Universe offers a variety of game modes and settings that can help you test your skills and have fun. You can try different time controls, board sizes, chess pieces, special effects, and game modes such as blitz chess, bullet chess, rapid chess or easy mode. You can also try different strategies such as attacking, defending, positional, or tactical play. You can use the Online Mode feature of the app to play with other players from all over the world.</li>
101
- <li><b>Challenge yourself with daily quests and tournaments:</b> Chess Universe also offers daily quests and tournaments that can help you challenge yourself and earn rewards. You can complete daily quests such as winning a certain number of games, solving puzzles, or using emojis. You can also join or create tournaments with different formats, rules, and prizes. You can use the Quests feature of the app to access daily quests. You can also use the Clubs feature of the app to join or create chess clubs and participate in club events and tournaments.</li>
102
- </ul>
103
- <p>Chess Universe is a great chess app that offers a lot of fun and learning opportunities for chess players of all levels. By following these tips and tricks, you can improve your chess skills and enjoy the game more.</p>
104
 - <h4>How do I uninstall Chess Universe Mod APK Apkpure?</h4> <p>You can uninstall Chess Universe Mod APK Apkpure by:</p> <ol> <li>Going to your device settings and finding the app in your app list</li> <li>Tapping on the app and selecting Uninstall</li> <li>Optionally, deleting the apk file from your device storage to free up some space</li>
105
- </ol>
106
 - <p>These are some of the FAQs about Chess Universe Mod APK Apkpure. If you have any other questions, please feel free to ask us in the comments section below.</p><br />
107
- <br />
108
- <br />
 
spaces/1toTree/lora_test/ppdiffusers/pipelines/repaint/pipeline_repaint.py DELETED
@@ -1,172 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 ETH Zurich Computer Vision Lab and The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- from typing import List, Optional, Tuple, Union
17
-
18
- import numpy as np
19
- import paddle
20
- import PIL
21
-
22
- from ...models import UNet2DModel
23
- from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
24
- from ...schedulers import RePaintScheduler
25
- from ...utils import PIL_INTERPOLATION, logging
26
-
27
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
28
-
29
-
30
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess
31
- def _preprocess_image(image: Union[List, PIL.Image.Image, paddle.Tensor]):
32
- if isinstance(image, paddle.Tensor):
33
- return image
34
- elif isinstance(image, PIL.Image.Image):
35
- image = [image]
36
-
37
- if isinstance(image[0], PIL.Image.Image):
38
- w, h = image[0].size
39
- w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
40
-
41
- image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
42
- image = np.concatenate(image, axis=0)
43
- image = np.array(image).astype(np.float32) / 255.0
44
- image = image.transpose(0, 3, 1, 2)
45
- image = 2.0 * image - 1.0
46
- image = paddle.to_tensor(image)
47
- elif isinstance(image[0], paddle.Tensor):
48
- image = paddle.concat(image, axis=0)
49
- return image
50
-
51
-
52
- def _preprocess_mask(mask: Union[List, PIL.Image.Image, paddle.Tensor]):
53
- if isinstance(mask, paddle.Tensor):
54
- return mask
55
- elif isinstance(mask, PIL.Image.Image):
56
- mask = [mask]
57
-
58
- if isinstance(mask[0], PIL.Image.Image):
59
- w, h = mask[0].size
60
- w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
61
- mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
62
- mask = np.concatenate(mask, axis=0)
63
- mask = mask.astype(np.float32) / 255.0
64
- mask[mask < 0.5] = 0
65
- mask[mask >= 0.5] = 1
66
- mask = paddle.to_tensor(mask)
67
- elif isinstance(mask[0], paddle.Tensor):
68
- mask = paddle.concat(mask, axis=0)
69
- return mask
70
-
71
-
72
- class RePaintPipeline(DiffusionPipeline):
73
- unet: UNet2DModel
74
- scheduler: RePaintScheduler
75
-
76
- def __init__(self, unet, scheduler):
77
- super().__init__()
78
- self.register_modules(unet=unet, scheduler=scheduler)
79
-
80
- @paddle.no_grad()
81
- def __call__(
82
- self,
83
- image: Union[paddle.Tensor, PIL.Image.Image],
84
- mask_image: Union[paddle.Tensor, PIL.Image.Image],
85
- num_inference_steps: int = 250,
86
- eta: float = 0.0,
87
- jump_length: int = 10,
88
- jump_n_sample: int = 10,
89
- generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
90
- output_type: Optional[str] = "pil",
91
- return_dict: bool = True,
92
- ) -> Union[ImagePipelineOutput, Tuple]:
93
- r"""
94
- Args:
95
- image (`paddle.Tensor` or `PIL.Image.Image`):
96
- The original image to inpaint on.
97
- mask_image (`paddle.Tensor` or `PIL.Image.Image`):
98
- The mask_image where 0.0 values define which part of the original image to inpaint (change).
99
- num_inference_steps (`int`, *optional*, defaults to 1000):
100
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
101
- expense of slower inference.
102
- eta (`float`):
103
- The weight of noise for added noise in a diffusion step. Its value is between 0.0 and 1.0 - 0.0 is DDIM
104
- and 1.0 is DDPM scheduler respectively.
105
- jump_length (`int`, *optional*, defaults to 10):
106
- The number of steps taken forward in time before going backward in time for a single jump ("j" in
107
- RePaint paper). Take a look at Figure 9 and 10 in https://arxiv.org/pdf/2201.09865.pdf.
108
- jump_n_sample (`int`, *optional*, defaults to 10):
109
- The number of times we will make forward time jump for a given chosen time sample. Take a look at
110
- Figure 9 and 10 in https://arxiv.org/pdf/2201.09865.pdf.
111
- generator (`paddle.Generator`, *optional*):
112
- One or a list of paddle generator(s) to make generation deterministic.
113
- output_type (`str`, *optional*, defaults to `"pil"`):
114
- The output format of the generate image. Choose between
115
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
116
- return_dict (`bool`, *optional*, defaults to `True`):
117
- Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
118
-
119
- Returns:
120
- [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
121
- `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the
122
- generated images.
123
- """
124
- original_image = _preprocess_image(image)
125
- original_image = original_image.cast(self.unet.dtype)
126
- mask_image = _preprocess_mask(mask_image)
127
- mask_image = mask_image.cast(self.unet.dtype)
128
-
129
- batch_size = original_image.shape[0]
130
-
131
- # sample gaussian noise to begin the loop
132
- if isinstance(generator, list) and len(generator) != batch_size:
133
- raise ValueError(
134
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
135
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
136
- )
137
-
138
- image_shape = original_image.shape
139
- if isinstance(generator, list):
140
- shape = (1,) + image_shape[1:]
141
- image = [paddle.randn(shape, generator=generator[i], dtype=self.unet.dtype) for i in range(batch_size)]
142
- image = paddle.concat(image, axis=0)
143
- else:
144
- image = paddle.randn(image_shape, generator=generator, dtype=self.unet.dtype)
145
-
146
- # set step values
147
- self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample)
148
- self.scheduler.eta = eta
149
-
150
- t_last = self.scheduler.timesteps[0] + 1
151
- generator = generator[0] if isinstance(generator, list) else generator
152
- for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
153
- if t < t_last:
154
- # predict the noise residual
155
- model_output = self.unet(image, t).sample
156
- # compute previous image: x_t -> x_t-1
157
- image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
158
-
159
- else:
160
- # compute the reverse: x_t-1 -> x_t
161
- image = self.scheduler.undo_step(image, t_last, generator)
162
- t_last = t
163
-
164
- image = (image / 2 + 0.5).clip(0, 1)
165
- image = image.transpose([0, 2, 3, 1]).numpy()
166
- if output_type == "pil":
167
- image = self.numpy_to_pil(image)
168
-
169
- if not return_dict:
170
- return (image,)
171
-
172
- return ImagePipelineOutput(images=image)
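For reference, a minimal usage sketch of the `RePaintPipeline` defined above might look like the following. This is not part of the deleted file: the checkpoint name, the `from_pretrained` loading calls, and the file paths are illustrative assumptions (the pipeline is assumed to be importable from `ppdiffusers`, mirroring upstream diffusers).

```python
# Hypothetical usage sketch for RePaintPipeline; checkpoint name and paths are assumptions.
import PIL.Image
from ppdiffusers import RePaintPipeline, RePaintScheduler, UNet2DModel

# Any DDPM-style UNet checkpoint compatible with RePaint should work here.
unet = UNet2DModel.from_pretrained("google/ddpm-ema-celebahq-256")
scheduler = RePaintScheduler()
pipe = RePaintPipeline(unet=unet, scheduler=scheduler)

original = PIL.Image.open("original.png").convert("RGB")  # image to restore
# Per the docstring above: 0.0 marks regions to inpaint, 1.0 marks regions to keep.
mask = PIL.Image.open("mask.png").convert("L")

result = pipe(
    image=original,
    mask_image=mask,
    num_inference_steps=250,
    jump_length=10,
    jump_n_sample=10,
).images[0]
result.save("repainted.png")
```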
 
spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py DELETED
@@ -1,469 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import inspect
17
- from typing import Callable, List, Optional, Union
18
-
19
- import numpy as np
20
- import paddle
21
- import PIL
22
-
23
- from paddlenlp.transformers import CLIPTextModel, CLIPTokenizer
24
-
25
- from ...models import AutoencoderKL, UNet2DConditionModel
26
- from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
27
- from ...schedulers import (
28
- DDIMScheduler,
29
- DDPMScheduler,
30
- LMSDiscreteScheduler,
31
- PNDMScheduler,
32
- )
33
- from ...utils import logging
34
-
35
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
36
-
37
-
38
- def preprocess(image):
39
- if isinstance(image, paddle.Tensor):
40
- return image
41
- elif isinstance(image, PIL.Image.Image):
42
- image = [image]
43
-
44
- if isinstance(image[0], PIL.Image.Image):
45
- w, h = image[0].size
46
 - w, h = map(lambda x: x - x % 64, (w, h)) # resize to integer multiple of 64
47
-
48
- image = [np.array(i.resize((w, h)))[None, :] for i in image]
49
- image = np.concatenate(image, axis=0)
50
- image = np.array(image).astype(np.float32) / 255.0
51
- image = image.transpose(0, 3, 1, 2)
52
- image = 2.0 * image - 1.0
53
- image = paddle.to_tensor(image)
54
- elif isinstance(image[0], paddle.Tensor):
55
- image = paddle.concat(image, axis=0)
56
- return image
57
-
58
-
59
- class StableDiffusionUpscalePipeline(DiffusionPipeline):
60
- r"""
61
- Pipeline for text-guided image super-resolution using Stable Diffusion 2.
62
-
63
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
64
- library implements for all the pipelines (such as downloading or saving etc.)
65
-
66
- Args:
67
- vae ([`AutoencoderKL`]):
68
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
69
- text_encoder ([`CLIPTextModel`]):
70
- Frozen text-encoder. Stable Diffusion uses the text portion of
71
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
72
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
73
- tokenizer (`CLIPTokenizer`):
74
- Tokenizer of class
75
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
76
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
77
- low_res_scheduler ([`SchedulerMixin`]):
78
- A scheduler used to add initial noise to the low res conditioning image. It must be an instance of
79
- [`DDPMScheduler`].
80
- scheduler ([`SchedulerMixin`]):
81
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
82
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
83
- """
84
-
85
- def __init__(
86
- self,
87
- vae: AutoencoderKL,
88
- text_encoder: CLIPTextModel,
89
- tokenizer: CLIPTokenizer,
90
- unet: UNet2DConditionModel,
91
- low_res_scheduler: DDPMScheduler,
92
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
93
- max_noise_level: int = 350,
94
- ):
95
- super().__init__()
96
-
97
- self.register_modules(
98
- vae=vae,
99
- text_encoder=text_encoder,
100
- tokenizer=tokenizer,
101
- unet=unet,
102
- low_res_scheduler=low_res_scheduler,
103
- scheduler=scheduler,
104
- )
105
- self.register_to_config(max_noise_level=max_noise_level)
106
-
107
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
108
- def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
109
- r"""
110
- Encodes the prompt into text encoder hidden states.
111
-
112
- Args:
113
- prompt (`str` or `list(int)`):
114
- prompt to be encoded
115
- num_images_per_prompt (`int`):
116
- number of images that should be generated per prompt
117
- do_classifier_free_guidance (`bool`):
118
- whether to use classifier free guidance or not
119
- negative_prompt (`str` or `List[str]`):
120
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
121
- if `guidance_scale` is less than `1`).
122
- """
123
- batch_size = len(prompt) if isinstance(prompt, list) else 1
124
-
125
- text_inputs = self.tokenizer(
126
- prompt,
127
- padding="max_length",
128
- max_length=self.tokenizer.model_max_length,
129
- truncation=True,
130
- return_tensors="pd",
131
- )
132
- text_input_ids = text_inputs.input_ids
133
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pd").input_ids
134
-
135
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not paddle.equal_all(
136
- text_input_ids, untruncated_ids
137
- ):
138
- removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
139
- logger.warning(
140
- "The following part of your input was truncated because CLIP can only handle sequences up to"
141
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
142
- )
143
-
144
- config = (
145
- self.text_encoder.config
146
- if isinstance(self.text_encoder.config, dict)
147
- else self.text_encoder.config.to_dict()
148
- )
149
- if config.get("use_attention_mask", None) is not None and config["use_attention_mask"]:
150
- attention_mask = text_inputs.attention_mask
151
- else:
152
- attention_mask = None
153
-
154
- text_embeddings = self.text_encoder(
155
- text_input_ids,
156
- attention_mask=attention_mask,
157
- )
158
- text_embeddings = text_embeddings[0]
159
-
160
- # duplicate text embeddings for each generation per prompt, using mps friendly method
161
- bs_embed, seq_len, _ = text_embeddings.shape
162
- text_embeddings = text_embeddings.tile([1, num_images_per_prompt, 1])
163
- text_embeddings = text_embeddings.reshape([bs_embed * num_images_per_prompt, seq_len, -1])
164
-
165
- # get unconditional embeddings for classifier free guidance
166
- if do_classifier_free_guidance:
167
- uncond_tokens: List[str]
168
- if negative_prompt is None:
169
- uncond_tokens = [""] * batch_size
170
- elif type(prompt) is not type(negative_prompt):
171
- raise TypeError(
172
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
173
- f" {type(prompt)}."
174
- )
175
- elif isinstance(negative_prompt, str):
176
- uncond_tokens = [negative_prompt]
177
- elif batch_size != len(negative_prompt):
178
- raise ValueError(
179
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
180
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
181
- " the batch size of `prompt`."
182
- )
183
- else:
184
- uncond_tokens = negative_prompt
185
-
186
- max_length = text_input_ids.shape[-1]
187
- uncond_input = self.tokenizer(
188
- uncond_tokens,
189
- padding="max_length",
190
- max_length=max_length,
191
- truncation=True,
192
- return_tensors="pd",
193
- )
194
-
195
- if config.get("use_attention_mask", None) is not None and config["use_attention_mask"]:
196
- attention_mask = uncond_input.attention_mask
197
- else:
198
- attention_mask = None
199
-
200
- uncond_embeddings = self.text_encoder(
201
- uncond_input.input_ids,
202
- attention_mask=attention_mask,
203
- )
204
- uncond_embeddings = uncond_embeddings[0]
205
-
206
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
207
- seq_len = uncond_embeddings.shape[1]
208
- uncond_embeddings = uncond_embeddings.tile([1, num_images_per_prompt, 1])
209
- uncond_embeddings = uncond_embeddings.reshape([batch_size * num_images_per_prompt, seq_len, -1])
210
-
211
- # For classifier free guidance, we need to do two forward passes.
212
- # Here we concatenate the unconditional and text embeddings into a single batch
213
- # to avoid doing two forward passes
214
- text_embeddings = paddle.concat([uncond_embeddings, text_embeddings])
215
-
216
- return text_embeddings
217
-
218
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
219
- def prepare_extra_step_kwargs(self, generator, eta):
220
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
221
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
222
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
223
- # and should be between [0, 1]
224
-
225
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
226
- extra_step_kwargs = {}
227
- if accepts_eta:
228
- extra_step_kwargs["eta"] = eta
229
-
230
- # check if the scheduler accepts generator
231
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
232
- if accepts_generator:
233
- extra_step_kwargs["generator"] = generator
234
- return extra_step_kwargs
235
-
236
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents with 0.18215->0.08333
237
- def decode_latents(self, latents):
238
- latents = 1 / 0.08333 * latents
239
- image = self.vae.decode(latents).sample
240
- image = (image / 2 + 0.5).clip(0, 1)
241
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
242
- image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
243
- return image
244
-
245
- def check_inputs(self, prompt, image, noise_level, callback_steps):
246
- if not isinstance(prompt, str) and not isinstance(prompt, list):
247
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
248
-
249
- if (
250
- not isinstance(image, paddle.Tensor)
251
- and not isinstance(image, PIL.Image.Image)
252
- and not isinstance(image, list)
253
- ):
254
- raise ValueError(
255
 - f"`image` has to be of type `paddle.Tensor`, `PIL.Image.Image` or `list` but is {type(image)}"
256
- )
257
-
258
- # verify batch size of prompt and image are same if image is a list or tensor
259
- if isinstance(image, list) or isinstance(image, paddle.Tensor):
260
- if isinstance(prompt, str):
261
- batch_size = 1
262
- else:
263
- batch_size = len(prompt)
264
- if isinstance(image, list):
265
- image_batch_size = len(image)
266
- else:
267
- image_batch_size = image.shape[0]
268
- if batch_size != image_batch_size:
269
- raise ValueError(
270
- f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}."
271
- " Please make sure that passed `prompt` matches the batch size of `image`."
272
- )
273
-
274
- # check noise level
275
- if noise_level > self.config.max_noise_level:
276
- raise ValueError(f"`noise_level` has to be <= {self.config.max_noise_level} but is {noise_level}")
277
-
278
- if (callback_steps is None) or (
279
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
280
- ):
281
- raise ValueError(
282
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
283
- f" {type(callback_steps)}."
284
- )
285
-
286
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, generator, latents=None):
287
- shape = [batch_size, num_channels_latents, height, width]
288
- if latents is None:
289
- latents = paddle.randn(shape, generator=generator, dtype=dtype)
290
- else:
291
- if latents.shape != shape:
292
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
293
-
294
- # scale the initial noise by the standard deviation required by the scheduler
295
- latents = latents * self.scheduler.init_noise_sigma
296
- return latents
297
-
298
- @paddle.no_grad()
299
- def __call__(
300
- self,
301
- prompt: Union[str, List[str]],
302
- image: Union[paddle.Tensor, PIL.Image.Image, List[PIL.Image.Image]],
303
- num_inference_steps: int = 75,
304
- guidance_scale: float = 9.0,
305
- noise_level: int = 20,
306
- negative_prompt: Optional[Union[str, List[str]]] = None,
307
- num_images_per_prompt: Optional[int] = 1,
308
- eta: float = 0.0,
309
- generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
310
- latents: Optional[paddle.Tensor] = None,
311
- output_type: Optional[str] = "pil",
312
- return_dict: bool = True,
313
- callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
314
- callback_steps: Optional[int] = 1,
315
- ):
316
- r"""
317
- Function invoked when calling the pipeline for generation.
318
-
319
- Args:
320
- prompt (`str` or `List[str]`):
321
- The prompt or prompts to guide the image generation.
322
- image (`PIL.Image.Image` or List[`PIL.Image.Image`] or `paddle.Tensor`):
323
 - `Image`, or tensor representing an image batch which will be upscaled.
324
 - num_inference_steps (`int`, *optional*, defaults to 75):
325
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
326
- expense of slower inference.
327
 - guidance_scale (`float`, *optional*, defaults to 9.0):
328
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
329
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
330
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
331
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
332
- usually at the expense of lower image quality.
333
- negative_prompt (`str` or `List[str]`, *optional*):
334
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
335
- if `guidance_scale` is less than `1`).
336
- num_images_per_prompt (`int`, *optional*, defaults to 1):
337
- The number of images to generate per prompt.
338
- eta (`float`, *optional*, defaults to 0.0):
339
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
340
- [`schedulers.DDIMScheduler`], will be ignored for others.
341
- generator (`paddle.Generator`, *optional*):
342
- A [paddle generator] to make generation
343
- deterministic.
344
- latents (`paddle.Tensor`, *optional*):
345
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
346
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
347
 - tensor will be generated by sampling using the supplied random `generator`.
348
- output_type (`str`, *optional*, defaults to `"pil"`):
349
- The output format of the generate image. Choose between
350
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
351
- return_dict (`bool`, *optional*, defaults to `True`):
352
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
353
- plain tuple.
354
- callback (`Callable`, *optional*):
355
- A function that will be called every `callback_steps` steps during inference. The function will be
356
- called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
357
- callback_steps (`int`, *optional*, defaults to 1):
358
- The frequency at which the `callback` function will be called. If not specified, the callback will be
359
- called at every step.
360
-
361
- Returns:
362
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
363
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
364
- When returning a tuple, the first element is a list with the generated images, and the second element is a
365
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
366
- (nsfw) content, according to the `safety_checker`.
367
- """
368
-
369
- # 1. Check inputs
370
- self.check_inputs(prompt, image, noise_level, callback_steps)
371
-
372
- # 2. Define call parameters
373
- batch_size = 1 if isinstance(prompt, str) else len(prompt)
374
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
375
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
376
- # corresponds to doing no classifier free guidance.
377
- do_classifier_free_guidance = guidance_scale > 1.0
378
-
379
- # 3. Encode input prompt
380
- text_embeddings = self._encode_prompt(
381
- prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
382
- )
383
-
384
- # 4. Preprocess image
385
- image = preprocess(image)
386
- image = image.cast(text_embeddings.dtype)
387
-
388
- # 5. set timesteps
389
- self.scheduler.set_timesteps(num_inference_steps)
390
- timesteps = self.scheduler.timesteps
391
-
392
- # 5. Add noise to image
393
- noise_level = paddle.to_tensor([noise_level], dtype="int64")
394
- noise = paddle.randn(image.shape, generator=generator, dtype=text_embeddings.dtype)
395
- image = self.low_res_scheduler.add_noise(image, noise, noise_level)
396
- batch_multiplier = 2 if do_classifier_free_guidance else 1
397
- image = paddle.concat([image] * batch_multiplier * num_images_per_prompt)
398
- noise_level = paddle.concat([noise_level] * image.shape[0])
399
-
400
- # 6. Prepare latent variables
401
- height, width = image.shape[2:]
402
- num_channels_latents = self.vae.config.latent_channels
403
- latents = self.prepare_latents(
404
- batch_size * num_images_per_prompt,
405
- num_channels_latents,
406
- height,
407
- width,
408
- text_embeddings.dtype,
409
- generator,
410
- latents,
411
- )
412
-
413
- # 7. Check that sizes of image and latents match
414
- num_channels_image = image.shape[1]
415
- if num_channels_latents + num_channels_image != self.unet.config.in_channels:
416
- raise ValueError(
417
- f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
418
- f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
419
- f" `num_channels_image`: {num_channels_image} "
420
- f" = {num_channels_latents+num_channels_image}. Please verify the config of"
421
- " `pipeline.unet` or your `image` input."
422
- )
423
-
424
- # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
425
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
426
-
427
- # 9. Denoising loop
428
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
429
- with self.progress_bar(total=num_inference_steps) as progress_bar:
430
- for i, t in enumerate(timesteps):
431
- # expand the latents if we are doing classifier free guidance
432
- latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents
433
-
434
- # concat latents, mask, masked_image_latents in the channel dimension
435
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
436
- latent_model_input = paddle.concat([latent_model_input, image], axis=1)
437
-
438
- # predict the noise residual
439
- noise_pred = self.unet(
440
- latent_model_input, t, encoder_hidden_states=text_embeddings, class_labels=noise_level
441
- ).sample
442
-
443
- # perform guidance
444
- if do_classifier_free_guidance:
445
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
446
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
447
-
448
- # compute the previous noisy sample x_t -> x_t-1
449
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
450
-
451
- # call the callback, if provided
452
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
453
- progress_bar.update()
454
- if callback is not None and i % callback_steps == 0:
455
- callback(i, t, latents)
456
-
457
- # 10. Post-processing
458
- # make sure the VAE is in float32 mode, as it overflows in float16
459
- # self.vae.to(dtype=paddle.float32)
460
- image = self.decode_latents(latents.cast("float32"))
461
-
462
- # 11. Convert to PIL
463
- if output_type == "pil":
464
- image = self.numpy_to_pil(image)
465
-
466
- if not return_dict:
467
- return (image,)
468
-
469
- return ImagePipelineOutput(images=image)
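Similarly, a minimal usage sketch for the `StableDiffusionUpscalePipeline` above is shown below. It is not taken from the deleted file: the checkpoint name and file paths are assumptions, and the pipeline is assumed to load via `from_pretrained` as in upstream diffusers.

```python
# Hypothetical usage sketch for StableDiffusionUpscalePipeline; checkpoint and paths are assumptions.
import PIL.Image
from ppdiffusers import StableDiffusionUpscalePipeline

# The x4 upscaler checkpoint is the usual companion model for this pipeline.
pipe = StableDiffusionUpscalePipeline.from_pretrained("stabilityai/stable-diffusion-x4-upscaler")

low_res = PIL.Image.open("low_res_cat.png").convert("RGB").resize((128, 128))
upscaled = pipe(
    prompt="a white cat",
    image=low_res,
    num_inference_steps=75,
    guidance_scale=9.0,
    noise_level=20,
).images[0]
upscaled.save("upscaled_cat.png")  # roughly 4x the input resolution
```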
 
spaces/AI-Naga/Roof_Element_Identification/app.py DELETED
@@ -1,68 +0,0 @@
1
- import gradio as gr
2
- from gradio.outputs import Label
3
- import cv2
4
- import requests
5
- import os
6
- import numpy as np
7
-
8
- from ultralytics import YOLO
9
- import yolov5
10
-
11
- # Function for inference
12
- def yolov5_inference(
13
- image: gr.inputs.Image = None,
14
- model_path: gr.inputs.Dropdown = None,
15
- image_size: gr.inputs.Slider = 640,
16
- conf_threshold: gr.inputs.Slider = 0.25,
17
- iou_threshold: gr.inputs.Slider = 0.45 ):
18
-
19
- # Loading Yolo V5 model
20
- model = yolov5.load(model_path, device="cpu")
21
-
22
- # Setting model configuration
23
- model.conf = conf_threshold
24
- model.iou = iou_threshold
25
-
26
- # Inference
27
- results = model([image], size=image_size)
28
-
29
- # Cropping the predictions
30
- crops = results.crop(save=False)
31
- img_crops = []
32
- for i in range(len(crops)):
33
- img_crops.append(crops[i]["im"][..., ::-1])
34
- return results.render()[0], img_crops
35
-
36
- # gradio Input
37
- inputs = [
38
- gr.inputs.Image(type="pil", label="Input Image"),
39
- gr.inputs.Dropdown(["Roof_Elements_Y5.pt"], label="Model", default = 'Roof_Elements_Y5.pt'),
40
- gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
41
- gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
42
- gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
43
- ]
44
-
45
- # gradio Output
46
- outputs = gr.outputs.Image(type="filepath", label="Output Image")
47
- outputs_crops = gr.Gallery(label="Object crop")
48
-
49
- title = "Roof element identification"
50
-
51
- # gradio examples: "Image", "Model", "Image Size", "Confidence Threshold", "IOU Threshold"
52
- examples = [['image_0.jpg', 'Roof_Elements_Y5.pt', 640, 0.35, 0.45]
53
- ,['image_1.jpg', 'Roof_Elements_Y5.pt', 640, 0.35, 0.45]
54
- ,['image_2.jpg', 'Roof_Elements_Y5.pt', 640, 0.35, 0.45],
55
- ]
56
-
57
- # gradio app launch
58
- demo_app = gr.Interface(
59
- fn=yolov5_inference,
60
- inputs=inputs,
61
- outputs=[outputs,outputs_crops],
62
- title=title,
63
- examples=examples,
64
- cache_examples=True,
65
- live=True,
66
- theme='huggingface',
67
- )
68
- demo_app.launch(debug=True, enable_queue=True, width=50, height=50)
 
spaces/AIGC-Audio/AudioGPT/audio_detection/audio_infer/utils/plot_for_paper.py DELETED
@@ -1,565 +0,0 @@
1
- import os
2
- import sys
3
- import numpy as np
4
- import argparse
5
- import h5py
6
- import time
7
- import pickle
8
- import matplotlib.pyplot as plt
9
- import csv
10
- from sklearn import metrics
11
-
12
- from utilities import (create_folder, get_filename, d_prime)
13
- import config
14
-
15
-
16
- def load_statistics(statistics_path):
17
- statistics_dict = pickle.load(open(statistics_path, 'rb'))
18
-
19
- bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']]) # (N, classes_num)
20
- bal_map = np.mean(bal_map, axis=-1)
21
- test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']]) # (N, classes_num)
22
- test_map = np.mean(test_map, axis=-1)
23
-
24
- return bal_map, test_map
25
-
26
-
27
- def crop_label(label):
28
- max_len = 16
29
- if len(label) <= max_len:
30
- return label
31
- else:
32
- words = label.split(' ')
33
- cropped_label = ''
34
- for w in words:
35
- if len(cropped_label + ' ' + w) > max_len:
36
- break
37
- else:
38
- cropped_label += ' {}'.format(w)
39
- return cropped_label
40
-
41
-
42
- def add_comma(integer):
43
- """E.g., 1234567 -> 1,234,567
44
- """
45
- integer = int(integer)
46
- if integer >= 1000:
47
- return str(integer // 1000) + ',' + str(integer % 1000)
48
- else:
49
- return str(integer)
50
-
51
-
52
- def plot_classwise_iteration_map(args):
53
-
54
- # Paths
55
- save_out_path = 'results/classwise_iteration_map.pdf'
56
- create_folder(os.path.dirname(save_out_path))
57
-
58
- # Load statistics
59
- statistics_dict = pickle.load(open('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_WavegramLogmelCnn_balanced_mixup_bs32.pkl', 'rb'))
60
-
61
- mAP_mat = np.array([e['average_precision'] for e in statistics_dict['test']])
62
- mAP_mat = mAP_mat[0 : 300, :] # 300 * 2000 = 600k iterations
63
- sorted_indexes = np.argsort(config.full_samples_per_class)[::-1]
64
-
65
- fig, axs = plt.subplots(1, 3, figsize=(20, 5))
66
- ranges = [np.arange(0, 10), np.arange(250, 260), np.arange(517, 527)]
67
- axs[0].set_ylabel('AP')
68
-
69
- for col in range(0, 3):
70
- axs[col].set_ylim(0, 1.)
71
- axs[col].set_xlim(0, 301)
72
- axs[col].set_xlabel('Iterations')
73
- axs[col].set_ylabel('AP')
74
- axs[col].xaxis.set_ticks(np.arange(0, 301, 100))
75
- axs[col].xaxis.set_ticklabels(['0', '200k', '400k', '600k'])
76
- lines = []
77
- for _ix in ranges[col]:
78
- _label = crop_label(config.labels[sorted_indexes[_ix]]) + \
79
- ' ({})'.format(add_comma(config.full_samples_per_class[sorted_indexes[_ix]]))
80
- line, = axs[col].plot(mAP_mat[:, sorted_indexes[_ix]], label=_label)
81
- lines.append(line)
82
- box = axs[col].get_position()
83
- axs[col].set_position([box.x0, box.y0, box.width * 1., box.height])
84
- axs[col].legend(handles=lines, bbox_to_anchor=(1., 1.))
85
- axs[col].yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
86
-
87
- plt.tight_layout(pad=4, w_pad=1, h_pad=1)
88
- plt.savefig(save_out_path)
89
- print(save_out_path)
90
-
91
-
92
- def plot_six_figures(args):
93
-
94
- # Arguments & parameters
95
- classes_num = config.classes_num
96
- labels = config.labels
97
- max_plot_iteration = 540000
98
- iterations = np.arange(0, max_plot_iteration, 2000)
99
-
100
- # Paths
101
- class_labels_indices_path = os.path.join('metadata', 'class_labels_indices.csv')
102
- save_out_path = 'results/six_figures.pdf'
103
- create_folder(os.path.dirname(save_out_path))
104
-
105
- # Plot
106
- fig, ax = plt.subplots(2, 3, figsize=(14, 7))
107
- bal_alpha = 0.3
108
- test_alpha = 1.0
109
- linewidth = 1.
110
-
111
- # (a) Comparison of architectures
112
- if True:
113
- lines = []
114
-
115
- # Wavegram-Logmel-CNN
116
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_WavegramLogmelCnn_balanced_mixup_bs32.pkl')
117
- line, = ax[0, 0].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
118
- line, = ax[0, 0].plot(test_map, label='Wavegram-Logmel-CNN', color='g', alpha=test_alpha, linewidth=linewidth)
119
- lines.append(line)
120
-
121
- # Cnn14
122
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
123
- line, = ax[0, 0].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
124
- line, = ax[0, 0].plot(test_map, label='CNN14', color='r', alpha=test_alpha, linewidth=linewidth)
125
- lines.append(line)
126
-
127
- # MobileNetV1
128
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_MobileNetV1_balanced_mixup_bs32.pkl')
129
- line, = ax[0, 0].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
130
- line, = ax[0, 0].plot(test_map, label='MobileNetV1', color='b', alpha=test_alpha, linewidth=linewidth)
131
- lines.append(line)
132
-
133
- ax[0, 0].legend(handles=lines, loc=2)
134
- ax[0, 0].set_title('(a) Comparison of architectures')
135
-
136
- # (b) Comparison of training data and augmentation'
137
- if True:
138
- lines = []
139
-
140
- # Full data + balanced sampler + mixup
141
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
142
- line, = ax[0, 1].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
143
- line, = ax[0, 1].plot(test_map, label='CNN14,bal,mixup (1.9m)', color='r', alpha=test_alpha, linewidth=linewidth)
144
- lines.append(line)
145
-
146
- # Full data + balanced sampler + mixup in time domain
147
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_timedomain_bs32.pkl')
148
- line, = ax[0, 1].plot(bal_map, color='y', alpha=bal_alpha, linewidth=linewidth)
149
- line, = ax[0, 1].plot(test_map, label='CNN14,bal,mixup-wav (1.9m)', color='y', alpha=test_alpha, linewidth=linewidth)
150
- lines.append(line)
151
-
152
- # Full data + balanced sampler + no mixup
153
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_nomixup_bs32.pkl')
154
- line, = ax[0, 1].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
155
- line, = ax[0, 1].plot(test_map, label='CNN14,bal,no-mixup (1.9m)', color='g', alpha=test_alpha, linewidth=linewidth)
156
- lines.append(line)
157
-
158
- # Full data + uniform sampler + no mixup
159
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_nobalanced_nomixup_bs32.pkl')
160
- line, = ax[0, 1].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
161
- line, = ax[0, 1].plot(test_map, label='CNN14,no-bal,no-mixup (1.9m)', color='b', alpha=test_alpha, linewidth=linewidth)
162
- lines.append(line)
163
-
164
- # Balanced data + balanced sampler + mixup
165
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_balanced_train_Cnn14_balanced_mixup_bs32.pkl')
166
- line, = ax[0, 1].plot(bal_map, color='m', alpha=bal_alpha, linewidth=linewidth)
167
- line, = ax[0, 1].plot(test_map, label='CNN14,bal,mixup (20k)', color='m', alpha=test_alpha, linewidth=linewidth)
168
- lines.append(line)
169
-
170
- # Balanced data + balanced sampler + no mixup
171
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_balanced_train_Cnn14_balanced_nomixup_bs32.pkl')
172
- line, = ax[0, 1].plot(bal_map, color='k', alpha=bal_alpha, linewidth=linewidth)
173
- line, = ax[0, 1].plot(test_map, label='CNN14,bal,no-mixup (20k)', color='k', alpha=test_alpha, linewidth=linewidth)
174
- lines.append(line)
175
-
176
- ax[0, 1].legend(handles=lines, loc=2, fontsize=8)
177
- ax[0, 1].set_title('(b) Comparison of training data and augmentation')
178
-
179
- # (c) Comparison of embedding size
180
- if True:
181
- lines = []
182
-
183
- # Embedding size 2048
184
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
185
- line, = ax[0, 2].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
186
- line, = ax[0, 2].plot(test_map, label='CNN14,emb=2048', color='r', alpha=test_alpha, linewidth=linewidth)
187
- lines.append(line)
188
-
189
- # Embedding size 128
190
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_emb128_balanced_mixup_bs32.pkl')
191
- line, = ax[0, 2].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
192
- line, = ax[0, 2].plot(test_map, label='CNN14,emb=128', color='g', alpha=test_alpha, linewidth=linewidth)
193
- lines.append(line)
194
-
195
- # Embedding size 32
196
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_emb32_balanced_mixup_bs32.pkl')
197
- line, = ax[0, 2].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
198
- line, = ax[0, 2].plot(test_map, label='CNN14,emb=32', color='b', alpha=test_alpha, linewidth=linewidth)
199
- lines.append(line)
200
-
201
- ax[0, 2].legend(handles=lines, loc=2)
202
- ax[0, 2].set_title('(c) Comparison of embedding size')
203
-
204
- # (d) Comparison of amount of training data
205
- if True:
206
- lines = []
207
-
208
- # 100% of full training data
209
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
210
- line, = ax[1, 0].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
211
- line, = ax[1, 0].plot(test_map, label='CNN14 (100% full)', color='r', alpha=test_alpha, linewidth=linewidth)
212
- lines.append(line)
213
-
214
- # 80% of full training data
215
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_0.8full_train_Cnn14_balanced_mixup_bs32.pkl')
216
- line, = ax[1, 0].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
217
- line, = ax[1, 0].plot(test_map, label='CNN14 (80% full)', color='b', alpha=test_alpha, linewidth=linewidth)
218
- lines.append(line)
219
-
220
- # 50% of full training data
221
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_0.5full_train_Cnn14_balanced_mixup_bs32.pkl')
222
- line, = ax[1, 0].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
223
- line, = ax[1, 0].plot(test_map, label='cnn14 (50% full)', color='g', alpha=test_alpha, linewidth=linewidth)
224
- lines.append(line)
225
-
226
- ax[1, 0].legend(handles=lines, loc=2)
227
- ax[1, 0].set_title('(d) Comparison of amount of training data')
228
-
229
- # (e) Comparison of sampling rate
230
- if True:
231
- lines = []
232
-
233
- # Cnn14 + 32 kHz
234
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
235
- line, = ax[1, 1].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
236
- line, = ax[1, 1].plot(test_map, label='CNN14,32kHz', color='r', alpha=test_alpha, linewidth=linewidth)
237
- lines.append(line)
238
-
239
- # Cnn14 + 16 kHz
240
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_16k_balanced_mixup_bs32.pkl')
241
- line, = ax[1, 1].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
242
- line, = ax[1, 1].plot(test_map, label='CNN14,16kHz', color='b', alpha=test_alpha, linewidth=linewidth)
243
- lines.append(line)
244
-
245
- # Cnn14 + 8 kHz
246
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_8k_balanced_mixup_bs32.pkl')
247
- line, = ax[1, 1].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
248
- line, = ax[1, 1].plot(test_map, label='CNN14,8kHz', color='g', alpha=test_alpha, linewidth=linewidth)
249
- lines.append(line)
250
-
251
- ax[1, 1].legend(handles=lines, loc=2)
252
- ax[1, 1].set_title('(e) Comparison of sampling rate')
253
-
254
- # (f) Comparison of mel bins number
255
- if True:
256
- lines = []
257
-
258
- # Cnn14 + 128 mel bins
259
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel128_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
260
- line, = ax[1, 2].plot(bal_map, color='g', alpha=bal_alpha)
261
- line, = ax[1, 2].plot(test_map, label='CNN14,128-melbins', color='g', alpha=test_alpha, linewidth=linewidth)
262
- lines.append(line)
263
-
264
- # Cnn14 + 64 mel bins
265
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
266
- line, = ax[1, 2].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
267
- line, = ax[1, 2].plot(test_map, label='CNN14,64-melbins', color='r', alpha=test_alpha, linewidth=linewidth)
268
- lines.append(line)
269
-
270
- # Cnn14 + 32 mel bins
271
- (bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel32_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
272
- line, = ax[1, 2].plot(bal_map, color='b', alpha=bal_alpha)
273
- line, = ax[1, 2].plot(test_map, label='CNN14,32-melbins', color='b', alpha=test_alpha, linewidth=linewidth)
274
- lines.append(line)
275
-
276
- ax[1, 2].legend(handles=lines, loc=2)
277
- ax[1, 2].set_title('(f) Comparison of mel bins number')
278
-
279
- for i in range(2):
280
- for j in range(3):
281
- ax[i, j].set_ylim(0, 0.8)
282
- ax[i, j].set_xlim(0, len(iterations))
283
- ax[i, j].set_xlabel('Iterations')
284
- ax[i, j].set_ylabel('mAP')
285
- ax[i, j].xaxis.set_ticks(np.arange(0, len(iterations), 50))
286
- ax[i, j].xaxis.set_ticklabels(['0', '100k', '200k', '300k', '400k', '500k'])
287
- ax[i, j].yaxis.set_ticks(np.arange(0, 0.81, 0.05))
288
- ax[i, j].yaxis.set_ticklabels(['0', '', '0.1', '', '0.2', '', '0.3',
289
- '', '0.4', '', '0.5', '', '0.6', '', '0.7', '', '0.8'])
290
- ax[i, j].yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
291
- ax[i, j].xaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
292
-
293
- plt.tight_layout(0, 1, 0)
294
- plt.savefig(save_out_path)
295
- print('Save figure to {}'.format(save_out_path))
296
-
297
-
298
- def plot_complexity_map(args):
299
-
300
- # Paths
301
- save_out_path = 'results/complexity_mAP.pdf'
302
- create_folder(os.path.dirname(save_out_path))
303
-
304
- plt.figure(figsize=(5, 5))
305
- fig, ax = plt.subplots(1, 1)
306
-
307
- model_types = np.array(['Cnn6', 'Cnn10', 'Cnn14', 'ResNet22', 'ResNet38', 'ResNet54',
308
- 'MobileNetV1', 'MobileNetV2', 'DaiNet', 'LeeNet', 'LeeNet18',
309
- 'Res1dNet30', 'Res1dNet44', 'Wavegram-CNN', 'Wavegram-\nLogmel-CNN'])
310
- flops = np.array([21.986, 28.166, 42.220, 30.081, 48.962, 54.563, 3.614, 2.810,
311
- 30.395, 4.741, 26.369, 32.688, 61.833, 44.234, 53.510])
312
- mAPs = np.array([0.343, 0.380, 0.431, 0.430, 0.434, 0.429, 0.389, 0.383, 0.295,
313
- 0.266, 0.336, 0.365, 0.355, 0.389, 0.439])
314
-
315
- sorted_indexes = np.sort(flops)
316
- ax.scatter(flops, mAPs)
317
-
318
- shift = [[-5.5, -0.004], [1, -0.004], [-1, -0.014], [-2, 0.006], [-7, 0.006],
319
- [1, -0.01], [0.5, 0.004], [-1, -0.014], [1, -0.007], [0.8, -0.008],
320
- [1, -0.007], [1, 0.002], [-6, -0.015], [1, -0.008], [0.8, 0]]
321
-
322
- for i, model_type in enumerate(model_types):
323
- ax.annotate(model_type, (flops[i] + shift[i][0], mAPs[i] + shift[i][1]))
324
-
325
- ax.plot(flops[[0, 1, 2]], mAPs[[0, 1, 2]])
326
- ax.plot(flops[[3, 4, 5]], mAPs[[3, 4, 5]])
327
- ax.plot(flops[[6, 7]], mAPs[[6, 7]])
328
- ax.plot(flops[[9, 10]], mAPs[[9, 10]])
329
- ax.plot(flops[[11, 12]], mAPs[[11, 12]])
330
- ax.plot(flops[[13, 14]], mAPs[[13, 14]])
331
-
332
- ax.set_xlim(0, 70)
333
- ax.set_ylim(0.2, 0.5)
334
- ax.set_xlabel('Multi-adds (million)', fontsize=15)
335
- ax.set_ylabel('mAP', fontsize=15)
336
- ax.tick_params(axis='x', labelsize=12)
337
- ax.tick_params(axis='y', labelsize=12)
338
-
339
- plt.tight_layout(0, 0, 0)
340
-
341
- plt.savefig(save_out_path)
342
- print('Write out figure to {}'.format(save_out_path))
343
-
344
-
345
- def plot_long_fig(args):
346
-
347
- # Paths
348
- stats = pickle.load(open('paper_statistics/stats_for_long_fig.pkl', 'rb'))
349
-
350
- save_out_path = 'results/long_fig.pdf'
351
- create_folder(os.path.dirname(save_out_path))
352
-
353
- # Load meta
354
- N = len(config.labels)
355
- sorted_indexes = stats['sorted_indexes_for_plot']
356
- sorted_labels = np.array(config.labels)[sorted_indexes]
357
- audio_clips_per_class = stats['official_balanced_training_samples'] + stats['official_unbalanced_training_samples']
358
- audio_clips_per_class = audio_clips_per_class[sorted_indexes]
359
-
360
- # Prepare axes for plot
361
- (ax1a, ax2a, ax3a, ax4a, ax1b, ax2b, ax3b, ax4b) = prepare_plot_long_4_rows(sorted_labels)
362
-
363
- # plot the number of training samples
364
- ax1a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
365
- ax2a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
366
- ax3a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
367
- ax4a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
368
-
369
- # Load mAP of different systems
370
- """Average instance system of [1] with an mAP of 0.317.
371
- [1] Kong, Qiuqiang, Changsong Yu, Yong Xu, Turab Iqbal, Wenwu Wang, and
372
- Mark D. Plumbley. "Weakly labelled audioset tagging with attention neural
373
- networks." IEEE/ACM Transactions on Audio, Speech, and Language Processing
374
- 27, no. 11 (2019): 1791-1802."""
375
- maps_avg_instances = stats['averaging_instance_system_avg_9_probs_from_10000_to_50000_iterations']['eval']['average_precision']
376
- maps_avg_instances = maps_avg_instances[sorted_indexes]
377
-
378
- # PANNs Cnn14
379
- maps_panns_cnn14 = stats['panns_cnn14']['eval']['average_precision']
380
- maps_panns_cnn14 = maps_panns_cnn14[sorted_indexes]
381
-
382
- # PANNs MobileNetV1
383
- maps_panns_mobilenetv1 = stats['panns_mobilenetv1']['eval']['average_precision']
384
- maps_panns_mobilenetv1 = maps_panns_mobilenetv1[sorted_indexes]
385
-
386
- # PANNs Wavegram-Logmel-Cnn14
387
- maps_panns_wavegram_logmel_cnn14 = stats['panns_wavegram_logmel_cnn14']['eval']['average_precision']
388
- maps_panns_wavegram_logmel_cnn14 = maps_panns_wavegram_logmel_cnn14[sorted_indexes]
389
-
390
- # Plot mAPs
391
- _scatter_4_rows(maps_panns_wavegram_logmel_cnn14, ax1b, ax2b, ax3b, ax4b, s=5, c='g')
392
- _scatter_4_rows(maps_panns_cnn14, ax1b, ax2b, ax3b, ax4b, s=5, c='r')
393
- _scatter_4_rows(maps_panns_mobilenetv1, ax1b, ax2b, ax3b, ax4b, s=5, c='b')
394
- _scatter_4_rows(maps_avg_instances, ax1b, ax2b, ax3b, ax4b, s=5, c='k')
395
-
396
- linewidth = 0.7
397
- line0te = _plot_4_rows(maps_panns_wavegram_logmel_cnn14, ax1b, ax2b, ax3b, ax4b,
398
- c='g', linewidth=linewidth, label='AP with Wavegram-Logmel-CNN')
399
- line1te = _plot_4_rows(maps_panns_cnn14, ax1b, ax2b, ax3b, ax4b, c='r',
400
- linewidth=linewidth, label='AP with CNN14')
401
- line2te = _plot_4_rows(maps_panns_mobilenetv1, ax1b, ax2b, ax3b, ax4b, c='b',
402
- linewidth=linewidth, label='AP with MobileNetV1')
403
- line3te = _plot_4_rows(maps_avg_instances, ax1b, ax2b, ax3b, ax4b, c='k',
404
- linewidth=linewidth, label='AP with averaging instances (baseline)')
405
-
406
- # Plot label quality
407
- label_quality = stats['label_quality']
408
- sorted_label_quality = np.array(label_quality)[sorted_indexes]
409
- for k in range(len(sorted_label_quality)):
410
- if sorted_label_quality[k] and sorted_label_quality[k] == 1:
411
- sorted_label_quality[k] = 0.99
412
-
413
- ax1b.scatter(np.arange(N)[sorted_label_quality != None],
414
- sorted_label_quality[sorted_label_quality != None], s=12, c='r', linewidth=0.8, marker='+')
415
- ax2b.scatter(np.arange(N)[sorted_label_quality != None],
416
- sorted_label_quality[sorted_label_quality != None], s=12, c='r', linewidth=0.8, marker='+')
417
- ax3b.scatter(np.arange(N)[sorted_label_quality != None],
418
- sorted_label_quality[sorted_label_quality != None], s=12, c='r', linewidth=0.8, marker='+')
419
- line_label_quality = ax4b.scatter(np.arange(N)[sorted_label_quality != None],
420
- sorted_label_quality[sorted_label_quality != None], s=12, c='r', linewidth=0.8, marker='+', label='Label quality')
421
- ax1b.scatter(np.arange(N)[sorted_label_quality == None],
422
- 0.5 * np.ones(len(np.arange(N)[sorted_label_quality == None])), s=12, c='r', linewidth=0.8, marker='_')
423
- ax2b.scatter(np.arange(N)[sorted_label_quality == None],
424
- 0.5 * np.ones(len(np.arange(N)[sorted_label_quality == None])), s=12, c='r', linewidth=0.8, marker='_')
425
- ax3b.scatter(np.arange(N)[sorted_label_quality == None],
426
- 0.5 * np.ones(len(np.arange(N)[sorted_label_quality == None])), s=12, c='r', linewidth=0.8, marker='_')
427
- ax4b.scatter(np.arange(N)[sorted_label_quality == None],
428
- 0.5 * np.ones(len(np.arange(N)[sorted_label_quality == None])), s=12, c='r', linewidth=0.8, marker='_')
429
-
430
- plt.legend(handles=[line0te, line1te, line2te, line3te, line_label_quality], fontsize=6, loc=1)
431
- plt.tight_layout(0, 0, 0)
432
- plt.savefig(save_out_path)
433
- print('Save fig to {}'.format(save_out_path))
434
-
435
-
436
- def prepare_plot_long_4_rows(sorted_lbs):
437
- N = len(sorted_lbs)
438
-
439
- f,(ax1a, ax2a, ax3a, ax4a) = plt.subplots(4, 1, sharey=False, facecolor='w', figsize=(10, 10.5))
440
-
441
- fontsize = 5
442
-
443
- K = 132
444
- ax1a.set_xlim(0, K)
445
- ax2a.set_xlim(K, 2 * K)
446
- ax3a.set_xlim(2 * K, 3 * K)
447
- ax4a.set_xlim(3 * K, N)
448
-
449
- truncated_sorted_lbs = []
450
- for lb in sorted_lbs:
451
- lb = lb[0 : 25]
452
- words = lb.split(' ')
453
- if len(words[-1]) < 3:
454
- lb = ' '.join(words[0:-1])
455
- truncated_sorted_lbs.append(lb)
456
-
457
- ax1a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
458
- ax2a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
459
- ax3a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
460
- ax4a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
461
-
462
- ax1a.set_yscale('log')
463
- ax2a.set_yscale('log')
464
- ax3a.set_yscale('log')
465
- ax4a.set_yscale('log')
466
-
467
- ax1b = ax1a.twinx()
468
- ax2b = ax2a.twinx()
469
- ax3b = ax3a.twinx()
470
- ax4b = ax4a.twinx()
471
- ax1b.set_ylim(0., 1.)
472
- ax2b.set_ylim(0., 1.)
473
- ax3b.set_ylim(0., 1.)
474
- ax4b.set_ylim(0., 1.)
475
- ax1b.set_ylabel('Average precision')
476
- ax2b.set_ylabel('Average precision')
477
- ax3b.set_ylabel('Average precision')
478
- ax4b.set_ylabel('Average precision')
479
-
480
- ax1b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
481
- ax2b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
482
- ax3b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
483
- ax4b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
484
-
485
- ax1a.xaxis.set_ticks(np.arange(K))
486
- ax1a.xaxis.set_ticklabels(truncated_sorted_lbs[0:K], rotation=90, fontsize=fontsize)
487
- ax1a.xaxis.tick_bottom()
488
- ax1a.set_ylabel("Number of audio clips")
489
-
490
- ax2a.xaxis.set_ticks(np.arange(K, 2*K))
491
- ax2a.xaxis.set_ticklabels(truncated_sorted_lbs[K:2*K], rotation=90, fontsize=fontsize)
492
- ax2a.xaxis.tick_bottom()
493
- ax2a.set_ylabel("Number of audio clips")
494
-
495
- ax3a.xaxis.set_ticks(np.arange(2*K, 3*K))
496
- ax3a.xaxis.set_ticklabels(truncated_sorted_lbs[2*K:3*K], rotation=90, fontsize=fontsize)
497
- ax3a.xaxis.tick_bottom()
498
- ax3a.set_ylabel("Number of audio clips")
499
-
500
- ax4a.xaxis.set_ticks(np.arange(3*K, N))
501
- ax4a.xaxis.set_ticklabels(truncated_sorted_lbs[3*K:], rotation=90, fontsize=fontsize)
502
- ax4a.xaxis.tick_bottom()
503
- ax4a.set_ylabel("Number of audio clips")
504
-
505
- ax1a.spines['right'].set_visible(False)
506
- ax1b.spines['right'].set_visible(False)
507
- ax2a.spines['left'].set_visible(False)
508
- ax2b.spines['left'].set_visible(False)
509
- ax2a.spines['right'].set_visible(False)
510
- ax2b.spines['right'].set_visible(False)
511
- ax3a.spines['left'].set_visible(False)
512
- ax3b.spines['left'].set_visible(False)
513
- ax3a.spines['right'].set_visible(False)
514
- ax3b.spines['right'].set_visible(False)
515
- ax4a.spines['left'].set_visible(False)
516
- ax4b.spines['left'].set_visible(False)
517
-
518
- plt.subplots_adjust(hspace = 0.8)
519
-
520
- return ax1a, ax2a, ax3a, ax4a, ax1b, ax2b, ax3b, ax4b
521
-
522
-
523
- def _scatter_4_rows(x, ax, ax2, ax3, ax4, s, c, marker='.', alpha=1.):
524
- N = len(x)
525
- ax.scatter(np.arange(N), x, s=s, c=c, marker=marker, alpha=alpha)
526
- ax2.scatter(np.arange(N), x, s=s, c=c, marker=marker, alpha=alpha)
527
- ax3.scatter(np.arange(N), x, s=s, c=c, marker=marker, alpha=alpha)
528
- ax4.scatter(np.arange(N), x, s=s, c=c, marker=marker, alpha=alpha)
529
-
530
-
531
- def _plot_4_rows(x, ax, ax2, ax3, ax4, c, linewidth=1.0, alpha=1.0, label=""):
532
- N = len(x)
533
- ax.plot(x, c=c, linewidth=linewidth, alpha=alpha)
534
- ax2.plot(x, c=c, linewidth=linewidth, alpha=alpha)
535
- ax3.plot(x, c=c, linewidth=linewidth, alpha=alpha)
536
- line, = ax4.plot(x, c=c, linewidth=linewidth, alpha=alpha, label=label)
537
- return line
538
-
539
-
540
- if __name__ == '__main__':
541
-
542
- parser = argparse.ArgumentParser(description='')
543
- subparsers = parser.add_subparsers(dest='mode')
544
-
545
- parser_classwise_iteration_map = subparsers.add_parser('plot_classwise_iteration_map')
546
- parser_six_figures = subparsers.add_parser('plot_six_figures')
547
- parser_complexity_map = subparsers.add_parser('plot_complexity_map')
548
- parser_long_fig = subparsers.add_parser('plot_long_fig')
549
-
550
- args = parser.parse_args()
551
-
552
- if args.mode == 'plot_classwise_iteration_map':
553
- plot_classwise_iteration_map(args)
554
-
555
- elif args.mode == 'plot_six_figures':
556
- plot_six_figures(args)
557
-
558
- elif args.mode == 'plot_complexity_map':
559
- plot_complexity_map(args)
560
-
561
- elif args.mode == 'plot_long_fig':
562
- plot_long_fig(args)
563
-
564
- else:
565
- raise Exception('Incorrect argument!')
 
spaces/AIGC-Audio/AudioGPT/sound_extraction/model/LASSNet.py DELETED
@@ -1,25 +0,0 @@
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from .text_encoder import Text_Encoder
- from .resunet_film import UNetRes_FiLM
-
- class LASSNet(nn.Module):
-     def __init__(self, device='cuda'):
-         super(LASSNet, self).__init__()
-         self.text_embedder = Text_Encoder(device)
-         self.UNet = UNetRes_FiLM(channels=1, cond_embedding_dim=256)
-
-     def forward(self, x, caption):
-         # x: (Batch, 1, T, 128)
-         input_ids, attns_mask = self.text_embedder.tokenize(caption)
-
-         cond_vec = self.text_embedder(input_ids, attns_mask)[0]
-         dec_cond_vec = cond_vec
-
-         mask = self.UNet(x, cond_vec, dec_cond_vec)
-         mask = torch.sigmoid(mask)
-         return mask
-
-     def get_tokenizer(self):
-         return self.text_embedder.tokenizer
 
spaces/AISuperheroes/02GR-ASR-Memory/app.py DELETED
@@ -1,196 +0,0 @@
1
- import gradio as gr
2
- import torch
3
- import time
4
- import librosa
5
- import soundfile
6
- import nemo.collections.asr as nemo_asr
7
- import tempfile
8
- import os
9
- import uuid
10
-
11
- from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
12
- import torch
13
-
14
- # PersistDataset -----
15
- import os
16
- import csv
17
- import gradio as gr
18
- from gradio import inputs, outputs
19
- import huggingface_hub
20
- from huggingface_hub import Repository, hf_hub_download, upload_file
21
- from datetime import datetime
22
- DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/Carddata.csv"
23
- DATASET_REPO_ID = "awacke1/Carddata.csv"
24
- DATA_FILENAME = "Carddata.csv"
25
- DATA_DIRNAME = "data"
- DATA_FILE = os.path.join(DATA_DIRNAME, DATA_FILENAME)
26
- HF_TOKEN = os.environ.get("HF_TOKEN")
27
-
28
- SCRIPT = """
29
- <script>
30
- if (!window.hasBeenRun) {
31
- window.hasBeenRun = true;
32
- console.log("should only happen once");
33
- document.querySelector("button.submit").click();
34
- }
35
- </script>
36
- """
37
-
38
-
39
- try:
40
- hf_hub_download(
41
- repo_id=DATASET_REPO_ID,
42
- filename=DATA_FILENAME,
43
- cache_dir=DATA_DIRNAME,
44
- force_filename=DATA_FILENAME
45
- )
46
- except:
47
- print("file not found")
48
- repo = Repository(
49
- local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
50
- )
51
-
52
- def generate_html() -> str:
53
- with open(DATA_FILE) as csvfile:
54
- reader = csv.DictReader(csvfile)
55
- rows = []
56
- for row in reader:
57
- rows.append(row)
58
- rows.reverse()
59
- if len(rows) == 0:
60
- return "no messages yet"
61
- else:
62
- html = "<div class='chatbot'>"
63
- for row in rows:
64
- html += "<div>"
65
- html += f"<span>{row['inputs']}</span>"
66
- html += f"<span class='outputs'>{row['outputs']}</span>"
67
- html += "</div>"
68
- html += "</div>"
69
- return html
70
-
71
-
72
- def store_message(name: str, message: str):
73
- if name and message:
74
- with open(DATA_FILE, "a") as csvfile:
75
- writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"])
76
- writer.writerow(
77
- {"name": name.strip(), "message": message.strip(), "time": str(datetime.now())}
78
- )
79
- commit_url = repo.push_to_hub()
80
- return ""
81
-
82
-
83
- iface = gr.Interface(
84
- store_message,
85
- [
86
- inputs.Textbox(placeholder="Your name"),
87
- inputs.Textbox(placeholder="Your message", lines=2),
88
- ],
89
- "html",
90
- css="""
91
- .message {background-color:cornflowerblue;color:white; padding:4px;margin:4px;border-radius:4px; }
92
- """,
93
- title="Reading/writing to a HuggingFace dataset repo from Spaces",
94
- description=f"This is a demo of how to do simple *shared data persistence* in a Gradio Space, backed by a dataset repo.",
95
- article=f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL})",
96
- )
97
-
98
-
99
- mname = "facebook/blenderbot-400M-distill"
100
- model = BlenderbotForConditionalGeneration.from_pretrained(mname)
101
- tokenizer = BlenderbotTokenizer.from_pretrained(mname)
102
-
103
- def take_last_tokens(inputs, note_history, history):
104
- """Filter the last 128 tokens"""
105
- if inputs['input_ids'].shape[1] > 128:
106
- inputs['input_ids'] = torch.tensor([inputs['input_ids'][0][-128:].tolist()])
107
- inputs['attention_mask'] = torch.tensor([inputs['attention_mask'][0][-128:].tolist()])
108
- note_history = ['</s> <s>'.join(note_history[0].split('</s> <s>')[2:])]
109
- history = history[1:]
110
- return inputs, note_history, history
111
-
112
- def add_note_to_history(note, note_history):
113
- """Add a note to the historical information"""
114
- note_history.append(note)
115
- note_history = '</s> <s>'.join(note_history)
116
- return [note_history]
117
-
118
-
119
- def chat(message, history):
120
- history = history or []
121
- if history:
122
- history_useful = ['</s> <s>'.join([str(a[0])+'</s> <s>'+str(a[1]) for a in history])]
123
- else:
124
- history_useful = []
125
- history_useful = add_note_to_history(message, history_useful)
126
- inputs = tokenizer(history_useful, return_tensors="pt")
127
- inputs, history_useful, history = take_last_tokens(inputs, history_useful, history)
128
- reply_ids = model.generate(**inputs)
129
- response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]
130
- history_useful = add_note_to_history(response, history_useful)
131
- list_history = history_useful[0].split('</s> <s>')
132
- history.append((list_history[-2], list_history[-1]))
133
- store_message(message, response) # Save to dataset
134
- return history, history
135
-
136
- SAMPLE_RATE = 16000
137
- model = nemo_asr.models.EncDecRNNTBPEModel.from_pretrained("nvidia/stt_en_conformer_transducer_xlarge")
138
- model.change_decoding_strategy(None)
139
- model.eval()
140
-
141
- def process_audio_file(file):
142
- data, sr = librosa.load(file)
143
- if sr != SAMPLE_RATE:
144
- data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE)
145
- # monochannel
146
- data = librosa.to_mono(data)
147
- return data
148
-
149
- #def transcribe(audio, state = "", im4 = "", file = ""):
150
- #def transcribe(audio, state = "", im4 = None, file = None):
151
- def transcribe(audio, state = ""): # two parms - had been testing video and file inputs at same time.
152
- # Grant additional context
153
- # time.sleep(1)
154
- if state is None:
155
- state = ""
156
- audio_data = process_audio_file(audio)
157
- with tempfile.TemporaryDirectory() as tmpdir:
158
- # Filepath transcribe
159
- audio_path = os.path.join(tmpdir, f'audio_{uuid.uuid4()}.wav')
160
- soundfile.write(audio_path, audio_data, SAMPLE_RATE)
161
- transcriptions = model.transcribe([audio_path])
162
- # Direct transcribe
163
- # transcriptions = model.transcribe([audio])
164
- # if transcriptions form a tuple (from RNNT), extract just "best" hypothesis
165
- if type(transcriptions) == tuple and len(transcriptions) == 2:
166
- transcriptions = transcriptions[0]
167
- transcriptions = transcriptions[0]
168
- store_message(transcriptions, state) # Save to dataset
169
- state = state + transcriptions + " "
170
- return state, state
171
-
172
- iface = gr.Interface(
173
- fn=transcribe,
174
- inputs=[
175
- gr.Audio(source="microphone", type='filepath', streaming=True),
176
- "state",
177
- #gr.Image(label="Webcam", source="webcam"),
178
- #gr.File(label="File"),
179
- ],
180
- outputs=[
181
- "textbox",
182
- "state",
183
- #gr.HighlightedText(label="HighlightedText", color_map={"punc": "pink", "test 0": "blue"}),
184
- #gr.HighlightedText(label="HighlightedText", show_legend=True),
185
- #gr.JSON(label="JSON"),
186
- #gr.HTML(label="HTML"),
187
- ],
188
- layout="horizontal",
189
- theme="huggingface",
190
- title="🗣️LiveSpeechRecognition🧠Memory💾",
191
- description=f"Live Automatic Speech Recognition (ASR) with Memory💾 Dataset.",
192
- allow_flagging='never',
193
- live=True,
194
- article=f"Result Output Saved to Memory💾 Dataset: [{DATASET_REPO_URL}]({DATASET_REPO_URL})"
195
- )
196
- iface.launch()
 
spaces/ATang0729/Forecast4Muses/Model/Model6/extensions/vis_pred_save.py DELETED
@@ -1,209 +0,0 @@
1
- from .dataset_info import DatasetInfo
2
- import cv2
3
- import mmcv
4
- import numpy as np
5
- import os
6
- from os import path as osp
7
- import json
- import warnings
8
-
9
-
10
- def save_result(img,
11
- poses,
12
- img_name=None,
13
- radius=4,
14
- thickness=1,
15
- bbox_score_thr=None,
16
- kpt_score_thr=0.3,
17
- bbox_color='green',
18
- dataset_info=None,
19
- show=False,
20
- out_dir=None,
21
- vis_out_dir=None,
22
- pred_out_dir=None,):
23
- """Visualize the detection results on the image.
24
-
25
- Args:
26
- img (str | np.ndarray): Image filename or loaded image.
27
- poses (dict[dict]): a dict which contains pose_model and pose_results of different classes.
28
- And the pose_results contains bboxes, bbox_scores, keypoints and keypoint_scores.
29
- img_name (str): Image name.
30
- radius (int): Radius of circles.
31
- thickness (int): Thickness of lines.
32
- bbox_score_thr (float): The threshold to visualize the bounding boxes.
33
- kpt_score_thr (float): The threshold to visualize the keypoints.
34
- bbox_color (str | tuple[int]): Color of bounding boxes.
35
- dataset_info (DatasetInfo): Dataset info.
36
- show (bool): Whether to show the image. Default False.
37
- out_dir (str): The output directory to save the visualizations and predictions results.
38
- If vis_out_dir is None, visualizations will be saved in ${out_dir}/visualizations.
39
- If pred_out_dir is None, predictions will be saved in ${out_dir}/predictions.
40
- Default None.
41
- vis_out_dir (str): The output directory to save the visualization results. Default None.
42
- pred_out_dir (str): The output directory to save the predictions results. Default None.
43
- """
44
- # set flags
45
- vis_out_flag = False if vis_out_dir is None else vis_out_dir
46
- pred_out_flag = False if pred_out_dir is None else pred_out_dir
47
- if out_dir:
48
- if not vis_out_dir:
49
- vis_out_flag = osp.join(out_dir, 'visualizations')
50
- if not osp.exists(vis_out_flag):
51
- os.mkdir(vis_out_flag)
52
- if not pred_out_dir:
53
- pred_out_flag = osp.join(out_dir, 'predictions')
54
- if not osp.exists(pred_out_flag):
55
- os.mkdir(pred_out_flag)
56
-
57
- # read image
58
- img_path = None
59
- if isinstance(img, str):
60
- img_path = img
61
- img = mmcv.imread(img)
62
- elif isinstance(img, np.ndarray):
63
- img = img.copy()
64
- else:
65
- raise TypeError('img must be a filename or numpy array, '
66
- f'but got {type(img)}')
67
- bbox_list = []
68
- label_list = []
69
- class_name_list = []
70
- bbox_score_list = []
71
- idx = 0
72
- for label, v in poses.items():
73
- if len(v) == 0:
74
- continue
75
- pose_results = v['pose_results']
76
- bbox = pose_results[0].gt_instances.bboxes
77
- bbox_score = pose_results[0].gt_instances.bbox_scores
78
- for bbox_idx in range(len(bbox)):
79
- b = bbox[bbox_idx]
80
- s = bbox_score[bbox_idx]
81
- if bbox_score_thr is not None:
82
- b = np.append(b, values=s) # switch to x1, y1, x2, y2, score
83
- bbox_score_list.append(s.tolist())
84
- bbox_list.append(b)
85
- label_list.append(idx)
86
- class_name_list.append(label)
87
- idx += 1
88
- bbox_list = np.array(bbox_list)
89
- label_list = np.array(label_list)
90
-
91
- # draw bbox
92
- img = mmcv.imshow_det_bboxes(
93
- img,
94
- bbox_list,
95
- label_list,
96
- class_names=class_name_list,
97
- score_thr=bbox_score_thr if bbox_score_thr is not None else 0,
98
- bbox_color=bbox_color,
99
- text_color='white',
100
- show=False,
101
- # out_file=out_file
102
- )
103
-
104
- keypoints_list = []
105
- keypoint_scores_list = []
106
- # draw pose of different classes
107
- for label, v in poses.items():
108
- if len(v) == 0:
109
- continue
110
- pose_model = v['pose_model']
111
- pose_results = v['pose_results']
112
- keypoints = pose_results[0].pred_instances.keypoints
113
- for ks in keypoints:
114
- keypoints_list.append(ks.tolist())
115
- keypoint_scores = pose_results[0].pred_instances.keypoint_scores
116
- for kss in keypoint_scores:
117
- keypoint_scores_list.append(kss.tolist())
118
-
119
- # get dataset info
120
- if (dataset_info is None and hasattr(pose_model, 'cfg')
121
- and 'dataset_info' in pose_model.cfg):
122
- dataset_info = DatasetInfo(pose_model.cfg.dataset_info)
123
-
124
- if dataset_info is not None:
125
- skeleton = dataset_info.skeleton
126
-
127
- pose_kpt_color = dataset_info.pose_kpt_color
128
- pose_kpt_color_tmp = []
129
- for color in pose_kpt_color:
130
- pose_kpt_color_tmp.append(tuple([int(x) for x in color]))
131
- pose_kpt_color = pose_kpt_color_tmp
132
-
133
- pose_link_color = dataset_info.pose_link_color
134
- pose_link_color_tmp = []
135
- for color in pose_link_color:
136
- pose_link_color_tmp.append(tuple([int(x) for x in color]))
137
- pose_link_color = pose_link_color_tmp
138
- else:
139
- warnings.warn(
140
- 'dataset is deprecated.'
141
- 'Please set `dataset_info` in the config.'
142
- 'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
143
- DeprecationWarning)
144
- raise ValueError('dataset_info is not specified or set in the config file.')
145
-
146
- # create circles_list
147
- circles_list = []
148
- for bbox_idx, circles in enumerate(keypoints):
149
- c_dict = {}
150
- for c_idx, c in enumerate(circles):
151
- if keypoint_scores[bbox_idx][c_idx] >= kpt_score_thr:
152
- c_dict[c_idx] = c
153
- # else:
154
- # c_dict[c_idx] = None
155
- circles_list.append(c_dict)
156
-
157
- # create lines_list
158
- lines_list = []
159
- for bbox_idx, _ in enumerate(keypoints):
160
- s_dict = {}
161
- for s_idx, s in enumerate(skeleton):
162
- if s[0] in circles_list[bbox_idx].keys() and s[1] in circles_list[bbox_idx].keys():
163
- s_dict[s_idx] = True
164
- else:
165
- s_dict[s_idx] = False
166
- lines_list.append(s_dict)
167
-
168
- # draw circle
169
- for _, circles in enumerate(circles_list):
170
- for c_idx, c in circles.items():
171
- if c is not None:
172
- cv2.circle(img, (int(c[0]), int(c[1])), radius, pose_kpt_color[c_idx], -1)
173
-
174
- # draw line
175
- for bbox_idx, lines in enumerate(lines_list):
176
- for l_idx, l in lines.items():
177
- if l:
178
- s = skeleton[l_idx][0] # idx of start point
179
- e = skeleton[l_idx][1] # idx of end point
180
- cv2.line(img,
181
- (int(circles_list[bbox_idx][s][0]), int(circles_list[bbox_idx][s][1])),
182
- (int(circles_list[bbox_idx][e][0]), int(circles_list[bbox_idx][e][1])),
183
- pose_link_color[l_idx], thickness)
184
-
185
- if show:
186
- mmcv.imshow(img, wait_time=0)
187
- if img_path is None:
188
- if img_name is not None:
189
- img_path = img_name
190
- else:
191
- img_path = 'demo.jpg'
192
- if vis_out_flag:
193
- out_file = osp.join(vis_out_flag, osp.basename(img_path))
194
- mmcv.imwrite(img, out_file)
195
- if pred_out_flag:
196
- pred_list = []
197
- for bbox_idx in range(len(bbox_list)):
198
- bbl = bbox_list[bbox_idx].tolist()
199
- pred_list.append(dict(
200
- keypoints=keypoints_list[bbox_idx],
201
- keypoint_scores=keypoint_scores_list[bbox_idx],
202
- bbox=[bbl],
203
- bbox_score=bbox_score_list[bbox_idx],
204
- ))
205
- # replace .jpg or .png with .json
206
- out_file = osp.join(pred_out_flag, osp.basename(img_path).rsplit('.', 1)[0] + '.json')
207
- json.dump(pred_list, open(out_file, 'w'))
208
-
209
- return img
 
spaces/AchyuthGamer/text-to-speech-client/assets/worker-7f2d1abe.js DELETED
The diff for this file is too large to render. See raw diff
 
spaces/ActivatedOne/JorisCos-ConvTasNet_Libri1Mix_enhsingle_16k/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: JorisCos-ConvTasNet Libri1Mix Enhsingle 16k
- emoji: 💻
- colorFrom: blue
- colorTo: gray
- sdk: gradio
- sdk_version: 3.38.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Adapter/T2I-Adapter/ldm/modules/diffusionmodules/model.py DELETED
@@ -1,852 +0,0 @@
1
- # pytorch_diffusion + derived encoder decoder
2
- import math
3
- import torch
4
- import torch.nn as nn
5
- import numpy as np
6
- from einops import rearrange
7
- from typing import Optional, Any
8
-
9
- from ldm.modules.attention import MemoryEfficientCrossAttention
10
-
11
- try:
12
- import xformers
13
- import xformers.ops
14
- XFORMERS_IS_AVAILBLE = True
15
- except:
16
- XFORMERS_IS_AVAILBLE = False
17
- print("No module 'xformers'. Proceeding without it.")
18
-
19
-
20
- def get_timestep_embedding(timesteps, embedding_dim):
21
- """
22
- This matches the implementation in Denoising Diffusion Probabilistic Models:
23
- From Fairseq.
24
- Build sinusoidal embeddings.
25
- This matches the implementation in tensor2tensor, but differs slightly
26
- from the description in Section 3.5 of "Attention Is All You Need".
27
- """
28
- assert len(timesteps.shape) == 1
29
-
30
- half_dim = embedding_dim // 2
31
- emb = math.log(10000) / (half_dim - 1)
32
- emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
33
- emb = emb.to(device=timesteps.device)
34
- emb = timesteps.float()[:, None] * emb[None, :]
35
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
36
- if embedding_dim % 2 == 1: # zero pad
37
- emb = torch.nn.functional.pad(emb, (0,1,0,0))
38
- return emb
39
-
40
-
41
- def nonlinearity(x):
42
- # swish
43
- return x*torch.sigmoid(x)
44
-
45
-
46
- def Normalize(in_channels, num_groups=32):
47
- return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
48
-
49
-
50
- class Upsample(nn.Module):
51
- def __init__(self, in_channels, with_conv):
52
- super().__init__()
53
- self.with_conv = with_conv
54
- if self.with_conv:
55
- self.conv = torch.nn.Conv2d(in_channels,
56
- in_channels,
57
- kernel_size=3,
58
- stride=1,
59
- padding=1)
60
-
61
- def forward(self, x):
62
- x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
63
- if self.with_conv:
64
- x = self.conv(x)
65
- return x
66
-
67
-
68
- class Downsample(nn.Module):
69
- def __init__(self, in_channels, with_conv):
70
- super().__init__()
71
- self.with_conv = with_conv
72
- if self.with_conv:
73
- # no asymmetric padding in torch conv, must do it ourselves
74
- self.conv = torch.nn.Conv2d(in_channels,
75
- in_channels,
76
- kernel_size=3,
77
- stride=2,
78
- padding=0)
79
-
80
- def forward(self, x):
81
- if self.with_conv:
82
- pad = (0,1,0,1)
83
- x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
84
- x = self.conv(x)
85
- else:
86
- x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
87
- return x
88
-
89
-
90
- class ResnetBlock(nn.Module):
91
- def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
92
- dropout, temb_channels=512):
93
- super().__init__()
94
- self.in_channels = in_channels
95
- out_channels = in_channels if out_channels is None else out_channels
96
- self.out_channels = out_channels
97
- self.use_conv_shortcut = conv_shortcut
98
-
99
- self.norm1 = Normalize(in_channels)
100
- self.conv1 = torch.nn.Conv2d(in_channels,
101
- out_channels,
102
- kernel_size=3,
103
- stride=1,
104
- padding=1)
105
- if temb_channels > 0:
106
- self.temb_proj = torch.nn.Linear(temb_channels,
107
- out_channels)
108
- self.norm2 = Normalize(out_channels)
109
- self.dropout = torch.nn.Dropout(dropout)
110
- self.conv2 = torch.nn.Conv2d(out_channels,
111
- out_channels,
112
- kernel_size=3,
113
- stride=1,
114
- padding=1)
115
- if self.in_channels != self.out_channels:
116
- if self.use_conv_shortcut:
117
- self.conv_shortcut = torch.nn.Conv2d(in_channels,
118
- out_channels,
119
- kernel_size=3,
120
- stride=1,
121
- padding=1)
122
- else:
123
- self.nin_shortcut = torch.nn.Conv2d(in_channels,
124
- out_channels,
125
- kernel_size=1,
126
- stride=1,
127
- padding=0)
128
-
129
- def forward(self, x, temb):
130
- h = x
131
- h = self.norm1(h)
132
- h = nonlinearity(h)
133
- h = self.conv1(h)
134
-
135
- if temb is not None:
136
- h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]
137
-
138
- h = self.norm2(h)
139
- h = nonlinearity(h)
140
- h = self.dropout(h)
141
- h = self.conv2(h)
142
-
143
- if self.in_channels != self.out_channels:
144
- if self.use_conv_shortcut:
145
- x = self.conv_shortcut(x)
146
- else:
147
- x = self.nin_shortcut(x)
148
-
149
- return x+h
150
-
151
-
152
- class AttnBlock(nn.Module):
153
- def __init__(self, in_channels):
154
- super().__init__()
155
- self.in_channels = in_channels
156
-
157
- self.norm = Normalize(in_channels)
158
- self.q = torch.nn.Conv2d(in_channels,
159
- in_channels,
160
- kernel_size=1,
161
- stride=1,
162
- padding=0)
163
- self.k = torch.nn.Conv2d(in_channels,
164
- in_channels,
165
- kernel_size=1,
166
- stride=1,
167
- padding=0)
168
- self.v = torch.nn.Conv2d(in_channels,
169
- in_channels,
170
- kernel_size=1,
171
- stride=1,
172
- padding=0)
173
- self.proj_out = torch.nn.Conv2d(in_channels,
174
- in_channels,
175
- kernel_size=1,
176
- stride=1,
177
- padding=0)
178
-
179
- def forward(self, x):
180
- h_ = x
181
- h_ = self.norm(h_)
182
- q = self.q(h_)
183
- k = self.k(h_)
184
- v = self.v(h_)
185
-
186
- # compute attention
187
- b,c,h,w = q.shape
188
- q = q.reshape(b,c,h*w)
189
- q = q.permute(0,2,1) # b,hw,c
190
- k = k.reshape(b,c,h*w) # b,c,hw
191
- w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
192
- w_ = w_ * (int(c)**(-0.5))
193
- w_ = torch.nn.functional.softmax(w_, dim=2)
194
-
195
- # attend to values
196
- v = v.reshape(b,c,h*w)
197
- w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q)
198
- h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
199
- h_ = h_.reshape(b,c,h,w)
200
-
201
- h_ = self.proj_out(h_)
202
-
203
- return x+h_
204
-
205
- class MemoryEfficientAttnBlock(nn.Module):
206
- """
207
- Uses xformers efficient implementation,
208
- see https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
209
- Note: this is a single-head self-attention operation
210
- """
211
- #
212
- def __init__(self, in_channels):
213
- super().__init__()
214
- self.in_channels = in_channels
215
-
216
- self.norm = Normalize(in_channels)
217
- self.q = torch.nn.Conv2d(in_channels,
218
- in_channels,
219
- kernel_size=1,
220
- stride=1,
221
- padding=0)
222
- self.k = torch.nn.Conv2d(in_channels,
223
- in_channels,
224
- kernel_size=1,
225
- stride=1,
226
- padding=0)
227
- self.v = torch.nn.Conv2d(in_channels,
228
- in_channels,
229
- kernel_size=1,
230
- stride=1,
231
- padding=0)
232
- self.proj_out = torch.nn.Conv2d(in_channels,
233
- in_channels,
234
- kernel_size=1,
235
- stride=1,
236
- padding=0)
237
- self.attention_op: Optional[Any] = None
238
-
239
- def forward(self, x):
240
- h_ = x
241
- h_ = self.norm(h_)
242
- q = self.q(h_)
243
- k = self.k(h_)
244
- v = self.v(h_)
245
-
246
- # compute attention
247
- B, C, H, W = q.shape
248
- q, k, v = map(lambda x: rearrange(x, 'b c h w -> b (h w) c'), (q, k, v))
249
-
250
- q, k, v = map(
251
- lambda t: t.unsqueeze(3)
252
- .reshape(B, t.shape[1], 1, C)
253
- .permute(0, 2, 1, 3)
254
- .reshape(B * 1, t.shape[1], C)
255
- .contiguous(),
256
- (q, k, v),
257
- )
258
- out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)
259
-
260
- out = (
261
- out.unsqueeze(0)
262
- .reshape(B, 1, out.shape[1], C)
263
- .permute(0, 2, 1, 3)
264
- .reshape(B, out.shape[1], C)
265
- )
266
- out = rearrange(out, 'b (h w) c -> b c h w', b=B, h=H, w=W, c=C)
267
- out = self.proj_out(out)
268
- return x+out
269
-
270
-
271
- class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention):
272
- def forward(self, x, context=None, mask=None):
273
- b, c, h, w = x.shape
274
- x = rearrange(x, 'b c h w -> b (h w) c')
275
- out = super().forward(x, context=context, mask=mask)
276
- out = rearrange(out, 'b (h w) c -> b c h w', h=h, w=w, c=c)
277
- return x + out
278
-
279
-
280
- def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None):
281
- assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown'
282
- if XFORMERS_IS_AVAILBLE and attn_type == "vanilla":
283
- attn_type = "vanilla-xformers"
284
- print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
285
- if attn_type == "vanilla":
286
- assert attn_kwargs is None
287
- return AttnBlock(in_channels)
288
- elif attn_type == "vanilla-xformers":
289
- print(f"building MemoryEfficientAttnBlock with {in_channels} in_channels...")
290
- return MemoryEfficientAttnBlock(in_channels)
291
- elif attn_type == "memory-efficient-cross-attn":
292
- attn_kwargs["query_dim"] = in_channels
293
- return MemoryEfficientCrossAttentionWrapper(**attn_kwargs)
294
- elif attn_type == "none":
295
- return nn.Identity(in_channels)
296
- else:
297
- raise NotImplementedError()
298
-
299
-
300
- class Model(nn.Module):
301
- def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
302
- attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
303
- resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"):
304
- super().__init__()
305
- if use_linear_attn: attn_type = "linear"
306
- self.ch = ch
307
- self.temb_ch = self.ch*4
308
- self.num_resolutions = len(ch_mult)
309
- self.num_res_blocks = num_res_blocks
310
- self.resolution = resolution
311
- self.in_channels = in_channels
312
-
313
- self.use_timestep = use_timestep
314
- if self.use_timestep:
315
- # timestep embedding
316
- self.temb = nn.Module()
317
- self.temb.dense = nn.ModuleList([
318
- torch.nn.Linear(self.ch,
319
- self.temb_ch),
320
- torch.nn.Linear(self.temb_ch,
321
- self.temb_ch),
322
- ])
323
-
324
- # downsampling
325
- self.conv_in = torch.nn.Conv2d(in_channels,
326
- self.ch,
327
- kernel_size=3,
328
- stride=1,
329
- padding=1)
330
-
331
- curr_res = resolution
332
- in_ch_mult = (1,)+tuple(ch_mult)
333
- self.down = nn.ModuleList()
334
- for i_level in range(self.num_resolutions):
335
- block = nn.ModuleList()
336
- attn = nn.ModuleList()
337
- block_in = ch*in_ch_mult[i_level]
338
- block_out = ch*ch_mult[i_level]
339
- for i_block in range(self.num_res_blocks):
340
- block.append(ResnetBlock(in_channels=block_in,
341
- out_channels=block_out,
342
- temb_channels=self.temb_ch,
343
- dropout=dropout))
344
- block_in = block_out
345
- if curr_res in attn_resolutions:
346
- attn.append(make_attn(block_in, attn_type=attn_type))
347
- down = nn.Module()
348
- down.block = block
349
- down.attn = attn
350
- if i_level != self.num_resolutions-1:
351
- down.downsample = Downsample(block_in, resamp_with_conv)
352
- curr_res = curr_res // 2
353
- self.down.append(down)
354
-
355
- # middle
356
- self.mid = nn.Module()
357
- self.mid.block_1 = ResnetBlock(in_channels=block_in,
358
- out_channels=block_in,
359
- temb_channels=self.temb_ch,
360
- dropout=dropout)
361
- self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
362
- self.mid.block_2 = ResnetBlock(in_channels=block_in,
363
- out_channels=block_in,
364
- temb_channels=self.temb_ch,
365
- dropout=dropout)
366
-
367
- # upsampling
368
- self.up = nn.ModuleList()
369
- for i_level in reversed(range(self.num_resolutions)):
370
- block = nn.ModuleList()
371
- attn = nn.ModuleList()
372
- block_out = ch*ch_mult[i_level]
373
- skip_in = ch*ch_mult[i_level]
374
- for i_block in range(self.num_res_blocks+1):
375
- if i_block == self.num_res_blocks:
376
- skip_in = ch*in_ch_mult[i_level]
377
- block.append(ResnetBlock(in_channels=block_in+skip_in,
378
- out_channels=block_out,
379
- temb_channels=self.temb_ch,
380
- dropout=dropout))
381
- block_in = block_out
382
- if curr_res in attn_resolutions:
383
- attn.append(make_attn(block_in, attn_type=attn_type))
384
- up = nn.Module()
385
- up.block = block
386
- up.attn = attn
387
- if i_level != 0:
388
- up.upsample = Upsample(block_in, resamp_with_conv)
389
- curr_res = curr_res * 2
390
- self.up.insert(0, up) # prepend to get consistent order
391
-
392
- # end
393
- self.norm_out = Normalize(block_in)
394
- self.conv_out = torch.nn.Conv2d(block_in,
395
- out_ch,
396
- kernel_size=3,
397
- stride=1,
398
- padding=1)
399
-
400
- def forward(self, x, t=None, context=None):
401
- #assert x.shape[2] == x.shape[3] == self.resolution
402
- if context is not None:
403
- # assume aligned context, cat along channel axis
404
- x = torch.cat((x, context), dim=1)
405
- if self.use_timestep:
406
- # timestep embedding
407
- assert t is not None
408
- temb = get_timestep_embedding(t, self.ch)
409
- temb = self.temb.dense[0](temb)
410
- temb = nonlinearity(temb)
411
- temb = self.temb.dense[1](temb)
412
- else:
413
- temb = None
414
-
415
- # downsampling
416
- hs = [self.conv_in(x)]
417
- for i_level in range(self.num_resolutions):
418
- for i_block in range(self.num_res_blocks):
419
- h = self.down[i_level].block[i_block](hs[-1], temb)
420
- if len(self.down[i_level].attn) > 0:
421
- h = self.down[i_level].attn[i_block](h)
422
- hs.append(h)
423
- if i_level != self.num_resolutions-1:
424
- hs.append(self.down[i_level].downsample(hs[-1]))
425
-
426
- # middle
427
- h = hs[-1]
428
- h = self.mid.block_1(h, temb)
429
- h = self.mid.attn_1(h)
430
- h = self.mid.block_2(h, temb)
431
-
432
- # upsampling
433
- for i_level in reversed(range(self.num_resolutions)):
434
- for i_block in range(self.num_res_blocks+1):
435
- h = self.up[i_level].block[i_block](
436
- torch.cat([h, hs.pop()], dim=1), temb)
437
- if len(self.up[i_level].attn) > 0:
438
- h = self.up[i_level].attn[i_block](h)
439
- if i_level != 0:
440
- h = self.up[i_level].upsample(h)
441
-
442
- # end
443
- h = self.norm_out(h)
444
- h = nonlinearity(h)
445
- h = self.conv_out(h)
446
- return h
447
-
448
- def get_last_layer(self):
449
- return self.conv_out.weight
450
-
451
-
452
- class Encoder(nn.Module):
453
- def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
454
- attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
455
- resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
456
- **ignore_kwargs):
457
- super().__init__()
458
- if use_linear_attn: attn_type = "linear"
459
- self.ch = ch
460
- self.temb_ch = 0
461
- self.num_resolutions = len(ch_mult)
462
- self.num_res_blocks = num_res_blocks
463
- self.resolution = resolution
464
- self.in_channels = in_channels
465
-
466
- # downsampling
467
- self.conv_in = torch.nn.Conv2d(in_channels,
468
- self.ch,
469
- kernel_size=3,
470
- stride=1,
471
- padding=1)
472
-
473
- curr_res = resolution
474
- in_ch_mult = (1,)+tuple(ch_mult)
475
- self.in_ch_mult = in_ch_mult
476
- self.down = nn.ModuleList()
477
- for i_level in range(self.num_resolutions):
478
- block = nn.ModuleList()
479
- attn = nn.ModuleList()
480
- block_in = ch*in_ch_mult[i_level]
481
- block_out = ch*ch_mult[i_level]
482
- for i_block in range(self.num_res_blocks):
483
- block.append(ResnetBlock(in_channels=block_in,
484
- out_channels=block_out,
485
- temb_channels=self.temb_ch,
486
- dropout=dropout))
487
- block_in = block_out
488
- if curr_res in attn_resolutions:
489
- attn.append(make_attn(block_in, attn_type=attn_type))
490
- down = nn.Module()
491
- down.block = block
492
- down.attn = attn
493
- if i_level != self.num_resolutions-1:
494
- down.downsample = Downsample(block_in, resamp_with_conv)
495
- curr_res = curr_res // 2
496
- self.down.append(down)
497
-
498
- # middle
499
- self.mid = nn.Module()
500
- self.mid.block_1 = ResnetBlock(in_channels=block_in,
501
- out_channels=block_in,
502
- temb_channels=self.temb_ch,
503
- dropout=dropout)
504
- self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
505
- self.mid.block_2 = ResnetBlock(in_channels=block_in,
506
- out_channels=block_in,
507
- temb_channels=self.temb_ch,
508
- dropout=dropout)
509
-
510
- # end
511
- self.norm_out = Normalize(block_in)
512
- self.conv_out = torch.nn.Conv2d(block_in,
513
- 2*z_channels if double_z else z_channels,
514
- kernel_size=3,
515
- stride=1,
516
- padding=1)
517
-
518
- def forward(self, x):
519
- # timestep embedding
520
- temb = None
521
-
522
- # downsampling
523
- hs = [self.conv_in(x)]
524
- for i_level in range(self.num_resolutions):
525
- for i_block in range(self.num_res_blocks):
526
- h = self.down[i_level].block[i_block](hs[-1], temb)
527
- if len(self.down[i_level].attn) > 0:
528
- h = self.down[i_level].attn[i_block](h)
529
- hs.append(h)
530
- if i_level != self.num_resolutions-1:
531
- hs.append(self.down[i_level].downsample(hs[-1]))
532
-
533
- # middle
534
- h = hs[-1]
535
- h = self.mid.block_1(h, temb)
536
- h = self.mid.attn_1(h)
537
- h = self.mid.block_2(h, temb)
538
-
539
- # end
540
- h = self.norm_out(h)
541
- h = nonlinearity(h)
542
- h = self.conv_out(h)
543
- return h
544
-
545
-
546
- class Decoder(nn.Module):
547
- def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
548
- attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
549
- resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
550
- attn_type="vanilla", **ignorekwargs):
551
- super().__init__()
552
- if use_linear_attn: attn_type = "linear"
553
- self.ch = ch
554
- self.temb_ch = 0
555
- self.num_resolutions = len(ch_mult)
556
- self.num_res_blocks = num_res_blocks
557
- self.resolution = resolution
558
- self.in_channels = in_channels
559
- self.give_pre_end = give_pre_end
560
- self.tanh_out = tanh_out
561
-
562
- # compute in_ch_mult, block_in and curr_res at lowest res
563
- in_ch_mult = (1,)+tuple(ch_mult)
564
- block_in = ch*ch_mult[self.num_resolutions-1]
565
- curr_res = resolution // 2**(self.num_resolutions-1)
566
- self.z_shape = (1,z_channels,curr_res,curr_res)
567
- print("Working with z of shape {} = {} dimensions.".format(
568
- self.z_shape, np.prod(self.z_shape)))
569
-
570
- # z to block_in
571
- self.conv_in = torch.nn.Conv2d(z_channels,
572
- block_in,
573
- kernel_size=3,
574
- stride=1,
575
- padding=1)
576
-
577
- # middle
578
- self.mid = nn.Module()
579
- self.mid.block_1 = ResnetBlock(in_channels=block_in,
580
- out_channels=block_in,
581
- temb_channels=self.temb_ch,
582
- dropout=dropout)
583
- self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
584
- self.mid.block_2 = ResnetBlock(in_channels=block_in,
585
- out_channels=block_in,
586
- temb_channels=self.temb_ch,
587
- dropout=dropout)
588
-
589
- # upsampling
590
- self.up = nn.ModuleList()
591
- for i_level in reversed(range(self.num_resolutions)):
592
- block = nn.ModuleList()
593
- attn = nn.ModuleList()
594
- block_out = ch*ch_mult[i_level]
595
- for i_block in range(self.num_res_blocks+1):
596
- block.append(ResnetBlock(in_channels=block_in,
597
- out_channels=block_out,
598
- temb_channels=self.temb_ch,
599
- dropout=dropout))
600
- block_in = block_out
601
- if curr_res in attn_resolutions:
602
- attn.append(make_attn(block_in, attn_type=attn_type))
603
- up = nn.Module()
604
- up.block = block
605
- up.attn = attn
606
- if i_level != 0:
607
- up.upsample = Upsample(block_in, resamp_with_conv)
608
- curr_res = curr_res * 2
609
- self.up.insert(0, up) # prepend to get consistent order
610
-
611
- # end
612
- self.norm_out = Normalize(block_in)
613
- self.conv_out = torch.nn.Conv2d(block_in,
614
- out_ch,
615
- kernel_size=3,
616
- stride=1,
617
- padding=1)
618
-
619
- def forward(self, z):
620
- #assert z.shape[1:] == self.z_shape[1:]
621
- self.last_z_shape = z.shape
622
-
623
- # timestep embedding
624
- temb = None
625
-
626
- # z to block_in
627
- h = self.conv_in(z)
628
-
629
- # middle
630
- h = self.mid.block_1(h, temb)
631
- h = self.mid.attn_1(h)
632
- h = self.mid.block_2(h, temb)
633
-
634
- # upsampling
635
- for i_level in reversed(range(self.num_resolutions)):
636
- for i_block in range(self.num_res_blocks+1):
637
- h = self.up[i_level].block[i_block](h, temb)
638
- if len(self.up[i_level].attn) > 0:
639
- h = self.up[i_level].attn[i_block](h)
640
- if i_level != 0:
641
- h = self.up[i_level].upsample(h)
642
-
643
- # end
644
- if self.give_pre_end:
645
- return h
646
-
647
- h = self.norm_out(h)
648
- h = nonlinearity(h)
649
- h = self.conv_out(h)
650
- if self.tanh_out:
651
- h = torch.tanh(h)
652
- return h
653
-
654
-
655
- class SimpleDecoder(nn.Module):
656
- def __init__(self, in_channels, out_channels, *args, **kwargs):
657
- super().__init__()
658
- self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1),
659
- ResnetBlock(in_channels=in_channels,
660
- out_channels=2 * in_channels,
661
- temb_channels=0, dropout=0.0),
662
- ResnetBlock(in_channels=2 * in_channels,
663
- out_channels=4 * in_channels,
664
- temb_channels=0, dropout=0.0),
665
- ResnetBlock(in_channels=4 * in_channels,
666
- out_channels=2 * in_channels,
667
- temb_channels=0, dropout=0.0),
668
- nn.Conv2d(2*in_channels, in_channels, 1),
669
- Upsample(in_channels, with_conv=True)])
670
- # end
671
- self.norm_out = Normalize(in_channels)
672
- self.conv_out = torch.nn.Conv2d(in_channels,
673
- out_channels,
674
- kernel_size=3,
675
- stride=1,
676
- padding=1)
677
-
678
- def forward(self, x):
679
- for i, layer in enumerate(self.model):
680
- if i in [1,2,3]:
681
- x = layer(x, None)
682
- else:
683
- x = layer(x)
684
-
685
- h = self.norm_out(x)
686
- h = nonlinearity(h)
687
- x = self.conv_out(h)
688
- return x
689
-
690
-
691
- class UpsampleDecoder(nn.Module):
692
- def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
693
- ch_mult=(2,2), dropout=0.0):
694
- super().__init__()
695
- # upsampling
696
- self.temb_ch = 0
697
- self.num_resolutions = len(ch_mult)
698
- self.num_res_blocks = num_res_blocks
699
- block_in = in_channels
700
- curr_res = resolution // 2 ** (self.num_resolutions - 1)
701
- self.res_blocks = nn.ModuleList()
702
- self.upsample_blocks = nn.ModuleList()
703
- for i_level in range(self.num_resolutions):
704
- res_block = []
705
- block_out = ch * ch_mult[i_level]
706
- for i_block in range(self.num_res_blocks + 1):
707
- res_block.append(ResnetBlock(in_channels=block_in,
708
- out_channels=block_out,
709
- temb_channels=self.temb_ch,
710
- dropout=dropout))
711
- block_in = block_out
712
- self.res_blocks.append(nn.ModuleList(res_block))
713
- if i_level != self.num_resolutions - 1:
714
- self.upsample_blocks.append(Upsample(block_in, True))
715
- curr_res = curr_res * 2
716
-
717
- # end
718
- self.norm_out = Normalize(block_in)
719
- self.conv_out = torch.nn.Conv2d(block_in,
720
- out_channels,
721
- kernel_size=3,
722
- stride=1,
723
- padding=1)
724
-
725
- def forward(self, x):
726
- # upsampling
727
- h = x
728
- for k, i_level in enumerate(range(self.num_resolutions)):
729
- for i_block in range(self.num_res_blocks + 1):
730
- h = self.res_blocks[i_level][i_block](h, None)
731
- if i_level != self.num_resolutions - 1:
732
- h = self.upsample_blocks[k](h)
733
- h = self.norm_out(h)
734
- h = nonlinearity(h)
735
- h = self.conv_out(h)
736
- return h
737
-
738
-
739
- class LatentRescaler(nn.Module):
740
- def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2):
741
- super().__init__()
742
- # residual block, interpolate, residual block
743
- self.factor = factor
744
- self.conv_in = nn.Conv2d(in_channels,
745
- mid_channels,
746
- kernel_size=3,
747
- stride=1,
748
- padding=1)
749
- self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
750
- out_channels=mid_channels,
751
- temb_channels=0,
752
- dropout=0.0) for _ in range(depth)])
753
- self.attn = AttnBlock(mid_channels)
754
- self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
755
- out_channels=mid_channels,
756
- temb_channels=0,
757
- dropout=0.0) for _ in range(depth)])
758
-
759
- self.conv_out = nn.Conv2d(mid_channels,
760
- out_channels,
761
- kernel_size=1,
762
- )
763
-
764
- def forward(self, x):
765
- x = self.conv_in(x)
766
- for block in self.res_block1:
767
- x = block(x, None)
768
- x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor))))
769
- x = self.attn(x)
770
- for block in self.res_block2:
771
- x = block(x, None)
772
- x = self.conv_out(x)
773
- return x
774
-
775
-
776
- class MergedRescaleEncoder(nn.Module):
777
- def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
778
- attn_resolutions, dropout=0.0, resamp_with_conv=True,
779
- ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1):
780
- super().__init__()
781
- intermediate_chn = ch * ch_mult[-1]
782
- self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult,
783
- z_channels=intermediate_chn, double_z=False, resolution=resolution,
784
- attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv,
785
- out_ch=None)
786
- self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn,
787
- mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth)
788
-
789
- def forward(self, x):
790
- x = self.encoder(x)
791
- x = self.rescaler(x)
792
- return x
793
-
794
-
795
- class MergedRescaleDecoder(nn.Module):
796
- def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8),
797
- dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1):
798
- super().__init__()
799
- tmp_chn = z_channels*ch_mult[-1]
800
- self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout,
801
- resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks,
802
- ch_mult=ch_mult, resolution=resolution, ch=ch)
803
- self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn,
804
- out_channels=tmp_chn, depth=rescale_module_depth)
805
-
806
- def forward(self, x):
807
- x = self.rescaler(x)
808
- x = self.decoder(x)
809
- return x
810
-
811
-
812
- class Upsampler(nn.Module):
813
- def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
814
- super().__init__()
815
- assert out_size >= in_size
816
- num_blocks = int(np.log2(out_size//in_size))+1
817
- factor_up = 1.+ (out_size % in_size)
818
- print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")
819
- self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels,
820
- out_channels=in_channels)
821
- self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2,
822
- attn_resolutions=[], in_channels=None, ch=in_channels,
823
- ch_mult=[ch_mult for _ in range(num_blocks)])
824
-
825
- def forward(self, x):
826
- x = self.rescaler(x)
827
- x = self.decoder(x)
828
- return x
829
-
830
-
831
- class Resize(nn.Module):
832
- def __init__(self, in_channels=None, learned=False, mode="bilinear"):
833
- super().__init__()
834
- self.with_conv = learned
835
- self.mode = mode
836
- if self.with_conv:
837
-         print(f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode")
838
- raise NotImplementedError()
839
- assert in_channels is not None
840
- # no asymmetric padding in torch conv, must do it ourselves
841
- self.conv = torch.nn.Conv2d(in_channels,
842
- in_channels,
843
- kernel_size=4,
844
- stride=2,
845
- padding=1)
846
-
847
- def forward(self, x, scale_factor=1.0):
848
- if scale_factor==1.0:
849
- return x
850
- else:
851
- x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor)
852
- return x
 
spaces/Adieudale/Adieudale/Dockerfile DELETED
@@ -1,34 +0,0 @@
- # Build Stage
- # Use golang:alpine as the base image for the build stage
- FROM golang:alpine AS builder
-
- # Add git so the project can be cloned from GitHub later
- RUN apk --no-cache add git
-
- # Clone the go-proxy-bingai project from GitHub into /workspace/app
- RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app
-
- # Set the working directory to the cloned project directory
- WORKDIR /workspace/app
-
- # Build the Go project. -ldflags="-s -w" reduces the size of the compiled binary
- RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go
-
- # Runtime Stage
- # Use the lightweight alpine image as the runtime base image
- FROM alpine
-
- # Set the working directory
- WORKDIR /workspace/app
-
- # Copy the compiled binary from the build stage into the runtime image
- COPY --from=builder /workspace/app/go-proxy-bingai .
-
- # Set the environment variable; the value here is a random string
- ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs8hD92ncMzLaoQWYtX5rG6bE3fZ4iO"
-
- # Expose port 8080
- EXPOSE 8080
-
- # Command to run when the container starts
- CMD ["/workspace/app/go-proxy-bingai"]
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/pngappender.d.ts DELETED
@@ -1,9 +0,0 @@
- import AppendData from './data/pngappender/AppendData';
- import ExtractData from './data/pngappender/ExtractData';
-
- declare var Methods: {
-     append: typeof AppendData,
-     extract: typeof ExtractData
- };
-
- export default Methods;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/flip/Factory.js DELETED
@@ -1,11 +0,0 @@
- import Flip from './Flip.js';
- import ObjectFactory from '../ObjectFactory.js';
- import SetValue from '../../../plugins/utils/object/SetValue.js';
-
- ObjectFactory.register('flip', function (gameObject, config) {
-     return new Flip(gameObject, config);
- });
-
- SetValue(window, 'RexPlugins.UI.Flip', Flip);
-
- export default Flip;
 
spaces/AlexWortega/t5_predict_activity/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: T5 Predict Activity
- emoji: 🐨
- colorFrom: red
- colorTo: green
- sdk: gradio
- sdk_version: 3.0.24
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Alichuan/VITS-Umamusume-voice-synthesizer/ONNXVITS_infer.py DELETED
@@ -1,201 +0,0 @@
1
- import torch
2
- import commons
3
- import models
4
-
5
- import math
6
- from torch import nn
7
- from torch.nn import functional as F
8
-
9
- import modules
10
- import attentions
11
-
12
- from torch.nn import Conv1d, ConvTranspose1d, Conv2d
13
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
14
- from commons import init_weights, get_padding
15
-
16
-
17
- class TextEncoder(nn.Module):
18
- def __init__(self,
19
- n_vocab,
20
- out_channels,
21
- hidden_channels,
22
- filter_channels,
23
- n_heads,
24
- n_layers,
25
- kernel_size,
26
- p_dropout,
27
- emotion_embedding):
28
- super().__init__()
29
- self.n_vocab = n_vocab
30
- self.out_channels = out_channels
31
- self.hidden_channels = hidden_channels
32
- self.filter_channels = filter_channels
33
- self.n_heads = n_heads
34
- self.n_layers = n_layers
35
- self.kernel_size = kernel_size
36
- self.p_dropout = p_dropout
37
- self.emotion_embedding = emotion_embedding
38
-
39
- if self.n_vocab != 0:
40
- self.emb = nn.Embedding(n_vocab, hidden_channels)
41
- if emotion_embedding:
42
- self.emo_proj = nn.Linear(1024, hidden_channels)
43
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5)
44
-
45
- self.encoder = attentions.Encoder(
46
- hidden_channels,
47
- filter_channels,
48
- n_heads,
49
- n_layers,
50
- kernel_size,
51
- p_dropout)
52
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
53
-
54
- def forward(self, x, x_lengths, emotion_embedding=None):
55
- if self.n_vocab != 0:
56
- x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
57
- if emotion_embedding is not None:
58
- print("emotion added")
59
- x = x + self.emo_proj(emotion_embedding.unsqueeze(1))
60
- x = torch.transpose(x, 1, -1) # [b, h, t]
61
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
62
-
63
- x = self.encoder(x * x_mask, x_mask)
64
- stats = self.proj(x) * x_mask
65
-
66
- m, logs = torch.split(stats, self.out_channels, dim=1)
67
- return x, m, logs, x_mask
68
-
69
-
70
- class PosteriorEncoder(nn.Module):
71
- def __init__(self,
72
- in_channels,
73
- out_channels,
74
- hidden_channels,
75
- kernel_size,
76
- dilation_rate,
77
- n_layers,
78
- gin_channels=0):
79
- super().__init__()
80
- self.in_channels = in_channels
81
- self.out_channels = out_channels
82
- self.hidden_channels = hidden_channels
83
- self.kernel_size = kernel_size
84
- self.dilation_rate = dilation_rate
85
- self.n_layers = n_layers
86
- self.gin_channels = gin_channels
87
-
88
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
89
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
90
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
91
-
92
- def forward(self, x, x_lengths, g=None):
93
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
94
- x = self.pre(x) * x_mask
95
- x = self.enc(x, x_mask, g=g)
96
- stats = self.proj(x) * x_mask
97
- m, logs = torch.split(stats, self.out_channels, dim=1)
98
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
99
- return z, m, logs, x_mask
100
-
101
-
102
- class SynthesizerTrn(models.SynthesizerTrn):
103
- """
104
- Synthesizer for Training
105
- """
106
-
107
- def __init__(self,
108
- n_vocab,
109
- spec_channels,
110
- segment_size,
111
- inter_channels,
112
- hidden_channels,
113
- filter_channels,
114
- n_heads,
115
- n_layers,
116
- kernel_size,
117
- p_dropout,
118
- resblock,
119
- resblock_kernel_sizes,
120
- resblock_dilation_sizes,
121
- upsample_rates,
122
- upsample_initial_channel,
123
- upsample_kernel_sizes,
124
- n_speakers=0,
125
- gin_channels=0,
126
- use_sdp=True,
127
- emotion_embedding=False,
128
- ONNX_dir="./ONNX_net/",
129
- **kwargs):
130
-
131
- super().__init__(
132
- n_vocab,
133
- spec_channels,
134
- segment_size,
135
- inter_channels,
136
- hidden_channels,
137
- filter_channels,
138
- n_heads,
139
- n_layers,
140
- kernel_size,
141
- p_dropout,
142
- resblock,
143
- resblock_kernel_sizes,
144
- resblock_dilation_sizes,
145
- upsample_rates,
146
- upsample_initial_channel,
147
- upsample_kernel_sizes,
148
- n_speakers=n_speakers,
149
- gin_channels=gin_channels,
150
- use_sdp=use_sdp,
151
- **kwargs
152
- )
153
- self.ONNX_dir = ONNX_dir
154
- self.enc_p = TextEncoder(n_vocab,
155
- inter_channels,
156
- hidden_channels,
157
- filter_channels,
158
- n_heads,
159
- n_layers,
160
- kernel_size,
161
- p_dropout,
162
- emotion_embedding)
163
- self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
164
-
165
- def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None,
166
- emotion_embedding=None):
167
- from ONNXVITS_utils import runonnx
168
- with torch.no_grad():
169
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding)
170
-
171
- if self.n_speakers > 0:
172
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
173
- else:
174
- g = None
175
-
176
- # logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
177
- logw = runonnx(f"{self.ONNX_dir}dp.onnx", x=x.numpy(), x_mask=x_mask.numpy(), g=g.numpy())
178
- logw = torch.from_numpy(logw[0])
179
-
180
- w = torch.exp(logw) * x_mask * length_scale
181
- w_ceil = torch.ceil(w)
182
- y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
183
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
184
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
185
- attn = commons.generate_path(w_ceil, attn_mask)
186
-
187
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
188
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1,
189
- 2) # [b, t', t], [b, t, d] -> [b, d, t']
190
-
191
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
192
-
193
- # z = self.flow(z_p, y_mask, g=g, reverse=True)
194
- z = runonnx(f"{self.ONNX_dir}flow.onnx", z_p=z_p.numpy(), y_mask=y_mask.numpy(), g=g.numpy())
195
- z = torch.from_numpy(z[0])
196
-
197
- # o = self.dec((z * y_mask)[:,:,:max_len], g=g)
198
- o = runonnx(f"{self.ONNX_dir}dec.onnx", z_in=(z * y_mask)[:, :, :max_len].numpy(), g=g.numpy())
199
- o = torch.from_numpy(o[0])
200
-
201
- return o, attn, y_mask, (z, z_p, m_p, logs_p)
 
spaces/Alpaca233/SadTalker/src/face3d/data/image_folder.py DELETED
@@ -1,66 +0,0 @@
1
- """A modified image folder class
2
-
3
- We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
4
- so that this class can load images from both current directory and its subdirectories.
5
- """
6
- import numpy as np
7
- import torch.utils.data as data
8
-
9
- from PIL import Image
10
- import os
11
- import os.path
12
-
13
- IMG_EXTENSIONS = [
14
- '.jpg', '.JPG', '.jpeg', '.JPEG',
15
- '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
16
- '.tif', '.TIF', '.tiff', '.TIFF',
17
- ]
18
-
19
-
20
- def is_image_file(filename):
21
- return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
22
-
23
-
24
- def make_dataset(dir, max_dataset_size=float("inf")):
25
- images = []
26
- assert os.path.isdir(dir) or os.path.islink(dir), '%s is not a valid directory' % dir
27
-
28
- for root, _, fnames in sorted(os.walk(dir, followlinks=True)):
29
- for fname in fnames:
30
- if is_image_file(fname):
31
- path = os.path.join(root, fname)
32
- images.append(path)
33
- return images[:min(max_dataset_size, len(images))]
34
-
35
-
36
- def default_loader(path):
37
- return Image.open(path).convert('RGB')
38
-
39
-
40
- class ImageFolder(data.Dataset):
41
-
42
- def __init__(self, root, transform=None, return_paths=False,
43
- loader=default_loader):
44
- imgs = make_dataset(root)
45
- if len(imgs) == 0:
46
- raise(RuntimeError("Found 0 images in: " + root + "\n"
47
- "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
48
-
49
- self.root = root
50
- self.imgs = imgs
51
- self.transform = transform
52
- self.return_paths = return_paths
53
- self.loader = loader
54
-
55
- def __getitem__(self, index):
56
- path = self.imgs[index]
57
- img = self.loader(path)
58
- if self.transform is not None:
59
- img = self.transform(img)
60
- if self.return_paths:
61
- return img, path
62
- else:
63
- return img
64
-
65
- def __len__(self):
66
- return len(self.imgs)
 
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/utils/plot.py DELETED
@@ -1,72 +0,0 @@
1
- # coding: utf-8
2
-
3
- import os
4
- from pathlib import Path
5
-
6
- import matplotlib.pyplot as plt
7
- import numpy as np
8
- import pandas as pd
9
- from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
10
- from prettytable import PrettyTable
11
- from sklearn.metrics import roc_curve, auc
12
-
13
- image_path = "/data/anxiang/IJB_release/IJBC"
14
- files = [
15
- "./ms1mv3_arcface_r100/ms1mv3_arcface_r100/ijbc.npy"
16
- ]
17
-
18
-
19
- def read_template_pair_list(path):
20
- pairs = pd.read_csv(path, sep=' ', header=None).values
21
- t1 = pairs[:, 0].astype(int)
22
- t2 = pairs[:, 1].astype(int)
23
- label = pairs[:, 2].astype(int)
24
- return t1, t2, label
25
-
26
-
27
- p1, p2, label = read_template_pair_list(
28
- os.path.join('%s/meta' % image_path,
29
- '%s_template_pair_label.txt' % 'ijbc'))
30
-
31
- methods = []
32
- scores = []
33
- for file in files:
34
- methods.append(file.split('/')[-2])
35
- scores.append(np.load(file))
36
-
37
- methods = np.array(methods)
38
- scores = dict(zip(methods, scores))
39
- colours = dict(
40
- zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))
41
- x_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]
42
- tpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])
43
- fig = plt.figure()
44
- for method in methods:
45
- fpr, tpr, _ = roc_curve(label, scores[method])
46
- roc_auc = auc(fpr, tpr)
47
- fpr = np.flipud(fpr)
48
- tpr = np.flipud(tpr) # select largest tpr at same fpr
49
- plt.plot(fpr,
50
- tpr,
51
- color=colours[method],
52
- lw=1,
53
- label=('[%s (AUC = %0.4f %%)]' %
54
- (method.split('-')[-1], roc_auc * 100)))
55
- tpr_fpr_row = []
56
- tpr_fpr_row.append("%s-%s" % (method, "IJBC"))
57
- for fpr_iter in np.arange(len(x_labels)):
58
- _, min_index = min(
59
- list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr)))))
60
- tpr_fpr_row.append('%.2f' % (tpr[min_index] * 100))
61
- tpr_fpr_table.add_row(tpr_fpr_row)
62
- plt.xlim([10 ** -6, 0.1])
63
- plt.ylim([0.3, 1.0])
64
- plt.grid(linestyle='--', linewidth=1)
65
- plt.xticks(x_labels)
66
- plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True))
67
- plt.xscale('log')
68
- plt.xlabel('False Positive Rate')
69
- plt.ylabel('True Positive Rate')
70
- plt.title('ROC on IJB')
71
- plt.legend(loc="lower right")
72
- print(tpr_fpr_table)
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/deepfloyd_if/watermark.py DELETED
@@ -1,46 +0,0 @@
1
- from typing import List
2
-
3
- import PIL
4
- import torch
5
- from PIL import Image
6
-
7
- from ...configuration_utils import ConfigMixin
8
- from ...models.modeling_utils import ModelMixin
9
- from ...utils import PIL_INTERPOLATION
10
-
11
-
12
- class IFWatermarker(ModelMixin, ConfigMixin):
13
- def __init__(self):
14
- super().__init__()
15
-
16
- self.register_buffer("watermark_image", torch.zeros((62, 62, 4)))
17
- self.watermark_image_as_pil = None
18
-
19
- def apply_watermark(self, images: List[PIL.Image.Image], sample_size=None):
20
- # copied from https://github.com/deep-floyd/IF/blob/b77482e36ca2031cb94dbca1001fc1e6400bf4ab/deepfloyd_if/modules/base.py#L287
21
-
22
- h = images[0].height
23
- w = images[0].width
24
-
25
- sample_size = sample_size or h
26
-
27
- coef = min(h / sample_size, w / sample_size)
28
- img_h, img_w = (int(h / coef), int(w / coef)) if coef < 1 else (h, w)
29
-
30
- S1, S2 = 1024**2, img_w * img_h
31
- K = (S2 / S1) ** 0.5
32
- wm_size, wm_x, wm_y = int(K * 62), img_w - int(14 * K), img_h - int(14 * K)
33
-
34
- if self.watermark_image_as_pil is None:
35
- watermark_image = self.watermark_image.to(torch.uint8).cpu().numpy()
36
- watermark_image = Image.fromarray(watermark_image, mode="RGBA")
37
- self.watermark_image_as_pil = watermark_image
38
-
39
- wm_img = self.watermark_image_as_pil.resize(
40
- (wm_size, wm_size), PIL_INTERPOLATION["bicubic"], reducing_gap=None
41
- )
42
-
43
- for pil_img in images:
44
- pil_img.paste(wm_img, box=(wm_x - wm_size, wm_y - wm_size, wm_x, wm_y), mask=wm_img.split()[-1])
45
-
46
- return images
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py DELETED
@@ -1,755 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- import warnings
17
- from typing import Any, Callable, Dict, List, Optional, Union
18
-
19
- import numpy as np
20
- import PIL
21
- import torch
22
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
23
-
24
- from ...image_processor import VaeImageProcessor
25
- from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
26
- from ...models import AutoencoderKL, UNet2DConditionModel
27
- from ...models.attention_processor import (
28
- AttnProcessor2_0,
29
- LoRAAttnProcessor2_0,
30
- LoRAXFormersAttnProcessor,
31
- XFormersAttnProcessor,
32
- )
33
- from ...schedulers import DDPMScheduler, KarrasDiffusionSchedulers
34
- from ...utils import deprecate, is_accelerate_available, is_accelerate_version, logging, randn_tensor
35
- from ..pipeline_utils import DiffusionPipeline
36
- from . import StableDiffusionPipelineOutput
37
-
38
-
39
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
40
-
41
-
42
- def preprocess(image):
43
- warnings.warn(
44
- "The preprocess method is deprecated and will be removed in a future version. Please"
45
- " use VaeImageProcessor.preprocess instead",
46
- FutureWarning,
47
- )
48
- if isinstance(image, torch.Tensor):
49
- return image
50
- elif isinstance(image, PIL.Image.Image):
51
- image = [image]
52
-
53
- if isinstance(image[0], PIL.Image.Image):
54
- w, h = image[0].size
55
- w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 64
56
-
57
- image = [np.array(i.resize((w, h)))[None, :] for i in image]
58
- image = np.concatenate(image, axis=0)
59
- image = np.array(image).astype(np.float32) / 255.0
60
- image = image.transpose(0, 3, 1, 2)
61
- image = 2.0 * image - 1.0
62
- image = torch.from_numpy(image)
63
- elif isinstance(image[0], torch.Tensor):
64
- image = torch.cat(image, dim=0)
65
- return image
66
-
67
-
68
- class StableDiffusionUpscalePipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
69
- r"""
70
- Pipeline for text-guided image super-resolution using Stable Diffusion 2.
71
-
72
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
73
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
74
-
75
- Args:
76
- vae ([`AutoencoderKL`]):
77
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
78
- text_encoder ([`~transformers.CLIPTextModel`]):
79
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
80
- tokenizer ([`~transformers.CLIPTokenizer`]):
81
- A `CLIPTokenizer` to tokenize text.
82
- unet ([`UNet2DConditionModel`]):
83
- A `UNet2DConditionModel` to denoise the encoded image latents.
84
- low_res_scheduler ([`SchedulerMixin`]):
85
- A scheduler used to add initial noise to the low resolution conditioning image. It must be an instance of
86
- [`DDPMScheduler`].
87
- scheduler ([`SchedulerMixin`]):
88
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
89
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
90
- """
91
- _optional_components = ["watermarker", "safety_checker", "feature_extractor"]
92
-
93
- def __init__(
94
- self,
95
- vae: AutoencoderKL,
96
- text_encoder: CLIPTextModel,
97
- tokenizer: CLIPTokenizer,
98
- unet: UNet2DConditionModel,
99
- low_res_scheduler: DDPMScheduler,
100
- scheduler: KarrasDiffusionSchedulers,
101
- safety_checker: Optional[Any] = None,
102
- feature_extractor: Optional[CLIPImageProcessor] = None,
103
- watermarker: Optional[Any] = None,
104
- max_noise_level: int = 350,
105
- ):
106
- super().__init__()
107
-
108
- if hasattr(
109
- vae, "config"
110
- ): # check if vae has a config attribute `scaling_factor` and if it is set to 0.08333, else set it to 0.08333 and deprecate
111
- is_vae_scaling_factor_set_to_0_08333 = (
112
- hasattr(vae.config, "scaling_factor") and vae.config.scaling_factor == 0.08333
113
- )
114
- if not is_vae_scaling_factor_set_to_0_08333:
115
- deprecation_message = (
116
- "The configuration file of the vae does not contain `scaling_factor` or it is set to"
117
- f" {vae.config.scaling_factor}, which seems highly unlikely. If your checkpoint is a fine-tuned"
118
- " version of `stabilityai/stable-diffusion-x4-upscaler` you should change 'scaling_factor' to"
119
- " 0.08333 Please make sure to update the config accordingly, as not doing so might lead to"
120
- " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging"
121
- " Face Hub, it would be very nice if you could open a Pull Request for the `vae/config.json` file"
122
- )
123
- deprecate("wrong scaling_factor", "1.0.0", deprecation_message, standard_warn=False)
124
- vae.register_to_config(scaling_factor=0.08333)
125
-
126
- self.register_modules(
127
- vae=vae,
128
- text_encoder=text_encoder,
129
- tokenizer=tokenizer,
130
- unet=unet,
131
- low_res_scheduler=low_res_scheduler,
132
- scheduler=scheduler,
133
- safety_checker=safety_checker,
134
- watermarker=watermarker,
135
- feature_extractor=feature_extractor,
136
- )
137
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
138
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, resample="bicubic")
139
- self.register_to_config(max_noise_level=max_noise_level)
140
-
141
- def enable_model_cpu_offload(self, gpu_id=0):
142
- r"""
143
- Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a
144
- time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs.
145
- Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the
146
- iterative execution of the `unet`.
147
- """
148
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
149
- from accelerate import cpu_offload_with_hook
150
- else:
151
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
152
-
153
- device = torch.device(f"cuda:{gpu_id}")
154
-
155
- if self.device.type != "cpu":
156
- self.to("cpu", silence_dtype_warnings=True)
157
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
158
-
159
- hook = None
160
- for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
161
- if cpu_offloaded_model is not None:
162
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
163
-
164
- # We'll offload the last model manually.
165
- self.final_offload_hook = hook
166
-
167
- # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker
168
- def run_safety_checker(self, image, device, dtype):
169
- if self.safety_checker is not None:
170
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
171
- image, nsfw_detected, watermark_detected = self.safety_checker(
172
- images=image,
173
- clip_input=safety_checker_input.pixel_values.to(dtype=dtype),
174
- )
175
- else:
176
- nsfw_detected = None
177
- watermark_detected = None
178
-
179
- if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
180
- self.unet_offload_hook.offload()
181
-
182
- return image, nsfw_detected, watermark_detected
183
-
184
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
185
- def _encode_prompt(
186
- self,
187
- prompt,
188
- device,
189
- num_images_per_prompt,
190
- do_classifier_free_guidance,
191
- negative_prompt=None,
192
- prompt_embeds: Optional[torch.FloatTensor] = None,
193
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
194
- lora_scale: Optional[float] = None,
195
- ):
196
- r"""
197
- Encodes the prompt into text encoder hidden states.
198
-
199
- Args:
200
- prompt (`str` or `List[str]`, *optional*):
201
- prompt to be encoded
202
- device: (`torch.device`):
203
- torch device
204
- num_images_per_prompt (`int`):
205
- number of images that should be generated per prompt
206
- do_classifier_free_guidance (`bool`):
207
- whether to use classifier free guidance or not
208
- negative_prompt (`str` or `List[str]`, *optional*):
209
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
210
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
211
- less than `1`).
212
- prompt_embeds (`torch.FloatTensor`, *optional*):
213
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
214
- provided, text embeddings will be generated from `prompt` input argument.
215
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
216
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
217
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
218
- argument.
219
- lora_scale (`float`, *optional*):
220
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
221
- """
222
- # set lora scale so that monkey patched LoRA
223
- # function of text encoder can correctly access it
224
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
225
- self._lora_scale = lora_scale
226
-
227
- if prompt is not None and isinstance(prompt, str):
228
- batch_size = 1
229
- elif prompt is not None and isinstance(prompt, list):
230
- batch_size = len(prompt)
231
- else:
232
- batch_size = prompt_embeds.shape[0]
233
-
234
- if prompt_embeds is None:
235
- # textual inversion: process multi-vector tokens if necessary
236
- if isinstance(self, TextualInversionLoaderMixin):
237
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
238
-
239
- text_inputs = self.tokenizer(
240
- prompt,
241
- padding="max_length",
242
- max_length=self.tokenizer.model_max_length,
243
- truncation=True,
244
- return_tensors="pt",
245
- )
246
- text_input_ids = text_inputs.input_ids
247
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
248
-
249
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
250
- text_input_ids, untruncated_ids
251
- ):
252
- removed_text = self.tokenizer.batch_decode(
253
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
254
- )
255
- logger.warning(
256
- "The following part of your input was truncated because CLIP can only handle sequences up to"
257
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
258
- )
259
-
260
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
261
- attention_mask = text_inputs.attention_mask.to(device)
262
- else:
263
- attention_mask = None
264
-
265
- prompt_embeds = self.text_encoder(
266
- text_input_ids.to(device),
267
- attention_mask=attention_mask,
268
- )
269
- prompt_embeds = prompt_embeds[0]
270
-
271
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
272
-
273
- bs_embed, seq_len, _ = prompt_embeds.shape
274
- # duplicate text embeddings for each generation per prompt, using mps friendly method
275
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
276
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
277
-
278
- # get unconditional embeddings for classifier free guidance
279
- if do_classifier_free_guidance and negative_prompt_embeds is None:
280
- uncond_tokens: List[str]
281
- if negative_prompt is None:
282
- uncond_tokens = [""] * batch_size
283
- elif prompt is not None and type(prompt) is not type(negative_prompt):
284
- raise TypeError(
285
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
286
- f" {type(prompt)}."
287
- )
288
- elif isinstance(negative_prompt, str):
289
- uncond_tokens = [negative_prompt]
290
- elif batch_size != len(negative_prompt):
291
- raise ValueError(
292
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
293
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
294
- " the batch size of `prompt`."
295
- )
296
- else:
297
- uncond_tokens = negative_prompt
298
-
299
- # textual inversion: process multi-vector tokens if necessary
300
- if isinstance(self, TextualInversionLoaderMixin):
301
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
302
-
303
- max_length = prompt_embeds.shape[1]
304
- uncond_input = self.tokenizer(
305
- uncond_tokens,
306
- padding="max_length",
307
- max_length=max_length,
308
- truncation=True,
309
- return_tensors="pt",
310
- )
311
-
312
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
313
- attention_mask = uncond_input.attention_mask.to(device)
314
- else:
315
- attention_mask = None
316
-
317
- negative_prompt_embeds = self.text_encoder(
318
- uncond_input.input_ids.to(device),
319
- attention_mask=attention_mask,
320
- )
321
- negative_prompt_embeds = negative_prompt_embeds[0]
322
-
323
- if do_classifier_free_guidance:
324
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
325
- seq_len = negative_prompt_embeds.shape[1]
326
-
327
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
328
-
329
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
330
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
331
-
332
- # For classifier free guidance, we need to do two forward passes.
333
- # Here we concatenate the unconditional and text embeddings into a single batch
334
- # to avoid doing two forward passes
335
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
336
-
337
- return prompt_embeds
338
-
339
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
340
- def prepare_extra_step_kwargs(self, generator, eta):
341
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
342
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
343
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
344
- # and should be between [0, 1]
345
-
346
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
347
- extra_step_kwargs = {}
348
- if accepts_eta:
349
- extra_step_kwargs["eta"] = eta
350
-
351
- # check if the scheduler accepts generator
352
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
353
- if accepts_generator:
354
- extra_step_kwargs["generator"] = generator
355
- return extra_step_kwargs
356
-
357
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
358
- def decode_latents(self, latents):
359
- warnings.warn(
360
- "The decode_latents method is deprecated and will be removed in a future version. Please"
361
- " use VaeImageProcessor instead",
362
- FutureWarning,
363
- )
364
- latents = 1 / self.vae.config.scaling_factor * latents
365
- image = self.vae.decode(latents, return_dict=False)[0]
366
- image = (image / 2 + 0.5).clamp(0, 1)
367
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
368
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
369
- return image
370
-
371
- def check_inputs(
372
- self,
373
- prompt,
374
- image,
375
- noise_level,
376
- callback_steps,
377
- negative_prompt=None,
378
- prompt_embeds=None,
379
- negative_prompt_embeds=None,
380
- ):
381
- if (callback_steps is None) or (
382
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
383
- ):
384
- raise ValueError(
385
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
386
- f" {type(callback_steps)}."
387
- )
388
-
389
- if prompt is not None and prompt_embeds is not None:
390
- raise ValueError(
391
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
392
- " only forward one of the two."
393
- )
394
- elif prompt is None and prompt_embeds is None:
395
- raise ValueError(
396
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
397
- )
398
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
399
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
400
-
401
- if negative_prompt is not None and negative_prompt_embeds is not None:
402
- raise ValueError(
403
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
404
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
405
- )
406
-
407
- if prompt_embeds is not None and negative_prompt_embeds is not None:
408
- if prompt_embeds.shape != negative_prompt_embeds.shape:
409
- raise ValueError(
410
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
411
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
412
- f" {negative_prompt_embeds.shape}."
413
- )
414
-
415
- if (
416
- not isinstance(image, torch.Tensor)
417
- and not isinstance(image, PIL.Image.Image)
418
- and not isinstance(image, np.ndarray)
419
- and not isinstance(image, list)
420
- ):
421
- raise ValueError(
422
- f"`image` has to be of type `torch.Tensor`, `np.ndarray`, `PIL.Image.Image` or `list` but is {type(image)}"
423
- )
424
-
425
- # verify batch size of prompt and image are same if image is a list or tensor or numpy array
426
- if isinstance(image, list) or isinstance(image, torch.Tensor) or isinstance(image, np.ndarray):
427
- if prompt is not None and isinstance(prompt, str):
428
- batch_size = 1
429
- elif prompt is not None and isinstance(prompt, list):
430
- batch_size = len(prompt)
431
- else:
432
- batch_size = prompt_embeds.shape[0]
433
-
434
- if isinstance(image, list):
435
- image_batch_size = len(image)
436
- else:
437
- image_batch_size = image.shape[0]
438
- if batch_size != image_batch_size:
439
- raise ValueError(
440
- f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}."
441
- " Please make sure that passed `prompt` matches the batch size of `image`."
442
- )
443
-
444
- # check noise level
445
- if noise_level > self.config.max_noise_level:
446
- raise ValueError(f"`noise_level` has to be <= {self.config.max_noise_level} but is {noise_level}")
447
-
448
- if (callback_steps is None) or (
449
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
450
- ):
451
- raise ValueError(
452
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
453
- f" {type(callback_steps)}."
454
- )
455
-
456
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
457
- shape = (batch_size, num_channels_latents, height, width)
458
- if latents is None:
459
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
460
- else:
461
- if latents.shape != shape:
462
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
463
- latents = latents.to(device)
464
-
465
- # scale the initial noise by the standard deviation required by the scheduler
466
- latents = latents * self.scheduler.init_noise_sigma
467
- return latents
468
-
469
- def upcast_vae(self):
470
- dtype = self.vae.dtype
471
- self.vae.to(dtype=torch.float32)
472
- use_torch_2_0_or_xformers = isinstance(
473
- self.vae.decoder.mid_block.attentions[0].processor,
474
- (
475
- AttnProcessor2_0,
476
- XFormersAttnProcessor,
477
- LoRAXFormersAttnProcessor,
478
- LoRAAttnProcessor2_0,
479
- ),
480
- )
481
- # if xformers or torch_2_0 is used attention block does not need
482
- # to be in float32 which can save lots of memory
483
- if use_torch_2_0_or_xformers:
484
- self.vae.post_quant_conv.to(dtype)
485
- self.vae.decoder.conv_in.to(dtype)
486
- self.vae.decoder.mid_block.to(dtype)
487
-
488
- @torch.no_grad()
489
- def __call__(
490
- self,
491
- prompt: Union[str, List[str]] = None,
492
- image: Union[
493
- torch.FloatTensor,
494
- PIL.Image.Image,
495
- np.ndarray,
496
- List[torch.FloatTensor],
497
- List[PIL.Image.Image],
498
- List[np.ndarray],
499
- ] = None,
500
- num_inference_steps: int = 75,
501
- guidance_scale: float = 9.0,
502
- noise_level: int = 20,
503
- negative_prompt: Optional[Union[str, List[str]]] = None,
504
- num_images_per_prompt: Optional[int] = 1,
505
- eta: float = 0.0,
506
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
507
- latents: Optional[torch.FloatTensor] = None,
508
- prompt_embeds: Optional[torch.FloatTensor] = None,
509
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
510
- output_type: Optional[str] = "pil",
511
- return_dict: bool = True,
512
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
513
- callback_steps: int = 1,
514
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
515
- ):
516
- r"""
517
- The call function to the pipeline for generation.
518
-
519
- Args:
520
- prompt (`str` or `List[str]`, *optional*):
521
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
522
- image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
523
- `Image` or tensor representing an image batch to be upscaled.
524
- num_inference_steps (`int`, *optional*, defaults to 75):
525
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
526
- expense of slower inference.
527
- guidance_scale (`float`, *optional*, defaults to 9.0):
528
- A higher guidance scale value encourages the model to generate images closely linked to the text
529
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
530
- negative_prompt (`str` or `List[str]`, *optional*):
531
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
532
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
533
- num_images_per_prompt (`int`, *optional*, defaults to 1):
534
- The number of images to generate per prompt.
535
- eta (`float`, *optional*, defaults to 0.0):
536
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
537
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
538
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
539
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
540
- generation deterministic.
541
- latents (`torch.FloatTensor`, *optional*):
542
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
543
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
544
- tensor is generated by sampling using the supplied random `generator`.
545
- prompt_embeds (`torch.FloatTensor`, *optional*):
546
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
547
- provided, text embeddings are generated from the `prompt` input argument.
548
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
549
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
550
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
551
- output_type (`str`, *optional*, defaults to `"pil"`):
552
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
553
- return_dict (`bool`, *optional*, defaults to `True`):
554
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
555
- plain tuple.
556
- callback (`Callable`, *optional*):
557
- A function that calls every `callback_steps` steps during inference. The function is called with the
558
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
559
- callback_steps (`int`, *optional*, defaults to 1):
560
- The frequency at which the `callback` function is called. If not specified, the callback is called at
561
- every step.
562
- cross_attention_kwargs (`dict`, *optional*):
563
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
564
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
565
-
566
- Examples:
567
- ```py
568
- >>> import requests
569
- >>> from PIL import Image
570
- >>> from io import BytesIO
571
- >>> from diffusers import StableDiffusionUpscalePipeline
572
- >>> import torch
573
-
574
- >>> # load model and scheduler
575
- >>> model_id = "stabilityai/stable-diffusion-x4-upscaler"
576
- >>> pipeline = StableDiffusionUpscalePipeline.from_pretrained(
577
- ... model_id, revision="fp16", torch_dtype=torch.float16
578
- ... )
579
- >>> pipeline = pipeline.to("cuda")
580
-
581
- >>> # let's download an image
582
- >>> url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale/low_res_cat.png"
583
- >>> response = requests.get(url)
584
- >>> low_res_img = Image.open(BytesIO(response.content)).convert("RGB")
585
- >>> low_res_img = low_res_img.resize((128, 128))
586
- >>> prompt = "a white cat"
587
-
588
- >>> upscaled_image = pipeline(prompt=prompt, image=low_res_img).images[0]
589
- >>> upscaled_image.save("upsampled_cat.png")
590
- ```
591
-
592
- Returns:
593
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
594
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
595
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
596
- second element is a list of `bool`s indicating whether the corresponding generated image contains
597
- "not-safe-for-work" (nsfw) content.
598
- """
599
-
600
- # 1. Check inputs
601
- self.check_inputs(
602
- prompt,
603
- image,
604
- noise_level,
605
- callback_steps,
606
- negative_prompt,
607
- prompt_embeds,
608
- negative_prompt_embeds,
609
- )
610
-
611
- if image is None:
612
- raise ValueError("`image` input cannot be undefined.")
613
-
614
- # 2. Define call parameters
615
- if prompt is not None and isinstance(prompt, str):
616
- batch_size = 1
617
- elif prompt is not None and isinstance(prompt, list):
618
- batch_size = len(prompt)
619
- else:
620
- batch_size = prompt_embeds.shape[0]
621
-
622
- device = self._execution_device
623
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
624
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
625
- # corresponds to doing no classifier free guidance.
626
- do_classifier_free_guidance = guidance_scale > 1.0
627
-
628
- # 3. Encode input prompt
629
- text_encoder_lora_scale = (
630
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
631
- )
632
- prompt_embeds = self._encode_prompt(
633
- prompt,
634
- device,
635
- num_images_per_prompt,
636
- do_classifier_free_guidance,
637
- negative_prompt,
638
- prompt_embeds=prompt_embeds,
639
- negative_prompt_embeds=negative_prompt_embeds,
640
- lora_scale=text_encoder_lora_scale,
641
- )
642
-
643
- # 4. Preprocess image
644
- image = self.image_processor.preprocess(image)
645
- image = image.to(dtype=prompt_embeds.dtype, device=device)
646
-
647
- # 5. set timesteps
648
- self.scheduler.set_timesteps(num_inference_steps, device=device)
649
- timesteps = self.scheduler.timesteps
650
-
651
- # 5. Add noise to image
652
- noise_level = torch.tensor([noise_level], dtype=torch.long, device=device)
653
- noise = randn_tensor(image.shape, generator=generator, device=device, dtype=prompt_embeds.dtype)
654
- image = self.low_res_scheduler.add_noise(image, noise, noise_level)
655
-
656
- batch_multiplier = 2 if do_classifier_free_guidance else 1
657
- image = torch.cat([image] * batch_multiplier * num_images_per_prompt)
658
- noise_level = torch.cat([noise_level] * image.shape[0])
659
-
660
- # 6. Prepare latent variables
661
- height, width = image.shape[2:]
662
- num_channels_latents = self.vae.config.latent_channels
663
- latents = self.prepare_latents(
664
- batch_size * num_images_per_prompt,
665
- num_channels_latents,
666
- height,
667
- width,
668
- prompt_embeds.dtype,
669
- device,
670
- generator,
671
- latents,
672
- )
673
-
674
- # 7. Check that sizes of image and latents match
675
- num_channels_image = image.shape[1]
676
- if num_channels_latents + num_channels_image != self.unet.config.in_channels:
677
- raise ValueError(
678
- f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
679
- f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
680
- f" `num_channels_image`: {num_channels_image} "
681
- f" = {num_channels_latents+num_channels_image}. Please verify the config of"
682
- " `pipeline.unet` or your `image` input."
683
- )
684
-
685
- # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
686
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
687
-
688
- # 9. Denoising loop
689
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
690
- with self.progress_bar(total=num_inference_steps) as progress_bar:
691
- for i, t in enumerate(timesteps):
692
- # expand the latents if we are doing classifier free guidance
693
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
694
-
695
- # concat latents, mask, masked_image_latents in the channel dimension
696
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
697
- latent_model_input = torch.cat([latent_model_input, image], dim=1)
698
-
699
- # predict the noise residual
700
- noise_pred = self.unet(
701
- latent_model_input,
702
- t,
703
- encoder_hidden_states=prompt_embeds,
704
- cross_attention_kwargs=cross_attention_kwargs,
705
- class_labels=noise_level,
706
- return_dict=False,
707
- )[0]
708
-
709
- # perform guidance
710
- if do_classifier_free_guidance:
711
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
712
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
713
-
714
- # compute the previous noisy sample x_t -> x_t-1
715
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
716
-
717
- # call the callback, if provided
718
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
719
- progress_bar.update()
720
- if callback is not None and i % callback_steps == 0:
721
- callback(i, t, latents)
722
-
723
- # 10. Post-processing
724
- # make sure the VAE is in float32 mode, as it overflows in float16
725
- if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
726
- self.upcast_vae()
727
- latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
728
-
729
- # post-processing
730
- if not output_type == "latent":
731
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
732
- image, has_nsfw_concept, _ = self.run_safety_checker(image, device, prompt_embeds.dtype)
733
- else:
734
- image = latents
735
- has_nsfw_concept = None
736
-
737
- if has_nsfw_concept is None:
738
- do_denormalize = [True] * image.shape[0]
739
- else:
740
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
741
-
742
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
743
-
744
- # 11. Apply watermark
745
- if output_type == "pil" and self.watermarker is not None:
746
- image = self.watermarker.apply_watermark(image)
747
-
748
- # Offload last model to CPU
749
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
750
- self.final_offload_hook.offload()
751
-
752
- if not return_dict:
753
- return (image, has_nsfw_concept)
754
-
755
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
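
The denoising loop above duplicates the latents, predicts an unconditional and a text-conditioned noise residual in a single UNet pass, and then blends the two. Below is a minimal sketch of that blending step on plain tensors; the names mirror the variables used in the loop and nothing else from the pipeline is assumed.

```py
import torch


def apply_classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # `noise_pred` stacks the unconditional and conditional predictions along the
    # batch dimension, exactly as produced after `torch.cat([latents] * 2)`.
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)


# With guidance_scale == 1 this reduces to the plain text-conditioned prediction
# (no extra guidance), which is why the pipeline only doubles the batch and
# performs the blend when guidance_scale > 1.
```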
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stochastic_karras_ve/pipeline_stochastic_karras_ve.py DELETED
@@ -1,128 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from typing import List, Optional, Tuple, Union
16
-
17
- import torch
18
-
19
- from ...models import UNet2DModel
20
- from ...schedulers import KarrasVeScheduler
21
- from ...utils import randn_tensor
22
- from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
23
-
24
-
25
- class KarrasVePipeline(DiffusionPipeline):
26
- r"""
27
- Pipeline for unconditional image generation.
28
-
29
- Parameters:
30
- unet ([`UNet2DModel`]):
31
- A `UNet2DModel` to denoise the encoded image.
32
- scheduler ([`KarrasVeScheduler`]):
33
- A scheduler to be used in combination with `unet` to denoise the encoded image.
34
- """
35
-
36
- # add type hints for linting
37
- unet: UNet2DModel
38
- scheduler: KarrasVeScheduler
39
-
40
- def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
41
- super().__init__()
42
- self.register_modules(unet=unet, scheduler=scheduler)
43
-
44
- @torch.no_grad()
45
- def __call__(
46
- self,
47
- batch_size: int = 1,
48
- num_inference_steps: int = 50,
49
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
50
- output_type: Optional[str] = "pil",
51
- return_dict: bool = True,
52
- **kwargs,
53
- ) -> Union[Tuple, ImagePipelineOutput]:
54
- r"""
55
- The call function to the pipeline for generation.
56
-
57
- Args:
58
- batch_size (`int`, *optional*, defaults to 1):
59
- The number of images to generate.
60
- generator (`torch.Generator`, *optional*):
61
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
62
- generation deterministic.
63
- num_inference_steps (`int`, *optional*, defaults to 50):
64
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
65
- expense of slower inference.
66
- output_type (`str`, *optional*, defaults to `"pil"`):
67
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
68
- return_dict (`bool`, *optional*, defaults to `True`):
69
- Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple.
70
-
71
- Example:
72
-
73
- Returns:
74
- [`~pipelines.ImagePipelineOutput`] or `tuple`:
75
- If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
76
- returned where the first element is a list with the generated images.
77
- """
78
-
79
- img_size = self.unet.config.sample_size
80
- shape = (batch_size, 3, img_size, img_size)
81
-
82
- model = self.unet
83
-
84
- # sample x_0 ~ N(0, sigma_0^2 * I)
85
- sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
86
-
87
- self.scheduler.set_timesteps(num_inference_steps)
88
-
89
- for t in self.progress_bar(self.scheduler.timesteps):
90
- # here sigma_t == t_i from the paper
91
- sigma = self.scheduler.schedule[t]
92
- sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
93
-
94
- # 1. Select temporarily increased noise level sigma_hat
95
- # 2. Add new noise to move from sample_i to sample_hat
96
- sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
97
-
98
- # 3. Predict the noise residual given the noise magnitude `sigma_hat`
99
- # The model inputs and output are adjusted by following eq. (213) in [1].
100
- model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
101
-
102
- # 4. Evaluate dx/dt at sigma_hat
103
- # 5. Take Euler step from sigma to sigma_prev
104
- step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
105
-
106
- if sigma_prev != 0:
107
- # 6. Apply 2nd order correction
108
- # The model inputs and output are adjusted by following eq. (213) in [1].
109
- model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
110
- step_output = self.scheduler.step_correct(
111
- model_output,
112
- sigma_hat,
113
- sigma_prev,
114
- sample_hat,
115
- step_output.prev_sample,
116
- step_output["derivative"],
117
- )
118
- sample = step_output.prev_sample
119
-
120
- sample = (sample / 2 + 0.5).clamp(0, 1)
121
- image = sample.cpu().permute(0, 2, 3, 1).numpy()
122
- if output_type == "pil":
123
- image = self.numpy_to_pil(image)
124
-
125
- if not return_dict:
126
- return (image,)
127
-
128
- return ImagePipelineOutput(images=image)
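
The docstring above leaves its `Example:` section empty. The following is a hedged smoke-test sketch; the tiny UNet configuration is an assumption chosen only so the pipeline runs quickly with random weights, not a recommended setup.

```py
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

# Small, untrained UNet, just enough to exercise the pipeline's control flow.
unet = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=1,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())

generator = torch.Generator().manual_seed(0)
images = pipe(batch_size=1, num_inference_steps=2, generator=generator).images
print(len(images), images[0].size)  # 1 (32, 32)
```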
spaces/Andy1621/uniformer_image_detection/configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py DELETED
@@ -1,5 +0,0 @@
- _base_ = 'retinanet_r50_fpg_crop640_50e_coco.py'
-
- model = dict(
-     neck=dict(out_channels=128, inter_channels=128),
-     bbox_head=dict(in_channels=128))
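
For context, a hedged illustration of how this `_base_` override composes when loaded with mmcv; the relative path is assumed to be resolved from an mmdetection checkout.

```py
from mmcv import Config

# The child config only restates the keys it changes (the 128-channel widths);
# everything else is inherited from retinanet_r50_fpg_crop640_50e_coco.py.
cfg = Config.fromfile('configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py')
print(cfg.model.neck.out_channels)      # 128
print(cfg.model.bbox_head.in_channels)  # 128
```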
spaces/Andy1621/uniformer_image_detection/mmdet/core/post_processing/merge_augs.py DELETED
@@ -1,150 +0,0 @@
1
- import copy
2
- import warnings
3
-
4
- import numpy as np
5
- import torch
6
- from mmcv import ConfigDict
7
- from mmcv.ops import nms
8
-
9
- from ..bbox import bbox_mapping_back
10
-
11
-
12
- def merge_aug_proposals(aug_proposals, img_metas, cfg):
13
- """Merge augmented proposals (multiscale, flip, etc.)
14
-
15
- Args:
16
- aug_proposals (list[Tensor]): proposals from different testing
17
- schemes, shape (n, 5). Note that they are not rescaled to the
18
- original image size.
19
-
20
- img_metas (list[dict]): list of image info dict where each dict has:
21
- 'img_shape', 'scale_factor', 'flip', and may also contain
22
- 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
23
- For details on the values of these keys see
24
- `mmdet/datasets/pipelines/formatting.py:Collect`.
25
-
26
- cfg (dict): rpn test config.
27
-
28
- Returns:
29
- Tensor: shape (n, 4), proposals corresponding to original image scale.
30
- """
31
-
32
- cfg = copy.deepcopy(cfg)
33
-
34
- # deprecate arguments warning
35
- if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:
36
- warnings.warn(
37
- 'In rpn_proposal or test_cfg, '
38
- 'nms_thr has been moved to a dict named nms as '
39
- 'iou_threshold, max_num has been renamed as max_per_img, '
40
- 'name of original arguments and the way to specify '
41
- 'iou_threshold of NMS will be deprecated.')
42
- if 'nms' not in cfg:
43
- cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))
44
- if 'max_num' in cfg:
45
- if 'max_per_img' in cfg:
46
- assert cfg.max_num == cfg.max_per_img, f'You set max_num and ' \
47
- f'max_per_img at the same time, but get {cfg.max_num} ' \
48
- f'and {cfg.max_per_img} respectively' \
49
- f'Please delete max_num which will be deprecated.'
50
- else:
51
- cfg.max_per_img = cfg.max_num
52
- if 'nms_thr' in cfg:
53
- assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \
54
- f'iou_threshold in nms and ' \
55
- f'nms_thr at the same time, but get ' \
56
- f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \
57
- f' respectively. Please delete the nms_thr ' \
58
- f'which will be deprecated.'
59
-
60
- recovered_proposals = []
61
- for proposals, img_info in zip(aug_proposals, img_metas):
62
- img_shape = img_info['img_shape']
63
- scale_factor = img_info['scale_factor']
64
- flip = img_info['flip']
65
- flip_direction = img_info['flip_direction']
66
- _proposals = proposals.clone()
67
- _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape,
68
- scale_factor, flip,
69
- flip_direction)
70
- recovered_proposals.append(_proposals)
71
- aug_proposals = torch.cat(recovered_proposals, dim=0)
72
- merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(),
73
- aug_proposals[:, -1].contiguous(),
74
- cfg.nms.iou_threshold)
75
- scores = merged_proposals[:, 4]
76
- _, order = scores.sort(0, descending=True)
77
- num = min(cfg.max_per_img, merged_proposals.shape[0])
78
- order = order[:num]
79
- merged_proposals = merged_proposals[order, :]
80
- return merged_proposals
81
-
82
-
83
- def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):
84
- """Merge augmented detection bboxes and scores.
85
-
86
- Args:
87
- aug_bboxes (list[Tensor]): shape (n, 4*#class)
88
- aug_scores (list[Tensor] or None): shape (n, #class)
89
- img_shapes (list[Tensor]): shape (3, ).
90
- rcnn_test_cfg (dict): rcnn test config.
91
-
92
- Returns:
93
- tuple: (bboxes, scores)
94
- """
95
- recovered_bboxes = []
96
- for bboxes, img_info in zip(aug_bboxes, img_metas):
97
- img_shape = img_info[0]['img_shape']
98
- scale_factor = img_info[0]['scale_factor']
99
- flip = img_info[0]['flip']
100
- flip_direction = img_info[0]['flip_direction']
101
- bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,
102
- flip_direction)
103
- recovered_bboxes.append(bboxes)
104
- bboxes = torch.stack(recovered_bboxes).mean(dim=0)
105
- if aug_scores is None:
106
- return bboxes
107
- else:
108
- scores = torch.stack(aug_scores).mean(dim=0)
109
- return bboxes, scores
110
-
111
-
112
- def merge_aug_scores(aug_scores):
113
- """Merge augmented bbox scores."""
114
- if isinstance(aug_scores[0], torch.Tensor):
115
- return torch.mean(torch.stack(aug_scores), dim=0)
116
- else:
117
- return np.mean(aug_scores, axis=0)
118
-
119
-
120
- def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):
121
- """Merge augmented mask prediction.
122
-
123
- Args:
124
- aug_masks (list[ndarray]): shape (n, #class, h, w)
125
- img_shapes (list[ndarray]): shape (3, ).
126
- rcnn_test_cfg (dict): rcnn test config.
127
-
128
- Returns:
129
- tuple: (bboxes, scores)
130
- """
131
- recovered_masks = []
132
- for mask, img_info in zip(aug_masks, img_metas):
133
- flip = img_info[0]['flip']
134
- flip_direction = img_info[0]['flip_direction']
135
- if flip:
136
- if flip_direction == 'horizontal':
137
- mask = mask[:, :, :, ::-1]
138
- elif flip_direction == 'vertical':
139
- mask = mask[:, :, ::-1, :]
140
- else:
141
- raise ValueError(
142
- f"Invalid flipping direction '{flip_direction}'")
143
- recovered_masks.append(mask)
144
-
145
- if weights is None:
146
- merged_masks = np.mean(recovered_masks, axis=0)
147
- else:
148
- merged_masks = np.average(
149
- np.array(recovered_masks), axis=0, weights=np.array(weights))
150
- return merged_masks
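
`merge_aug_masks` above undoes the test-time flips and then averages the predictions. A tiny, self-contained NumPy check of that flip-and-average logic follows; the 2x2 "masks" are made up purely for illustration.

```py
import numpy as np

# Two augmented "masks" for the same image: one from the original view and
# one from a horizontally flipped view (already shaped (n, #class, h, w)).
mask_orig = np.arange(4, dtype=np.float32).reshape(1, 1, 2, 2)
mask_flipped = mask_orig[:, :, :, ::-1]          # what the flipped branch predicts

# Undo the horizontal flip before averaging, mirroring merge_aug_masks.
recovered = [mask_orig, mask_flipped[:, :, :, ::-1]]
merged = np.mean(recovered, axis=0)

assert np.allclose(merged, mask_orig)            # flipping back recovers the original
```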
spaces/Andy1621/uniformer_image_segmentation/configs/point_rend/pointrend_r101_512x512_160k_ade20k.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './pointrend_r50_512x512_160k_ade20k.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/AnimaLab/bias-test-gpt-pairs/mgr_cookies.py DELETED
@@ -1,64 +0,0 @@
- import requests
- import pickle
- import browser_cookie3
- import selenium.webdriver
- import os
-
- cookie_name = "openAIKey"
- cookie_fname = "cookies.pcl"
-
- def saveOpenAIKey(value):
-     global cookie_name, cookie_fname
-
-     print("Saving the value in cookie...")
-
-     s = requests.session()
-     s.cookies.set(cookie_name, value)
-
-     # print(f"Session cookies before save: {s.cookies}")
-
-     # Save the cookies to file:
-     # with open(cookie_fname, 'wb') as f:
-     #     pickle.dump(s.cookies, f)
-
-     # Chrome browser
-     try:
-         driver = selenium.webdriver.Chrome()
-         driver.get("https://huggingface.co")
-         driver.add_cookie({cookie_name: value})
-     except Exception as e:
-         print(f"Exception: {e}")
-
-
- def loadOpenAIKey():
-     global cookie_name, cookie_fname
-
-     openAIkey = None
-
-     print("Loading the value from cookie...")
-     s = requests.session()
-
-     # try:
-     #     if os.path.exists(cookie_fname):
-     #         with open(cookie_fname, 'rb') as f:
-     #             s.cookies.update(pickle.load(f))
-     # except Exception as e:
-     #     print(f"Exception: {e}")
-
-     print(f"Saved cookies: {s.cookies}")
-
-     openAIkey = s.cookies.get(cookie_name)
-     print(f"Server cookie: {openAIkey is not None}")
-     if openAIkey is None:
-         try:
-             driver = selenium.webdriver.Chrome()
-             driver.get("https://huggingface.co")
-             print("Cookies from Chrome:")
-             for cookie in driver.get_cookies():
-                 print(cookie)
-                 if cookie_name in cookie:
-                     print("Found OpenAI key!")
-                     openAIkey = cookie[cookie_name]
-         except Exception as e:
-             print(f"Exception: {e}")
-
-     return openAIkey
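
The commented-out branch in the file pickles a `requests` cookie jar to disk instead of going through a browser. A hedged, self-contained sketch of that simpler path is shown below; it reuses the module's `cookies.pcl` filename but is otherwise an assumption, not the module's actual behaviour.

```py
import os
import pickle
from typing import Optional

import requests

COOKIE_NAME = "openAIKey"
COOKIE_FNAME = "cookies.pcl"


def save_key(value: str) -> None:
    # Put the key into a cookie jar and pickle the jar to disk.
    s = requests.session()
    s.cookies.set(COOKIE_NAME, value)
    with open(COOKIE_FNAME, "wb") as f:
        pickle.dump(s.cookies, f)


def load_key() -> Optional[str]:
    # Restore the jar (if a previous run saved one) and read the key back.
    s = requests.session()
    if os.path.exists(COOKIE_FNAME):
        with open(COOKIE_FNAME, "rb") as f:
            s.cookies.update(pickle.load(f))
    return s.cookies.get(COOKIE_NAME)
```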
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/scripts/image_nll.py DELETED
@@ -1,96 +0,0 @@
1
- """
2
- Approximate the bits/dimension for an image model.
3
- """
4
-
5
- import argparse
6
- import os
7
-
8
- import numpy as np
9
- import torch.distributed as dist
10
-
11
- from guided_diffusion import dist_util, logger
12
- from guided_diffusion.image_datasets import load_data
13
- from guided_diffusion.script_util import (
14
- model_and_diffusion_defaults,
15
- create_model_and_diffusion,
16
- add_dict_to_argparser,
17
- args_to_dict,
18
- )
19
-
20
-
21
- def main():
22
- args = create_argparser().parse_args()
23
-
24
- dist_util.setup_dist()
25
- logger.configure()
26
-
27
- logger.log("creating model and diffusion...")
28
- model, diffusion = create_model_and_diffusion(
29
- **args_to_dict(args, model_and_diffusion_defaults().keys())
30
- )
31
- model.load_state_dict(
32
- dist_util.load_state_dict(args.model_path, map_location="cpu")
33
- )
34
- model.to(dist_util.dev())
35
- model.eval()
36
-
37
- logger.log("creating data loader...")
38
- data = load_data(
39
- data_dir=args.data_dir,
40
- batch_size=args.batch_size,
41
- image_size=args.image_size,
42
- class_cond=args.class_cond,
43
- deterministic=True,
44
- )
45
-
46
- logger.log("evaluating...")
47
- run_bpd_evaluation(model, diffusion, data, args.num_samples, args.clip_denoised)
48
-
49
-
50
- def run_bpd_evaluation(model, diffusion, data, num_samples, clip_denoised):
51
- all_bpd = []
52
- all_metrics = {"vb": [], "mse": [], "xstart_mse": []}
53
- num_complete = 0
54
- while num_complete < num_samples:
55
- batch, model_kwargs = next(data)
56
- batch = batch.to(dist_util.dev())
57
- model_kwargs = {k: v.to(dist_util.dev()) for k, v in model_kwargs.items()}
58
- minibatch_metrics = diffusion.calc_bpd_loop(
59
- model, batch, clip_denoised=clip_denoised, model_kwargs=model_kwargs
60
- )
61
-
62
- for key, term_list in all_metrics.items():
63
- terms = minibatch_metrics[key].mean(dim=0) / dist.get_world_size()
64
- dist.all_reduce(terms)
65
- term_list.append(terms.detach().cpu().numpy())
66
-
67
- total_bpd = minibatch_metrics["total_bpd"]
68
- total_bpd = total_bpd.mean() / dist.get_world_size()
69
- dist.all_reduce(total_bpd)
70
- all_bpd.append(total_bpd.item())
71
- num_complete += dist.get_world_size() * batch.shape[0]
72
-
73
- logger.log(f"done {num_complete} samples: bpd={np.mean(all_bpd)}")
74
-
75
- if dist.get_rank() == 0:
76
- for name, terms in all_metrics.items():
77
- out_path = os.path.join(logger.get_dir(), f"{name}_terms.npz")
78
- logger.log(f"saving {name} terms to {out_path}")
79
- np.savez(out_path, np.mean(np.stack(terms), axis=0))
80
-
81
- dist.barrier()
82
- logger.log("evaluation complete")
83
-
84
-
85
- def create_argparser():
86
- defaults = dict(
87
- data_dir="", clip_denoised=True, num_samples=1000, batch_size=1, model_path=""
88
- )
89
- defaults.update(model_and_diffusion_defaults())
90
- parser = argparse.ArgumentParser()
91
- add_dict_to_argparser(parser, defaults)
92
- return parser
93
-
94
-
95
- if __name__ == "__main__":
96
- main()
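
`calc_bpd_loop` already reports its terms in bits per dimension. As a reminder of what that unit means, the standard conversion from a total negative log-likelihood in nats to bits/dim is sketched below; the image shape is an assumption used only for the example.

```py
import numpy as np


def nats_to_bits_per_dim(nll_nats: float, image_shape=(3, 256, 256)) -> float:
    """Convert a total negative log-likelihood in nats to bits per dimension."""
    num_dims = int(np.prod(image_shape))
    return nll_nats / (np.log(2.0) * num_dims)


# e.g. a 3x256x256 image with a total NLL of 500_000 nats
print(round(nats_to_bits_per_dim(500_000.0), 3))  # ~= 3.669
```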
spaces/ArkanDash/rvc-models-new/lib/infer_pack/models_dml.py DELETED
@@ -1,1124 +0,0 @@
1
- import math, pdb, os
2
- from time import time as ttime
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
- from lib.infer_pack import modules
7
- from lib.infer_pack import attentions
8
- from lib.infer_pack import commons
9
- from lib.infer_pack.commons import init_weights, get_padding
10
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
11
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
12
- from lib.infer_pack.commons import init_weights
13
- import numpy as np
14
- from lib.infer_pack import commons
15
-
16
-
17
- class TextEncoder256(nn.Module):
18
- def __init__(
19
- self,
20
- out_channels,
21
- hidden_channels,
22
- filter_channels,
23
- n_heads,
24
- n_layers,
25
- kernel_size,
26
- p_dropout,
27
- f0=True,
28
- ):
29
- super().__init__()
30
- self.out_channels = out_channels
31
- self.hidden_channels = hidden_channels
32
- self.filter_channels = filter_channels
33
- self.n_heads = n_heads
34
- self.n_layers = n_layers
35
- self.kernel_size = kernel_size
36
- self.p_dropout = p_dropout
37
- self.emb_phone = nn.Linear(256, hidden_channels)
38
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
39
- if f0 == True:
40
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
41
- self.encoder = attentions.Encoder(
42
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
43
- )
44
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
45
-
46
- def forward(self, phone, pitch, lengths):
47
- if pitch == None:
48
- x = self.emb_phone(phone)
49
- else:
50
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
51
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
52
- x = self.lrelu(x)
53
- x = torch.transpose(x, 1, -1) # [b, h, t]
54
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
55
- x.dtype
56
- )
57
- x = self.encoder(x * x_mask, x_mask)
58
- stats = self.proj(x) * x_mask
59
-
60
- m, logs = torch.split(stats, self.out_channels, dim=1)
61
- return m, logs, x_mask
62
-
63
-
64
- class TextEncoder768(nn.Module):
65
- def __init__(
66
- self,
67
- out_channels,
68
- hidden_channels,
69
- filter_channels,
70
- n_heads,
71
- n_layers,
72
- kernel_size,
73
- p_dropout,
74
- f0=True,
75
- ):
76
- super().__init__()
77
- self.out_channels = out_channels
78
- self.hidden_channels = hidden_channels
79
- self.filter_channels = filter_channels
80
- self.n_heads = n_heads
81
- self.n_layers = n_layers
82
- self.kernel_size = kernel_size
83
- self.p_dropout = p_dropout
84
- self.emb_phone = nn.Linear(768, hidden_channels)
85
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
86
- if f0 == True:
87
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
88
- self.encoder = attentions.Encoder(
89
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
90
- )
91
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
92
-
93
- def forward(self, phone, pitch, lengths):
94
- if pitch == None:
95
- x = self.emb_phone(phone)
96
- else:
97
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
98
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
99
- x = self.lrelu(x)
100
- x = torch.transpose(x, 1, -1) # [b, h, t]
101
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
102
- x.dtype
103
- )
104
- x = self.encoder(x * x_mask, x_mask)
105
- stats = self.proj(x) * x_mask
106
-
107
- m, logs = torch.split(stats, self.out_channels, dim=1)
108
- return m, logs, x_mask
109
-
110
-
111
- class ResidualCouplingBlock(nn.Module):
112
- def __init__(
113
- self,
114
- channels,
115
- hidden_channels,
116
- kernel_size,
117
- dilation_rate,
118
- n_layers,
119
- n_flows=4,
120
- gin_channels=0,
121
- ):
122
- super().__init__()
123
- self.channels = channels
124
- self.hidden_channels = hidden_channels
125
- self.kernel_size = kernel_size
126
- self.dilation_rate = dilation_rate
127
- self.n_layers = n_layers
128
- self.n_flows = n_flows
129
- self.gin_channels = gin_channels
130
-
131
- self.flows = nn.ModuleList()
132
- for i in range(n_flows):
133
- self.flows.append(
134
- modules.ResidualCouplingLayer(
135
- channels,
136
- hidden_channels,
137
- kernel_size,
138
- dilation_rate,
139
- n_layers,
140
- gin_channels=gin_channels,
141
- mean_only=True,
142
- )
143
- )
144
- self.flows.append(modules.Flip())
145
-
146
- def forward(self, x, x_mask, g=None, reverse=False):
147
- if not reverse:
148
- for flow in self.flows:
149
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
150
- else:
151
- for flow in reversed(self.flows):
152
- x = flow(x, x_mask, g=g, reverse=reverse)
153
- return x
154
-
155
- def remove_weight_norm(self):
156
- for i in range(self.n_flows):
157
- self.flows[i * 2].remove_weight_norm()
158
-
159
-
160
- class PosteriorEncoder(nn.Module):
161
- def __init__(
162
- self,
163
- in_channels,
164
- out_channels,
165
- hidden_channels,
166
- kernel_size,
167
- dilation_rate,
168
- n_layers,
169
- gin_channels=0,
170
- ):
171
- super().__init__()
172
- self.in_channels = in_channels
173
- self.out_channels = out_channels
174
- self.hidden_channels = hidden_channels
175
- self.kernel_size = kernel_size
176
- self.dilation_rate = dilation_rate
177
- self.n_layers = n_layers
178
- self.gin_channels = gin_channels
179
-
180
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
181
- self.enc = modules.WN(
182
- hidden_channels,
183
- kernel_size,
184
- dilation_rate,
185
- n_layers,
186
- gin_channels=gin_channels,
187
- )
188
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
189
-
190
- def forward(self, x, x_lengths, g=None):
191
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
192
- x.dtype
193
- )
194
- x = self.pre(x) * x_mask
195
- x = self.enc(x, x_mask, g=g)
196
- stats = self.proj(x) * x_mask
197
- m, logs = torch.split(stats, self.out_channels, dim=1)
198
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
199
- return z, m, logs, x_mask
200
-
201
- def remove_weight_norm(self):
202
- self.enc.remove_weight_norm()
203
-
204
-
205
- class Generator(torch.nn.Module):
206
- def __init__(
207
- self,
208
- initial_channel,
209
- resblock,
210
- resblock_kernel_sizes,
211
- resblock_dilation_sizes,
212
- upsample_rates,
213
- upsample_initial_channel,
214
- upsample_kernel_sizes,
215
- gin_channels=0,
216
- ):
217
- super(Generator, self).__init__()
218
- self.num_kernels = len(resblock_kernel_sizes)
219
- self.num_upsamples = len(upsample_rates)
220
- self.conv_pre = Conv1d(
221
- initial_channel, upsample_initial_channel, 7, 1, padding=3
222
- )
223
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
224
-
225
- self.ups = nn.ModuleList()
226
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
227
- self.ups.append(
228
- weight_norm(
229
- ConvTranspose1d(
230
- upsample_initial_channel // (2**i),
231
- upsample_initial_channel // (2 ** (i + 1)),
232
- k,
233
- u,
234
- padding=(k - u) // 2,
235
- )
236
- )
237
- )
238
-
239
- self.resblocks = nn.ModuleList()
240
- for i in range(len(self.ups)):
241
- ch = upsample_initial_channel // (2 ** (i + 1))
242
- for j, (k, d) in enumerate(
243
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
244
- ):
245
- self.resblocks.append(resblock(ch, k, d))
246
-
247
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
248
- self.ups.apply(init_weights)
249
-
250
- if gin_channels != 0:
251
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
252
-
253
- def forward(self, x, g=None):
254
- x = self.conv_pre(x)
255
- if g is not None:
256
- x = x + self.cond(g)
257
-
258
- for i in range(self.num_upsamples):
259
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
260
- x = self.ups[i](x)
261
- xs = None
262
- for j in range(self.num_kernels):
263
- if xs is None:
264
- xs = self.resblocks[i * self.num_kernels + j](x)
265
- else:
266
- xs += self.resblocks[i * self.num_kernels + j](x)
267
- x = xs / self.num_kernels
268
- x = F.leaky_relu(x)
269
- x = self.conv_post(x)
270
- x = torch.tanh(x)
271
-
272
- return x
273
-
274
- def remove_weight_norm(self):
275
- for l in self.ups:
276
- remove_weight_norm(l)
277
- for l in self.resblocks:
278
- l.remove_weight_norm()
279
-
280
-
281
- class SineGen(torch.nn.Module):
282
- """Definition of sine generator
283
- SineGen(samp_rate, harmonic_num = 0,
284
- sine_amp = 0.1, noise_std = 0.003,
285
- voiced_threshold = 0,
286
- flag_for_pulse=False)
287
- samp_rate: sampling rate in Hz
288
- harmonic_num: number of harmonic overtones (default 0)
289
- sine_amp: amplitude of sine-wavefrom (default 0.1)
290
- noise_std: std of Gaussian noise (default 0.003)
291
- voiced_thoreshold: F0 threshold for U/V classification (default 0)
292
- flag_for_pulse: this SinGen is used inside PulseGen (default False)
293
- Note: when flag_for_pulse is True, the first time step of a voiced
294
- segment is always sin(np.pi) or cos(0)
295
- """
296
-
297
- def __init__(
298
- self,
299
- samp_rate,
300
- harmonic_num=0,
301
- sine_amp=0.1,
302
- noise_std=0.003,
303
- voiced_threshold=0,
304
- flag_for_pulse=False,
305
- ):
306
- super(SineGen, self).__init__()
307
- self.sine_amp = sine_amp
308
- self.noise_std = noise_std
309
- self.harmonic_num = harmonic_num
310
- self.dim = self.harmonic_num + 1
311
- self.sampling_rate = samp_rate
312
- self.voiced_threshold = voiced_threshold
313
-
314
- def _f02uv(self, f0):
315
- # generate uv signal
316
- uv = torch.ones_like(f0)
317
- uv = uv * (f0 > self.voiced_threshold)
318
- return uv.float()
319
-
320
- def forward(self, f0, upp):
321
- """sine_tensor, uv = forward(f0)
322
- input F0: tensor(batchsize=1, length, dim=1)
323
- f0 for unvoiced steps should be 0
324
- output sine_tensor: tensor(batchsize=1, length, dim)
325
- output uv: tensor(batchsize=1, length, 1)
326
- """
327
- with torch.no_grad():
328
- f0 = f0[:, None].transpose(1, 2)
329
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
330
- # fundamental component
331
- f0_buf[:, :, 0] = f0[:, :, 0]
332
- for idx in np.arange(self.harmonic_num):
333
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
334
- idx + 2
335
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
336
- rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化
337
- rand_ini = torch.rand(
338
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
339
- )
340
- rand_ini[:, 0] = 0
341
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
342
- tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化
343
- tmp_over_one *= upp
344
- tmp_over_one = F.interpolate(
345
- tmp_over_one.transpose(2, 1),
346
- scale_factor=upp,
347
- mode="linear",
348
- align_corners=True,
349
- ).transpose(2, 1)
350
- rad_values = F.interpolate(
351
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
352
- ).transpose(
353
- 2, 1
354
- ) #######
355
- tmp_over_one %= 1
356
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
357
- cumsum_shift = torch.zeros_like(rad_values)
358
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
359
- sine_waves = torch.sin(
360
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
361
- )
362
- sine_waves = sine_waves * self.sine_amp
363
- uv = self._f02uv(f0)
364
- uv = F.interpolate(
365
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
366
- ).transpose(2, 1)
367
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
368
- noise = noise_amp * torch.randn_like(sine_waves)
369
- sine_waves = sine_waves * uv + noise
370
- return sine_waves, uv, noise
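
SineGen's docstring above describes a per-sample harmonic sine source gated by a voiced/unvoiced mask. Below is a stripped-down NumPy sketch of that idea; it omits the random initial phase, additive noise, and upsampling interpolation that the real module adds, so it is an illustration rather than a drop-in replacement.

```py
import numpy as np


def sine_harmonics(f0_hz, sr=16000, harmonic_num=0, sine_amp=0.1):
    """Toy per-sample sine source: fundamental plus `harmonic_num` overtones.

    `f0_hz` is a 1-D array of per-sample F0 values; unvoiced samples are 0 and
    simply produce silence for the deterministic part, mirroring the
    voiced/unvoiced mask used by SineGen.
    """
    f0 = np.asarray(f0_hz, dtype=np.float64)[:, None]          # (T, 1)
    orders = np.arange(1, harmonic_num + 2)[None, :]            # (1, H+1)
    phase = 2 * np.pi * np.cumsum(f0 * orders / sr, axis=0)     # integrate frequency
    voiced = (f0 > 0).astype(np.float64)
    return sine_amp * np.sin(phase) * voiced                    # (T, H+1)


# 100 ms of a 220 Hz tone with one overtone at 440 Hz
waves = sine_harmonics(np.full(1600, 220.0), sr=16000, harmonic_num=1)
print(waves.shape)  # (1600, 2)
```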
371
-
372
-
373
- class SourceModuleHnNSF(torch.nn.Module):
374
- """SourceModule for hn-nsf
375
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
376
- add_noise_std=0.003, voiced_threshod=0)
377
- sampling_rate: sampling_rate in Hz
378
- harmonic_num: number of harmonic above F0 (default: 0)
379
- sine_amp: amplitude of sine source signal (default: 0.1)
380
- add_noise_std: std of additive Gaussian noise (default: 0.003)
381
- note that amplitude of noise in unvoiced is decided
382
- by sine_amp
383
- voiced_threshold: threhold to set U/V given F0 (default: 0)
384
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
385
- F0_sampled (batchsize, length, 1)
386
- Sine_source (batchsize, length, 1)
387
- noise_source (batchsize, length 1)
388
- uv (batchsize, length, 1)
389
- """
390
-
391
- def __init__(
392
- self,
393
- sampling_rate,
394
- harmonic_num=0,
395
- sine_amp=0.1,
396
- add_noise_std=0.003,
397
- voiced_threshod=0,
398
- is_half=True,
399
- ):
400
- super(SourceModuleHnNSF, self).__init__()
401
-
402
- self.sine_amp = sine_amp
403
- self.noise_std = add_noise_std
404
- self.is_half = is_half
405
- # to produce sine waveforms
406
- self.l_sin_gen = SineGen(
407
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
408
- )
409
-
410
- # to merge source harmonics into a single excitation
411
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
412
- self.l_tanh = torch.nn.Tanh()
413
-
414
- def forward(self, x, upp=None):
415
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
416
- if self.is_half:
417
- sine_wavs = sine_wavs.half()
418
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
419
- return sine_merge, None, None # noise, uv
420
-
421
-
422
- class GeneratorNSF(torch.nn.Module):
423
- def __init__(
424
- self,
425
- initial_channel,
426
- resblock,
427
- resblock_kernel_sizes,
428
- resblock_dilation_sizes,
429
- upsample_rates,
430
- upsample_initial_channel,
431
- upsample_kernel_sizes,
432
- gin_channels,
433
- sr,
434
- is_half=False,
435
- ):
436
- super(GeneratorNSF, self).__init__()
437
- self.num_kernels = len(resblock_kernel_sizes)
438
- self.num_upsamples = len(upsample_rates)
439
-
440
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
441
- self.m_source = SourceModuleHnNSF(
442
- sampling_rate=sr, harmonic_num=0, is_half=is_half
443
- )
444
- self.noise_convs = nn.ModuleList()
445
- self.conv_pre = Conv1d(
446
- initial_channel, upsample_initial_channel, 7, 1, padding=3
447
- )
448
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
449
-
450
- self.ups = nn.ModuleList()
451
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
452
- c_cur = upsample_initial_channel // (2 ** (i + 1))
453
- self.ups.append(
454
- weight_norm(
455
- ConvTranspose1d(
456
- upsample_initial_channel // (2**i),
457
- upsample_initial_channel // (2 ** (i + 1)),
458
- k,
459
- u,
460
- padding=(k - u) // 2,
461
- )
462
- )
463
- )
464
- if i + 1 < len(upsample_rates):
465
- stride_f0 = np.prod(upsample_rates[i + 1 :])
466
- self.noise_convs.append(
467
- Conv1d(
468
- 1,
469
- c_cur,
470
- kernel_size=stride_f0 * 2,
471
- stride=stride_f0,
472
- padding=stride_f0 // 2,
473
- )
474
- )
475
- else:
476
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
477
-
478
- self.resblocks = nn.ModuleList()
479
- for i in range(len(self.ups)):
480
- ch = upsample_initial_channel // (2 ** (i + 1))
481
- for j, (k, d) in enumerate(
482
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
483
- ):
484
- self.resblocks.append(resblock(ch, k, d))
485
-
486
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
487
- self.ups.apply(init_weights)
488
-
489
- if gin_channels != 0:
490
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
491
-
492
- self.upp = np.prod(upsample_rates)
493
-
494
- def forward(self, x, f0, g=None):
495
- har_source, noi_source, uv = self.m_source(f0, self.upp)
496
- har_source = har_source.transpose(1, 2)
497
- x = self.conv_pre(x)
498
- if g is not None:
499
- x = x + self.cond(g)
500
-
501
- for i in range(self.num_upsamples):
502
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
503
- x = self.ups[i](x)
504
- x_source = self.noise_convs[i](har_source)
505
- x = x + x_source
506
- xs = None
507
- for j in range(self.num_kernels):
508
- if xs is None:
509
- xs = self.resblocks[i * self.num_kernels + j](x)
510
- else:
511
- xs += self.resblocks[i * self.num_kernels + j](x)
512
- x = xs / self.num_kernels
513
- x = F.leaky_relu(x)
514
- x = self.conv_post(x)
515
- x = torch.tanh(x)
516
- return x
517
-
518
- def remove_weight_norm(self):
519
- for l in self.ups:
520
- remove_weight_norm(l)
521
- for l in self.resblocks:
522
- l.remove_weight_norm()
523
-
524
-
525
- sr2sr = {
526
- "32k": 32000,
527
- "40k": 40000,
528
- "48k": 48000,
529
- }
530
-
531
-
532
- class SynthesizerTrnMs256NSFsid(nn.Module):
533
- def __init__(
534
- self,
535
- spec_channels,
536
- segment_size,
537
- inter_channels,
538
- hidden_channels,
539
- filter_channels,
540
- n_heads,
541
- n_layers,
542
- kernel_size,
543
- p_dropout,
544
- resblock,
545
- resblock_kernel_sizes,
546
- resblock_dilation_sizes,
547
- upsample_rates,
548
- upsample_initial_channel,
549
- upsample_kernel_sizes,
550
- spk_embed_dim,
551
- gin_channels,
552
- sr,
553
- **kwargs
554
- ):
555
- super().__init__()
556
- if type(sr) == type("strr"):
557
- sr = sr2sr[sr]
558
- self.spec_channels = spec_channels
559
- self.inter_channels = inter_channels
560
- self.hidden_channels = hidden_channels
561
- self.filter_channels = filter_channels
562
- self.n_heads = n_heads
563
- self.n_layers = n_layers
564
- self.kernel_size = kernel_size
565
- self.p_dropout = p_dropout
566
- self.resblock = resblock
567
- self.resblock_kernel_sizes = resblock_kernel_sizes
568
- self.resblock_dilation_sizes = resblock_dilation_sizes
569
- self.upsample_rates = upsample_rates
570
- self.upsample_initial_channel = upsample_initial_channel
571
- self.upsample_kernel_sizes = upsample_kernel_sizes
572
- self.segment_size = segment_size
573
- self.gin_channels = gin_channels
574
- # self.hop_length = hop_length#
575
- self.spk_embed_dim = spk_embed_dim
576
- self.enc_p = TextEncoder256(
577
- inter_channels,
578
- hidden_channels,
579
- filter_channels,
580
- n_heads,
581
- n_layers,
582
- kernel_size,
583
- p_dropout,
584
- )
585
- self.dec = GeneratorNSF(
586
- inter_channels,
587
- resblock,
588
- resblock_kernel_sizes,
589
- resblock_dilation_sizes,
590
- upsample_rates,
591
- upsample_initial_channel,
592
- upsample_kernel_sizes,
593
- gin_channels=gin_channels,
594
- sr=sr,
595
- is_half=kwargs["is_half"],
596
- )
597
- self.enc_q = PosteriorEncoder(
598
- spec_channels,
599
- inter_channels,
600
- hidden_channels,
601
- 5,
602
- 1,
603
- 16,
604
- gin_channels=gin_channels,
605
- )
606
- self.flow = ResidualCouplingBlock(
607
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
608
- )
609
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
610
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
611
-
612
- def remove_weight_norm(self):
613
- self.dec.remove_weight_norm()
614
- self.flow.remove_weight_norm()
615
- self.enc_q.remove_weight_norm()
616
-
617
- def forward(
618
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
619
- ): # 这里ds是id,[bs,1]
620
- # print(1,pitch.shape)#[bs,t]
621
- g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
622
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
623
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
624
- z_p = self.flow(z, y_mask, g=g)
625
- z_slice, ids_slice = commons.rand_slice_segments(
626
- z, y_lengths, self.segment_size
627
- )
628
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
629
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
630
- # print(-2,pitchf.shape,z_slice.shape)
631
- o = self.dec(z_slice, pitchf, g=g)
632
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
633
-
634
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
635
- g = self.emb_g(sid).unsqueeze(-1)
636
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
637
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
638
- z = self.flow(z_p, x_mask, g=g, reverse=True)
639
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
640
- return o, x_mask, (z, z_p, m_p, logs_p)
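
`infer` above draws latents from the text-encoder prior with a fixed temperature before running the reverse flow and the decoder. A one-function sketch of that reparameterized draw; 0.66666 is the constant hard-coded in these synthesizers.

```py
import torch


def sample_prior(m_p: torch.Tensor, logs_p: torch.Tensor, temperature: float = 0.66666) -> torch.Tensor:
    # m_p / logs_p are the mean and log-std produced by the text encoder;
    # temperature scales the Gaussian noise (0 makes decoding deterministic).
    return m_p + torch.exp(logs_p) * torch.randn_like(m_p) * temperature
```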
641
-
642
-
643
- class SynthesizerTrnMs768NSFsid(nn.Module):
644
- def __init__(
645
- self,
646
- spec_channels,
647
- segment_size,
648
- inter_channels,
649
- hidden_channels,
650
- filter_channels,
651
- n_heads,
652
- n_layers,
653
- kernel_size,
654
- p_dropout,
655
- resblock,
656
- resblock_kernel_sizes,
657
- resblock_dilation_sizes,
658
- upsample_rates,
659
- upsample_initial_channel,
660
- upsample_kernel_sizes,
661
- spk_embed_dim,
662
- gin_channels,
663
- sr,
664
- **kwargs
665
- ):
666
- super().__init__()
667
- if type(sr) == type("strr"):
668
- sr = sr2sr[sr]
669
- self.spec_channels = spec_channels
670
- self.inter_channels = inter_channels
671
- self.hidden_channels = hidden_channels
672
- self.filter_channels = filter_channels
673
- self.n_heads = n_heads
674
- self.n_layers = n_layers
675
- self.kernel_size = kernel_size
676
- self.p_dropout = p_dropout
677
- self.resblock = resblock
678
- self.resblock_kernel_sizes = resblock_kernel_sizes
679
- self.resblock_dilation_sizes = resblock_dilation_sizes
680
- self.upsample_rates = upsample_rates
681
- self.upsample_initial_channel = upsample_initial_channel
682
- self.upsample_kernel_sizes = upsample_kernel_sizes
683
- self.segment_size = segment_size
684
- self.gin_channels = gin_channels
685
- # self.hop_length = hop_length#
686
- self.spk_embed_dim = spk_embed_dim
687
- self.enc_p = TextEncoder768(
688
- inter_channels,
689
- hidden_channels,
690
- filter_channels,
691
- n_heads,
692
- n_layers,
693
- kernel_size,
694
- p_dropout,
695
- )
696
- self.dec = GeneratorNSF(
697
- inter_channels,
698
- resblock,
699
- resblock_kernel_sizes,
700
- resblock_dilation_sizes,
701
- upsample_rates,
702
- upsample_initial_channel,
703
- upsample_kernel_sizes,
704
- gin_channels=gin_channels,
705
- sr=sr,
706
- is_half=kwargs["is_half"],
707
- )
708
- self.enc_q = PosteriorEncoder(
709
- spec_channels,
710
- inter_channels,
711
- hidden_channels,
712
- 5,
713
- 1,
714
- 16,
715
- gin_channels=gin_channels,
716
- )
717
- self.flow = ResidualCouplingBlock(
718
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
719
- )
720
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
721
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
722
-
723
- def remove_weight_norm(self):
724
- self.dec.remove_weight_norm()
725
- self.flow.remove_weight_norm()
726
- self.enc_q.remove_weight_norm()
727
-
728
- def forward(
729
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
730
- ): # 这里ds是id,[bs,1]
731
- # print(1,pitch.shape)#[bs,t]
732
- g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
733
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
734
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
735
- z_p = self.flow(z, y_mask, g=g)
736
- z_slice, ids_slice = commons.rand_slice_segments(
737
- z, y_lengths, self.segment_size
738
- )
739
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
740
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
741
- # print(-2,pitchf.shape,z_slice.shape)
742
- o = self.dec(z_slice, pitchf, g=g)
743
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
744
-
745
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
746
- g = self.emb_g(sid).unsqueeze(-1)
747
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
748
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
749
- z = self.flow(z_p, x_mask, g=g, reverse=True)
750
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
751
- return o, x_mask, (z, z_p, m_p, logs_p)
752
-
753
-
754
- class SynthesizerTrnMs256NSFsid_nono(nn.Module):
755
- def __init__(
756
- self,
757
- spec_channels,
758
- segment_size,
759
- inter_channels,
760
- hidden_channels,
761
- filter_channels,
762
- n_heads,
763
- n_layers,
764
- kernel_size,
765
- p_dropout,
766
- resblock,
767
- resblock_kernel_sizes,
768
- resblock_dilation_sizes,
769
- upsample_rates,
770
- upsample_initial_channel,
771
- upsample_kernel_sizes,
772
- spk_embed_dim,
773
- gin_channels,
774
- sr=None,
775
- **kwargs
776
- ):
777
- super().__init__()
778
- self.spec_channels = spec_channels
779
- self.inter_channels = inter_channels
780
- self.hidden_channels = hidden_channels
781
- self.filter_channels = filter_channels
782
- self.n_heads = n_heads
783
- self.n_layers = n_layers
784
- self.kernel_size = kernel_size
785
- self.p_dropout = p_dropout
786
- self.resblock = resblock
787
- self.resblock_kernel_sizes = resblock_kernel_sizes
788
- self.resblock_dilation_sizes = resblock_dilation_sizes
789
- self.upsample_rates = upsample_rates
790
- self.upsample_initial_channel = upsample_initial_channel
791
- self.upsample_kernel_sizes = upsample_kernel_sizes
792
- self.segment_size = segment_size
793
- self.gin_channels = gin_channels
794
- # self.hop_length = hop_length#
795
- self.spk_embed_dim = spk_embed_dim
796
- self.enc_p = TextEncoder256(
797
- inter_channels,
798
- hidden_channels,
799
- filter_channels,
800
- n_heads,
801
- n_layers,
802
- kernel_size,
803
- p_dropout,
804
- f0=False,
805
- )
806
- self.dec = Generator(
807
- inter_channels,
808
- resblock,
809
- resblock_kernel_sizes,
810
- resblock_dilation_sizes,
811
- upsample_rates,
812
- upsample_initial_channel,
813
- upsample_kernel_sizes,
814
- gin_channels=gin_channels,
815
- )
816
- self.enc_q = PosteriorEncoder(
817
- spec_channels,
818
- inter_channels,
819
- hidden_channels,
820
- 5,
821
- 1,
822
- 16,
823
- gin_channels=gin_channels,
824
- )
825
- self.flow = ResidualCouplingBlock(
826
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
827
- )
828
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
829
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
830
-
831
- def remove_weight_norm(self):
832
- self.dec.remove_weight_norm()
833
- self.flow.remove_weight_norm()
834
- self.enc_q.remove_weight_norm()
835
-
836
- def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]
837
- g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
838
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
839
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
840
- z_p = self.flow(z, y_mask, g=g)
841
- z_slice, ids_slice = commons.rand_slice_segments(
842
- z, y_lengths, self.segment_size
843
- )
844
- o = self.dec(z_slice, g=g)
845
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
846
-
847
- def infer(self, phone, phone_lengths, sid, max_len=None):
848
- g = self.emb_g(sid).unsqueeze(-1)
849
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
850
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
851
- z = self.flow(z_p, x_mask, g=g, reverse=True)
852
- o = self.dec((z * x_mask)[:, :, :max_len], g=g)
853
- return o, x_mask, (z, z_p, m_p, logs_p)
854
-
855
-
856
- class SynthesizerTrnMs768NSFsid_nono(nn.Module):
857
- def __init__(
858
- self,
859
- spec_channels,
860
- segment_size,
861
- inter_channels,
862
- hidden_channels,
863
- filter_channels,
864
- n_heads,
865
- n_layers,
866
- kernel_size,
867
- p_dropout,
868
- resblock,
869
- resblock_kernel_sizes,
870
- resblock_dilation_sizes,
871
- upsample_rates,
872
- upsample_initial_channel,
873
- upsample_kernel_sizes,
874
- spk_embed_dim,
875
- gin_channels,
876
- sr=None,
877
- **kwargs
878
- ):
879
- super().__init__()
880
- self.spec_channels = spec_channels
881
- self.inter_channels = inter_channels
882
- self.hidden_channels = hidden_channels
883
- self.filter_channels = filter_channels
884
- self.n_heads = n_heads
885
- self.n_layers = n_layers
886
- self.kernel_size = kernel_size
887
- self.p_dropout = p_dropout
888
- self.resblock = resblock
889
- self.resblock_kernel_sizes = resblock_kernel_sizes
890
- self.resblock_dilation_sizes = resblock_dilation_sizes
891
- self.upsample_rates = upsample_rates
892
- self.upsample_initial_channel = upsample_initial_channel
893
- self.upsample_kernel_sizes = upsample_kernel_sizes
894
- self.segment_size = segment_size
895
- self.gin_channels = gin_channels
896
- # self.hop_length = hop_length#
897
- self.spk_embed_dim = spk_embed_dim
898
- self.enc_p = TextEncoder768(
899
- inter_channels,
900
- hidden_channels,
901
- filter_channels,
902
- n_heads,
903
- n_layers,
904
- kernel_size,
905
- p_dropout,
906
- f0=False,
907
- )
908
- self.dec = Generator(
909
- inter_channels,
910
- resblock,
911
- resblock_kernel_sizes,
912
- resblock_dilation_sizes,
913
- upsample_rates,
914
- upsample_initial_channel,
915
- upsample_kernel_sizes,
916
- gin_channels=gin_channels,
917
- )
918
- self.enc_q = PosteriorEncoder(
919
- spec_channels,
920
- inter_channels,
921
- hidden_channels,
922
- 5,
923
- 1,
924
- 16,
925
- gin_channels=gin_channels,
926
- )
927
- self.flow = ResidualCouplingBlock(
928
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
929
- )
930
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
931
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
932
-
933
- def remove_weight_norm(self):
934
- self.dec.remove_weight_norm()
935
- self.flow.remove_weight_norm()
936
- self.enc_q.remove_weight_norm()
937
-
938
- def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]
939
- g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
940
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
941
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
942
- z_p = self.flow(z, y_mask, g=g)
943
- z_slice, ids_slice = commons.rand_slice_segments(
944
- z, y_lengths, self.segment_size
945
- )
946
- o = self.dec(z_slice, g=g)
947
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
948
-
949
- def infer(self, phone, phone_lengths, sid, max_len=None):
950
- g = self.emb_g(sid).unsqueeze(-1)
951
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
952
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
953
- z = self.flow(z_p, x_mask, g=g, reverse=True)
954
- o = self.dec((z * x_mask)[:, :, :max_len], g=g)
955
- return o, x_mask, (z, z_p, m_p, logs_p)
956
-
957
-
958
- class MultiPeriodDiscriminator(torch.nn.Module):
959
- def __init__(self, use_spectral_norm=False):
960
- super(MultiPeriodDiscriminator, self).__init__()
961
- periods = [2, 3, 5, 7, 11, 17]
962
- # periods = [3, 5, 7, 11, 17, 23, 37]
963
-
964
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
965
- discs = discs + [
966
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
967
- ]
968
- self.discriminators = nn.ModuleList(discs)
969
-
970
- def forward(self, y, y_hat):
971
- y_d_rs = [] #
972
- y_d_gs = []
973
- fmap_rs = []
974
- fmap_gs = []
975
- for i, d in enumerate(self.discriminators):
976
- y_d_r, fmap_r = d(y)
977
- y_d_g, fmap_g = d(y_hat)
978
- # for j in range(len(fmap_r)):
979
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
980
- y_d_rs.append(y_d_r)
981
- y_d_gs.append(y_d_g)
982
- fmap_rs.append(fmap_r)
983
- fmap_gs.append(fmap_g)
984
-
985
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
986
-
987
-
988
- class MultiPeriodDiscriminatorV2(torch.nn.Module):
989
- def __init__(self, use_spectral_norm=False):
990
- super(MultiPeriodDiscriminatorV2, self).__init__()
991
- # periods = [2, 3, 5, 7, 11, 17]
992
- periods = [2, 3, 5, 7, 11, 17, 23, 37]
993
-
994
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
995
- discs = discs + [
996
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
997
- ]
998
- self.discriminators = nn.ModuleList(discs)
999
-
1000
- def forward(self, y, y_hat):
1001
- y_d_rs = [] #
1002
- y_d_gs = []
1003
- fmap_rs = []
1004
- fmap_gs = []
1005
- for i, d in enumerate(self.discriminators):
1006
- y_d_r, fmap_r = d(y)
1007
- y_d_g, fmap_g = d(y_hat)
1008
- # for j in range(len(fmap_r)):
1009
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
1010
- y_d_rs.append(y_d_r)
1011
- y_d_gs.append(y_d_g)
1012
- fmap_rs.append(fmap_r)
1013
- fmap_gs.append(fmap_g)
1014
-
1015
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
1016
-
1017
-
1018
- class DiscriminatorS(torch.nn.Module):
1019
- def __init__(self, use_spectral_norm=False):
1020
- super(DiscriminatorS, self).__init__()
1021
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
1022
- self.convs = nn.ModuleList(
1023
- [
1024
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
1025
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
1026
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
1027
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
1028
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
1029
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
1030
- ]
1031
- )
1032
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
1033
-
1034
- def forward(self, x):
1035
- fmap = []
1036
-
1037
- for l in self.convs:
1038
- x = l(x)
1039
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
1040
- fmap.append(x)
1041
- x = self.conv_post(x)
1042
- fmap.append(x)
1043
- x = torch.flatten(x, 1, -1)
1044
-
1045
- return x, fmap
1046
-
1047
-
1048
- class DiscriminatorP(torch.nn.Module):
1049
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
1050
- super(DiscriminatorP, self).__init__()
1051
- self.period = period
1052
- self.use_spectral_norm = use_spectral_norm
1053
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
1054
- self.convs = nn.ModuleList(
1055
- [
1056
- norm_f(
1057
- Conv2d(
1058
- 1,
1059
- 32,
1060
- (kernel_size, 1),
1061
- (stride, 1),
1062
- padding=(get_padding(kernel_size, 1), 0),
1063
- )
1064
- ),
1065
- norm_f(
1066
- Conv2d(
1067
- 32,
1068
- 128,
1069
- (kernel_size, 1),
1070
- (stride, 1),
1071
- padding=(get_padding(kernel_size, 1), 0),
1072
- )
1073
- ),
1074
- norm_f(
1075
- Conv2d(
1076
- 128,
1077
- 512,
1078
- (kernel_size, 1),
1079
- (stride, 1),
1080
- padding=(get_padding(kernel_size, 1), 0),
1081
- )
1082
- ),
1083
- norm_f(
1084
- Conv2d(
1085
- 512,
1086
- 1024,
1087
- (kernel_size, 1),
1088
- (stride, 1),
1089
- padding=(get_padding(kernel_size, 1), 0),
1090
- )
1091
- ),
1092
- norm_f(
1093
- Conv2d(
1094
- 1024,
1095
- 1024,
1096
- (kernel_size, 1),
1097
- 1,
1098
- padding=(get_padding(kernel_size, 1), 0),
1099
- )
1100
- ),
1101
- ]
1102
- )
1103
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
1104
-
1105
- def forward(self, x):
1106
- fmap = []
1107
-
1108
- # 1d to 2d
1109
- b, c, t = x.shape
1110
- if t % self.period != 0: # pad first
1111
- n_pad = self.period - (t % self.period)
1112
- x = F.pad(x, (0, n_pad), "reflect")
1113
- t = t + n_pad
1114
- x = x.view(b, c, t // self.period, self.period)
1115
-
1116
- for l in self.convs:
1117
- x = l(x)
1118
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
1119
- fmap.append(x)
1120
- x = self.conv_post(x)
1121
- fmap.append(x)
1122
- x = torch.flatten(x, 1, -1)
1123
-
1124
- return x, fmap
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/extern/__init__.py DELETED
@@ -1,76 +0,0 @@
- import importlib.util
- import sys
-
-
- class VendorImporter:
-     """
-     A PEP 302 meta path importer for finding optionally-vendored
-     or otherwise naturally-installed packages from root_name.
-     """
-
-     def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
-         self.root_name = root_name
-         self.vendored_names = set(vendored_names)
-         self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
-
-     @property
-     def search_path(self):
-         """
-         Search first the vendor package then as a natural package.
-         """
-         yield self.vendor_pkg + '.'
-         yield ''
-
-     def _module_matches_namespace(self, fullname):
-         """Figure out if the target module is vendored."""
-         root, base, target = fullname.partition(self.root_name + '.')
-         return not root and any(map(target.startswith, self.vendored_names))
-
-     def load_module(self, fullname):
-         """
-         Iterate over the search path to locate and load fullname.
-         """
-         root, base, target = fullname.partition(self.root_name + '.')
-         for prefix in self.search_path:
-             try:
-                 extant = prefix + target
-                 __import__(extant)
-                 mod = sys.modules[extant]
-                 sys.modules[fullname] = mod
-                 return mod
-             except ImportError:
-                 pass
-         else:
-             raise ImportError(
-                 "The '{target}' package is required; "
-                 "normally this is bundled with this package so if you get "
-                 "this warning, consult the packager of your "
-                 "distribution.".format(**locals())
-             )
-
-     def create_module(self, spec):
-         return self.load_module(spec.name)
-
-     def exec_module(self, module):
-         pass
-
-     def find_spec(self, fullname, path=None, target=None):
-         """Return a module spec for vendored names."""
-         return (
-             importlib.util.spec_from_loader(fullname, self)
-             if self._module_matches_namespace(fullname) else None
-         )
-
-     def install(self):
-         """
-         Install this importer into sys.meta_path if not already present.
-         """
-         if self not in sys.meta_path:
-             sys.meta_path.append(self)
-
-
- names = (
-     'packaging', 'pyparsing', 'appdirs', 'jaraco', 'importlib_resources',
-     'more_itertools',
- )
- VendorImporter(__name__, names).install()
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/roi_heads/__init__.py DELETED
@@ -1,29 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- from .box_head import ROI_BOX_HEAD_REGISTRY, build_box_head, FastRCNNConvFCHead
- from .keypoint_head import (
-     ROI_KEYPOINT_HEAD_REGISTRY,
-     build_keypoint_head,
-     BaseKeypointRCNNHead,
-     KRCNNConvDeconvUpsampleHead,
- )
- from .mask_head import (
-     ROI_MASK_HEAD_REGISTRY,
-     build_mask_head,
-     BaseMaskRCNNHead,
-     MaskRCNNConvUpsampleHead,
- )
- from .roi_heads import (
-     ROI_HEADS_REGISTRY,
-     ROIHeads,
-     Res5ROIHeads,
-     StandardROIHeads,
-     build_roi_heads,
-     select_foreground_proposals,
- )
- from .cascade_rcnn import CascadeROIHeads
- from .rotated_fast_rcnn import RROIHeads
- from .fast_rcnn import FastRCNNOutputLayers
-
- from . import cascade_rcnn  # isort:skip
-
- __all__ = list(globals().keys())
 
spaces/B-patents/patent-bert/app.py DELETED
@@ -1,351 +0,0 @@
1
- import gradio as gr
2
- import numpy as np
3
- import torch
4
- import re
5
- from nltk.stem import PorterStemmer
6
- from collections import defaultdict
7
- from transformers import pipeline
8
- from transformers.pipelines import PIPELINE_REGISTRY, FillMaskPipeline
9
- from transformers import AutoModelForMaskedLM
10
-
11
- ex_str1 = "A crustless sandwich made from two slices of baked bread. The sandwich includes first and second matching " \
12
- "crustless bread pieces. The bread pieces have the same general outer shape defined by an outer periphery " \
13
- "with central portions surrounded by an outer peripheral area, the bread pieces being at least partially " \
14
- "crimped together at the outer peripheral area."
15
- ex_key1 = "sandwich bread crimped"
16
-
17
- ex_str2 = "The present disclosure provides a DNA-targeting RNA that comprises a targeting sequence and, together with" \
18
- " a modifying polypeptide, provides for site-specific modification of a target DNA and/or a polypeptide" \
19
- " associated with the target DNA. "
20
- ex_key2 = "DNA target modification"
21
-
22
- ex_str3 = "The graphite plane is composed of a two-dimensional hexagonal lattice of carbon atoms and the plate has a " \
23
- "length and a width parallel to the graphite plane and a thickness orthogonal to the graphite plane with at " \
24
- "least one of the length, width, and thickness values being 100 nanometers or smaller. "
25
- ex_key3 = "graphite lattice orthogonal "
26
-
27
- tab_two_examples = [[ex_str1, ex_key1],
28
- [ex_str2, ex_key2],
29
- [ex_str3, ex_key3]]
30
- #
31
- # tab_one_examples = [['A crustless _ made from two slices of baked bread.'],
32
- # ['The present disclosure provides a DNA-targeting RNA that comprises a targeting _.'],
33
- # ['The _ plane is composed of a two-dimensional hexagonal lattice of carbon atoms.']
34
- # ]
35
-
36
- ignore_str = ['a', 'an', 'the', 'is', 'and', 'or', '!', '(', ')', '-', '[', ']', '{', '}', ';', ':', "'", '"', '\\',
37
- ',', '<', '>', '.', '/', '?', '@', '#', '$', '%', '^', '&', '*', '_', '~']
38
-
39
-
40
- def add_mask(text, lower_bound=0, index=None):
41
- split_text = text.split()
42
- if index is not None:
43
- split_text[index] = '[MASK]'
44
- return ' '.join(split_text), None
45
- # If the user supplies a mask, don't add more
46
- if '_' in split_text:
47
- u_pos = [i for i, s in enumerate(split_text) if '_' in s][0]
48
- split_text[u_pos] = '[MASK]'
49
- return ' '.join(split_text), '[MASK]'
50
-
51
- idx = np.random.randint(low=lower_bound, high=len(split_text), size=1).astype(int)[0]
52
- # Don't mask certain words
53
- num_iters = 0
54
- while split_text[idx].lower() in ignore_str:
55
- num_iters += 1
56
- idx = np.random.randint(len(split_text), size=1).astype(int)[0]
57
- if num_iters > 10:
58
- break
59
-
60
- masked_string = split_text[idx]
61
- split_text[idx] = '[MASK]'
62
- masked_output = ' '.join(split_text)
63
- return masked_output, masked_string
64
-
65
-
66
- class TempScalePipe(FillMaskPipeline):
67
- def _sanitize_parameters(self, top_k=None, targets=None, temp=None):
68
- postprocess_params = {}
69
-
70
- if targets is not None:
71
- target_ids = self.get_target_ids(targets, top_k)
72
- postprocess_params["target_ids"] = target_ids
73
-
74
- if top_k is not None:
75
- postprocess_params["top_k"] = top_k
76
-
77
- if temp is not None:
78
- postprocess_params["temp"] = temp
79
- return {}, {}, postprocess_params
80
-
81
-
82
- def __call__(self, inputs, *args, **kwargs):
83
- """
84
- Fill the masked token in the text(s) given as inputs.
85
-
86
- Args:
87
- args (`str` or `List[str]`):
88
- One or several texts (or one list of prompts) with masked tokens.
89
- targets (`str` or `List[str]`, *optional*):
90
- When passed, the model will limit the scores to the passed targets instead of looking up in the whole
91
- vocab. If the provided targets are not in the model vocab, they will be tokenized and the first
92
- resulting token will be used (with a warning, and that might be slower).
93
- top_k (`int`, *optional*):
94
- When passed, overrides the number of predictions to return.
95
-
96
- Return:
97
- A list or a list of list of `dict`: Each result comes as list of dictionaries with the following keys:
98
-
99
- - **sequence** (`str`) -- The corresponding input with the mask token prediction.
100
- - **score** (`float`) -- The corresponding probability.
101
- - **token** (`int`) -- The predicted token id (to replace the masked one).
102
- - **token** (`str`) -- The predicted token (to replace the masked one).
103
- """
104
- outputs = super().__call__(inputs, **kwargs)
105
- if isinstance(inputs, list) and len(inputs) == 1:
106
- return outputs[0]
107
- return outputs
108
-
109
- def postprocess(self, model_outputs, top_k=10, target_ids=None, temp=1):
110
- # Cap top_k if there are targets
111
- if target_ids is not None and target_ids.shape[0] < top_k:
112
- top_k = target_ids.shape[0]
113
- input_ids = model_outputs["input_ids"][0]
114
- outputs = model_outputs["logits"]
115
-
116
- masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
117
- # Fill mask pipeline supports only one ${mask_token} per sample
118
-
119
- logits = outputs[0, masked_index, :] / temp
120
- probs = logits.softmax(dim=-1)
121
- sampling = False
122
- if sampling:
123
- predictions = torch.multinomial(probs, num_samples=3)
124
- values = probs[0, predictions]
125
- if target_ids is not None:
126
- probs = probs[..., target_ids]
127
- if not sampling:
128
- values, predictions = probs.topk(top_k)
129
-
130
- result = []
131
- single_mask = values.shape[0] == 1
132
- for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
133
- row = []
134
- for v, p in zip(_values, _predictions):
135
- # Copy is important since we're going to modify this array in place
136
- tokens = input_ids.numpy().copy()
137
- if target_ids is not None:
138
- p = target_ids[p].tolist()
139
-
140
- tokens[masked_index[i]] = p
141
- # Filter padding out:
142
- tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
143
- # Originally we skip special tokens to give readable output.
144
- # For multi masks though, the other [MASK] would be removed otherwise
145
- # making the output look odd, so we add them back
146
- sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
147
- proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
148
- row.append(proposition)
149
- result.append(row)
150
- if single_mask:
151
- return result[0]
152
- return result
153
-
154
-
155
- PIPELINE_REGISTRY.register_pipeline(
156
- "temp-scale",
157
- pipeline_class=TempScalePipe,
158
- pt_model=AutoModelForMaskedLM,
159
- )
160
- scrambler = pipeline("temp-scale", model="anferico/bert-for-patents")
161
-
162
- generator = pipeline('text-generation', model='gpt2')
163
-
164
- def sample_output(out, sampling):
165
- score_to_str = {out[k]: k for k in out.keys()}
166
- score_list = list(score_to_str.keys())
167
- if sampling == 'multi':
168
- idx = np.argmax(np.random.multinomial(1, score_list, 1))
169
- else:
170
- idx = np.random.randint(0, len(score_list))
171
- score = score_list[idx]
172
- return score_to_str[score]
173
-
174
-
175
- def unmask_single(text, temp=1):
176
- masked_text, _ = add_mask(text)
177
- res = scrambler(masked_text, temp=temp, top_k=10)
178
- out = {item["token_str"]: item["score"] for item in res}
179
- return out
180
-
181
-
182
- def unmask(text, temp, rounds, lower_bound=0):
183
- sampling = 'multi'
184
- for _ in range(rounds):
185
- masked_text, masked = add_mask(text, lower_bound)
186
- split_text = masked_text.split()
187
- res = scrambler(masked_text, temp=temp, top_k=15)
188
- mask_pos = [i for i, t in enumerate(split_text) if 'MASK' in t][0]
189
- out = {item["token_str"]: item["score"] for item in res}
190
- new_token = sample_output(out, sampling)
191
- unsuccessful_iters = 0
192
- while masked in new_token:
193
- if unsuccessful_iters > 5:
194
- break
195
- print('skipped', new_token)
196
- new_token = sample_output(out, sampling=sampling)
197
- unsuccessful_iters += 1
198
- if masked in new_token:
199
- split_text[mask_pos] = new_token
200
- else:
201
- split_text[mask_pos] = '*' + new_token + '*'
202
- text = ' '.join(split_text)
203
-
204
- text = list(text)
205
- text[0] = text[0].upper()
206
- return ''.join(text)
207
-
208
-
209
- def autocomplete(text, temp):
210
- output = generator(text, max_length=30, num_return_sequences=1)
211
- gpt_out = output[0]['generated_text']
212
- # diff = gpt_out.replace(text, '')
213
- patent_bert_out = unmask(gpt_out, temp=temp, rounds=5, lower_bound=len(text.split()))
214
- # Take the output from gpt-2 and randomly mask, if a mask is confident, swap it in. Iterate 5 times
215
- return patent_bert_out
216
-
217
-
218
- def extract_keywords(text, queries):
219
- q_dict = {}
220
- temp = 1 # set temperature to 1
221
- for query in queries.split():
222
- # Iterate through text and mask each token
223
- ps = PorterStemmer()
224
- top_scores = defaultdict(list)
225
- top_k_range = 30
226
- text_no_punc = re.sub(r'[^\w\s]', '', text)
227
- indices = [i for i, t in enumerate(text_no_punc.split()) if t.lower() == query.lower()]
228
- for i in indices:
229
- masked_text, masked = add_mask(text, index=i)
230
- res = scrambler(masked_text, temp=temp, top_k=top_k_range)
231
- out = {item["token_str"]: item["score"] for item in res}
232
- sorted_keys = sorted(out, key=out.get)
233
- # If the key does not appear, floor its rank for that round
234
- for rank, token_str in enumerate(sorted_keys):
235
- if token_str in ignore_str:
236
- continue
237
- stemmed = ps.stem(token_str)
238
- norm_rank = rank / top_k_range
239
- top_scores[stemmed].append(norm_rank)
240
- for key in top_scores.keys():
241
- if key not in out.keys():
242
- top_scores[key].append(0)
243
- # Calc mean
244
- for key in top_scores.keys():
245
- top_scores[key] = np.mean(top_scores[key])
246
- # Normalize
247
- for key in top_scores.keys():
248
- top_scores[key] = top_scores[key] / np.sum(list(top_scores.values()))
249
- # Get top_k
250
- top_n = sorted(list(top_scores.values()))[-3]
251
- for key in list(top_scores.keys()):
252
- if top_scores[key] < top_n:
253
- del top_scores[key]
254
- q_dict[query] = top_scores
255
-
256
- keywords = ''
257
- for i, q in enumerate(q_dict.keys()):
258
- keywords += '['
259
- for ii, k in enumerate(q_dict[q].keys()):
260
- keywords += k
261
- if ii < len(q_dict[q].keys()) - 1:
262
- keywords += ' OR '
263
- else:
264
- keywords += ']'
265
- if i < len(q_dict.keys()) - 1:
266
- keywords += ' AND '
267
- # keywords = set([k for q in q_dict.keys() for k in q_dict[q].keys()])
268
- # search_str = ' OR '.join(keywords)
269
- output = [q_dict[q] for q in q_dict]
270
- output.append(keywords)
271
- return output
272
- # fig, ax = plt.subplots(nrows=1, ncols=3)
273
- # for q in q_dict:
274
- # ax.bar(q_dict[q])
275
- # return fig
276
-
277
- label0 = gr.Label(label='keyword 1', num_top_classes=3)
278
- label01 = gr.Label(label='keyword 2', num_top_classes=3)
279
- label02 = gr.Label(label='keyword 3', num_top_classes=3)
280
- textbox02 = gr.Textbox(label="Input Keywords", lines=3)
281
- textbox01 = gr.Textbox(label="Input Keywords", placeholder="Type keywords here", lines=1)
282
- textbox0 = gr.Textbox(label="Input Sentences", placeholder="Type sentences here", lines=5)
283
-
284
- output_textbox0 = gr.Textbox(label='Search String of Keywords', placeholder="Output will appear here", lines=4)
285
- # temp_slider0 = gr.Slider(1.0, 3.0, value=1.0, label='Creativity')
286
-
287
- textbox1 = gr.Textbox(label="Input Sentence", lines=5)
288
- # output_textbox1 = gr.Textbox(placeholder="Output will appear here", lines=4)
289
- title1 = "Patent-BERT: Context-Dependent Synonym Generator"
290
- description1 = """<p>
291
- Try inserting a few sentences from a patent, and pick keywords for the model to analyze. The model will analyze the
292
- context of the keywords in the sentences and generate the top three most likely candidates for each word.
293
- This can be used for more creative patent drafting or patent searches using the generated search string. The base model is
294
- <a href= "https://github.com/google/patents-public-data/blob/master/models/BERT%20for%20Patents.md">Patent BERT</a> created and trained by Google.
295
-
296
- <strong>Note:</strong> Current pipeline only allows for <strong>three</strong> keyword submissions. Stemming (e.g., altering -> alter) is built into the output for
297
- broader search string. <br/>
298
-
299
- Beta features (currently work-in-progress) include: (<strong>A</strong>) adjustment options for (i) the number of keywords, (ii) the number of context-dependent synonyms,
300
- and (iii) a 'creativity' parameter of the model; (<strong>B</strong>) analysis of where these words appear in the patent (e.g.,
301
- claim, summary, etc.); and (<strong>C</strong>) a stemming option for input keywords.
302
- <br/>
303
- <p/>"""
304
-
305
- # textbox2 = gr.Textbox(label="Input Sentences", lines=5)
306
- # output_textbox2 = gr.Textbox(placeholder="Output will appear here", lines=4)
307
- # temp_slider2 = gr.Slider(1.0, 3.0, value=1.0, label='Creativity')
308
- # edit_slider2 = gr.Slider(1, 20, step=1, value=1.0, label='Number of edits')
309
-
310
-
311
- # title2 = "Patent-BERT Sentence Remix-er: Multiple Edits"
312
- # description2 = """<p>
313
- #
314
- # Try typing in a sentence for the model to remix. Adjust the 'creativity' scale bar to change the
315
- # the model's confidence in its likely substitutions and the 'number of edits' for the number of edits you want
316
- # the model to attempt to make. The words substituted in the output sentence will be enclosed in asterisks (e.g., *word*).
317
- # <br/> <p/> """
318
-
319
- demo0 = gr.Interface(
320
- fn=extract_keywords,
321
- inputs=[textbox0, textbox01],
322
- outputs=[label0, label01, label02, output_textbox0],
323
- examples=tab_two_examples,
324
- allow_flagging='never',
325
- title=title1,
326
- description=description1
327
- )
328
-
329
- # demo1 = gr.Interface(
330
- # fn=unmask_single,
331
- # inputs=[textbox1],
332
- # outputs='label',
333
- # examples=tab_one_examples,
334
- # allow_flagging='never',
335
- # title=title1,
336
- # description=description1
337
- # )
338
-
339
- # demo2 = gr.Interface(
340
- # fn=unmask,
341
- # inputs=[textbox2, temp_slider2, edit_slider2],
342
- # outputs=[output_textbox2],
343
- # examples=tab_two_examples,
344
- # allow_flagging='never',
345
- # title=title2,
346
- # description=description2
347
- # )
348
-
349
- gr.TabbedInterface(
350
- [demo0], ["Keyword generator"]
351
- ).launch()
 
spaces/Banbri/zcvzcv/src/app/queries/predict.ts DELETED
@@ -1,9 +0,0 @@
- "use server"
-
- import { LLMEngine } from "@/types"
- import { predict as predictWithHuggingFace } from "./predictWithHuggingFace"
- import { predict as predictWithOpenAI } from "./predictWithOpenAI"
-
- const llmEngine = `${process.env.LLM_ENGINE || ""}` as LLMEngine
-
- export const predict = llmEngine === "OPENAI" ? predictWithOpenAI : predictWithHuggingFace
 
spaces/Banjoo/What_The_Bun/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: What The Bun
- emoji: 🏢
- colorFrom: purple
- colorTo: red
- sdk: gradio
- sdk_version: 3.15.0
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Benson/text-generation/Examples/Apk Club Gacha Para Porttil.md DELETED
@@ -1,106 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar y jugar Gacha Club en su ordenador portátil</h1>
3
- <p>¿Te encanta crear tus propios personajes y escenas al estilo anime? ¿Te gusta jugar juegos de rol casual con gráficos lindos y juego divertido? Si respondiste afirmativamente a estas preguntas, entonces es posible que quieras echar un vistazo a <strong>Gacha Club</strong>, un juego popular de Lunime que te permite hacer todo eso y mucho más! </p>
4
- <h2>apk club gacha para portátil</h2><br /><p><b><b>Download Zip</b> &rarr;&rarr;&rarr; <a href="https://bltlly.com/2v6JAh">https://bltlly.com/2v6JAh</a></b></p><br /><br />
5
- <p>En este artículo, le diremos qué es Gacha Club, por qué debería reproducirlo en su computadora portátil y cómo descargarlo e instalarlo usando un emulador. También te daremos algunos consejos y trucos para jugar Gacha Club en tu portátil. ¡Así que empecemos! </p>
6
- <h2>¿Qué es el Club Gacha? </h2>
7
- <h3>Un juego casual de Lunime</h3>
8
- <p>Gacha Club es un juego casual desarrollado por Lunime, un estudio que se especializa en hacer juegos con gráficos de estilo anime y mecánica gacha. Gacha es un término que se refiere a un tipo de juego en el que puedes recoger varios objetos o personajes usando moneda del juego o dinero real. </p>
9
- <p>Gacha Club es el último juego de la serie Gacha, que incluye otros títulos populares como Gacha Life, Gachaverse y Gacha Studio. Fue lanzado en junio de 2020 para dispositivos Android, y más tarde para Windows, iOS y navegadores. </p>
10
- <p></p>
11
- <h3>Características de Gacha Club</h3>
12
- <p>Gacha Club tiene muchas características que lo convierten en un juego divertido y creativo para cualquiera que ame el anime y el gacha. Aquí están algunas de ellas:</p>
13
- <ul>
14
- <li>Puedes crear tus propios personajes de estilo anime y vestirlos en miles de trajes, peinados, armas, accesorios y más. También puedes personalizar sus colores, expresiones, poses, fondos, etc.</li>
15
- <li>Puedes entrar en el modo Studio y crear cualquier escena que puedas imaginar con tus personajes. También puede agregar cuadros de texto, mascotas, elementos, efectos y más para hacer sus escenas más animadas. </li>
16
- <li>Puedes unirte a varios clubes y conocer a otros personajes con diferentes personalidades e historias. También puedes chatear con ellos y aprender más sobre sus vidas. </li>
17
-
18
- <li>Puedes jugar varios minijuegos como Mascot Whack, Usagi vs Neko, Memory Match, Lemo & Lime’s Adventure, Duck & Dodge, Phantom’s Remix y más. También puedes ganar gemas y bytes jugando estos minijuegos. </li>
19
- <li>Puedes importar y exportar tus personajes de otros juegos de la serie Gacha. También puedes compartir tus personajes y escenas con otros jugadores online. </li>
20
- </ul>
21
- <h2>¿Por qué jugar Gacha Club en su ordenador portátil? </h2>
22
- <h3>Beneficios de jugar en una pantalla más grande</h3>
23
- <p>Gacha Club es un gran juego para jugar en tu dispositivo móvil, pero ¿sabías que también puedes jugar en tu portátil? Hay muchos beneficios de jugar Gacha Club en su ordenador portátil en lugar de su dispositivo móvil. Estos son algunos de ellos <p>Aquí está la continuación del artículo:</p>
24
- <ul>
25
- <li>Puedes disfrutar del juego en una pantalla más grande, lo que puede mejorar la calidad visual y los detalles del juego. También puedes ver más elementos del juego y la interfaz sin tener que acercar o alejar. </li>
26
- <li> Puede utilizar el teclado y el ratón para controlar el juego, lo que puede hacer que sea más fácil y más rápido para navegar y jugar. También puedes personalizar la configuración del juego y las asignaciones de claves según tus preferencias. </li>
27
- <li>Puede ahorrar su vida útil de la batería y espacio de almacenamiento en su dispositivo móvil jugando en su computadora portátil. También puede evitar interrupciones de llamadas telefónicas, mensajes, notificaciones o alertas de batería baja. </li>
28
- <li>Puede jugar el juego sin conexión sin tener que preocuparse por la conexión a Internet o el uso de datos. También puede acceder al juego en cualquier momento y en cualquier lugar con su ordenador portátil. </li>
29
- </ul>
30
- <h3>Cómo usar un emulador para jugar Gacha Club en tu portátil</h3>
31
- <p>Una de las formas más fáciles y convenientes de jugar Gacha Club en su computadora portátil es utilizar un emulador. Un emulador es un software que le permite ejecutar aplicaciones Android en su ordenador portátil. Hay muchos emuladores disponibles en línea, pero algunos de los más populares son BlueStacks, NoxPlayer y LDPlayer.</p>
32
-
33
- <h2>Cómo descargar e instalar Gacha Club en su computadora portátil</h2>
34
- <h3>Paso 1: Descargar e instalar un emulador</h3>
35
- <p>El primer paso es descargar e instalar un emulador de su elección en su computadora portátil. Puedes visitar el sitio web oficial del emulador y seguir las instrucciones para descargarlo e instalarlo. Por ejemplo, si quieres usar BlueStacks, puedes ir a <a href="">https://www.bluestacks.com/</a> y hacer clic en el botón "Descargar BlueStacks". Luego, puede ejecutar el archivo de instalación y seguir los pasos para instalar BlueStacks en su computadora portátil. </p>
36
- <h3>Paso 2: Inicia sesión en tu cuenta de Google</h3>
37
- <p>El siguiente paso es iniciar sesión en su cuenta de Google en el emulador. Esto le permitirá acceder a la Google Play Store y otros servicios de Google. Puedes usar tu cuenta de Google existente o crear una nueva si no la tienes. Para iniciar sesión, puedes abrir el emulador y hacer clic en el botón "Inicio de sesión de Google". Luego, puede ingresar su dirección de correo electrónico y contraseña y hacer clic en "Siguiente". </p>
38
- <h3>Paso 3: Búsqueda de Gacha Club en el emulador</h3>
39
- <p>El tercer paso es buscar Gacha Club en el emulador. Puedes hacer esto abriendo la aplicación Google Play Store en el emulador y escribiendo "Gacha Club" en la barra de búsqueda. A continuación, puede hacer clic en el botón "Instalar" junto al icono del juego. Alternativamente, también puede utilizar este enlace <a href=">https://play.google.com/store/apps/details?id=air.com.lunime.gachaclub&hl=en_US&gl=US</a> para ir directamente a la página del juego en la Google Play Store.</p>
40
- <h3>Paso 4: Instalar y lanzar Gacha Club</h3>
41
- <p>El paso final es instalar y poner en marcha Gacha Club en su ordenador portátil. Puede hacer esto esperando a que termine el proceso de instalación y luego haciendo clic en el botón "Abrir". Esto pondrá en marcha Gacha Club en su ordenador portátil y se puede empezar a jugar de inmediato. </p>
42
- <h2>Consejos y trucos para jugar Gacha Club en su ordenador portátil</h2>
43
- <h3>Cómo personalizar tus personajes y escenas</h3>
44
-
45
- <ul>
46
- <li>Puede acceder al modo de personalización de personajes haciendo clic en el botón "Vestir" en el menú principal. Aquí, puede elegir entre más de 600 caracteres predefinidos o crear los suyos desde cero. </li>
47
- <li>Puedes cambiar varios aspectos de tu personaje como su nombre, género, edad, altura, tipo de cuerpo, color de piel, estilo de cabello, forma de ojo, forma de boca, forma de nariz, forma de oreja, rasgos faciales, etc.</li>
48
- <li>También puedes vestir a tu personaje con diferentes atuendos, accesorios, armas, sombreros, gafas, máscaras, etc. Puedes elegir entre más de 10.000 artículos en diferentes categorías como tops, fondos, zapatos, calcetines, guantes, cinturones, capas, etc.</li>
49
- <li>También puedes personalizar los colores de la ropa, el cabello, los ojos, etc. de tu personaje usando la herramienta de selección de color. También puede utilizar la herramienta de degradado para crear efectos más únicos y coloridos. </li>
50
- <li>También puedes agregar mascotas, artículos, efectos y pegatinas a tu personaje haciendo clic en el botón "Extras". Puede elegir entre más de 600 mascotas, más de 200 artículos, más de 100 efectos y más de 300 pegatinas. </li>
51
- <li>También puede entrar en el modo Studio haciendo clic en el botón "Studio" en el menú principal. Aquí puedes crear tus propias escenas con tus personajes y otros elementos. </li>
52
- <li>Puedes añadir hasta 10 caracteres y 50 mascotas en una escena. También puede cambiar sus posiciones, tamaños, ángulos, expresiones, poses, etc. usando las herramientas en el lado izquierdo de la pantalla. </li>
53
- <li>También puede cambiar el fondo de su escena haciendo clic en el botón "BG". Puedes elegir entre más de 600 fondos en diferentes categorías como naturaleza, ciudad, escuela, fantasía, etc.</li>
54
- <li>También puede agregar cuadros de texto, burbujas de voz, cuadros de narración y efectos de sonido a su escena haciendo clic en el botón "Texto". También puede personalizar la fuente, tamaño, color, alineación, etc. de su texto. </li>
55
-
56
- </ul>
57
- <h3>Cómo jugar los diferentes modos de batalla y mini-juegos</h3>
58
- <p>Otra característica de Gacha Club es que puedes jugar diferentes modos de batalla y minijuegos. Aquí hay algunos consejos y trucos para hacer eso:</p>
59
- <ul>
60
- <li>Puedes acceder a los modos de batalla haciendo clic en el botón "Batalla" en el menú principal. Aquí, puedes elegir entre cuatro modos diferentes: Historia, Torre, Entrenamiento y Sombras de la Corrupción.</li>
61
- <li>En el modo Historia, puedes seguir la historia de cada club y luchar contra varios enemigos. También puede desbloquear nuevas unidades y elementos completando cada capítulo. </li>
62
- <li>En el modo Torre, puedes desafiarte a ti mismo subiendo una torre con 100 pisos. Cada piso tiene un enemigo y un nivel de dificultad diferentes. También puedes ganar recompensas alcanzando ciertos hitos. </li>
63
- <li>En el modo de entrenamiento, puedes practicar tus habilidades y probar tus estrategias luchando contra diferentes oponentes. También puedes personalizar la configuración y las condiciones de cada batalla. </li>
64
- <li>En el modo Sombras de corrupción, puede enfrentar el desafío final luchando contra las versiones corruptas de sus unidades. También puede desbloquear nuevas unidades dañadas al derrotarlos. </li>
65
- <li>Puede acceder a los minijuegos haciendo clic en el botón "Juegos" en el menú principal. Aquí puedes elegir entre 10 minijuegos diferentes: Mascot Whack, Usagi vs Neko, Memory Match, Lemo & Lime’s Adventure, Duck & Dodge, Phantom’s Remix, Spike Dodge, Narwhal Sky, Orca Sploosh y DJ Showdown.</li>
66
- <li>Cada mini-juego tiene sus propias reglas y objetivos. También puedes ganar gemas y bytes jugando a estos minijuegos. Las gemas se utilizan para comprar entradas gacha y los bytes se utilizan para comprar artículos en la tienda. </li>
67
- </ul>
68
- <h3>Cómo usar los controles y mejoras del juego</h3>
69
- <p>Una de las ventajas de jugar Gacha Club en su ordenador portátil es que puede utilizar el teclado y el ratón para controlar el juego. Aquí hay algunos consejos y trucos para hacer eso:</p>
70
- <ul>
71
-
72
- <li>Puede usar su teclado para escribir cuadros de texto, barras de búsqueda, etc. También puede usar su teclado para introducir accesos directos para ciertas acciones. Por ejemplo, puedes presionar Ctrl+S para guardar tu personaje o escena. </li>
73
- <li>También puede utilizar las características y herramientas del emulador para mejorar su experiencia de juego. Por ejemplo, puedes usar la herramienta de captura de pantalla para capturar tus personajes y escenas. También puede utilizar la herramienta de varias instancias para ejecutar varias instancias de Gacha Club a la vez. </li>
74
- </ul>
75
- <h2>Conclusión</h2>
76
- <p>Gacha Club es un juego divertido y creativo que te permite crear tus propios personajes y escenas de anime. También puedes jugar diferentes modos de batalla y minijuegos con tus personajes. Si quieres disfrutar de Gacha Club en una pantalla más grande con mejores controles y rendimiento <p>, puedes descargarlo e instalarlo en tu portátil usando un emulador. También puede utilizar algunos consejos y trucos para personalizar sus personajes y escenas, jugar los diferentes modos de batalla y mini-juegos, y utilizar los controles del juego y mejoras. Esperamos que este artículo te haya ayudado a aprender a descargar y jugar Gacha Club en tu portátil. ¡Diviértete y da rienda suelta a tu creatividad! </p>
77
- <h2>Preguntas frecuentes</h2>
78
- <p>Aquí hay algunas preguntas frecuentes sobre Gacha Club y reproducirlo en su computadora portátil:</p>
79
- <tabla>
80
- <tr>
81
- <th>Pregunta</th>
82
- <th>Respuesta</th>
83
- </tr>
84
- <tr>
85
- <td>¿Gacha Club es libre de jugar? </td>
86
- <td>Sí, Gacha Club es gratis para jugar. Puedes descargarlo e instalarlo desde la Google Play Store o cualquier otra tienda de aplicaciones sin pagar nada. También puede jugar el juego sin gastar dinero real. Sin embargo, puedes elegir comprar alguna moneda del juego o artículos con dinero real si quieres apoyar a los desarrolladores u obtener algunas características adicionales. </td>
87
- </tr>
88
- <tr>
89
- <td>¿Es seguro jugar al Gacha Club? </td>
90
-
91
- </tr>
92
- <tr>
93
- <td>¿Puedo jugar Gacha Club en otros dispositivos? </td>
94
- <td>Sí, puede jugar Gacha Club en otros dispositivos además de su computadora portátil. Puede reproducirlo en su dispositivo Android, dispositivo iOS, dispositivo Windows o navegador. Sin embargo, es posible que deba usar diferentes métodos o herramientas para descargarlo e instalarlo en diferentes dispositivos. Puede visitar el sitio web oficial de Gacha Club <a href="">https://lunime.com/gacha-club/</a> para obtener más información. </td>
95
- </tr>
96
- <tr>
97
- <td>¿Puedo transferir mi progreso de un dispositivo a otro? </td>
98
- <td>Sí, puedes transferir tu progreso de un dispositivo a otro. Puedes hacer esto usando la función de importación y exportación del juego. Puede exportar sus personajes y escenas como códigos y guardarlos en su dispositivo o en línea. Luego, puede importarlos en otro dispositivo ingresando los códigos. Sin embargo, es posible que no pueda transferir algunos elementos o características que son exclusivos de ciertos dispositivos o plataformas. </td>
99
- </tr>
100
- <tr>
101
- <td>¿Dónde puedo encontrar más consejos y guías para Gacha Club? </td>
102
- <td>Puede encontrar más consejos y guías para Gacha Club visitando las páginas oficiales de medios sociales de Lunime <a href=">https://www.facebook.com/Lunime/</a>, <a href="">https:/twitter.com/LunimeGa</a>, <a>, <a href=">>>>>hththts:/ww.com/twiom/lun</a games/a, a>a =">https://www.youtube.com/c/Lunime/featured</a>. También puede unirse al servidor oficial de Discord de Lunime <a href=">https://discord.gg/lunime</a> o al subreddit oficial de Gacha Club <a href=">https://www.reddit.com/r/GachaClub/</a>. También puedes buscar en línea otros sitios web creados por fans, blogs, foros, videos, etc.</td>
103
- </tr>
104
- </tabla></p> 64aa2da5cf<br />
105
- <br />
106
- <br />
 
spaces/Benson/text-generation/Examples/Combate Areo En Lnea Mod Apk.md DELETED
@@ -1,99 +0,0 @@
1
-
2
- <h1>Combate aéreo en línea Mod APK: Una revisión</h1>
3
- <p>Si usted está buscando un emocionante y lleno de acción jet juego de lucha para su dispositivo móvil, es posible que desee echa un vistazo a Air Combat Online. Este juego te permite dominar los cielos y dominar los aviones de combate más avanzados del mundo en varios modos de juego y escenarios. Pero lo que si quieres obtener más de este juego? Ahí es donde Air Combat Online Mod APK entra en juego. En este artículo, vamos a revisar lo que es Air Combat Online, lo que es Air Combat Online Mod APK, cómo descargarlo e instalarlo, cuáles son los beneficios y riesgos de usarlo, y algunos consejos y trucos para jugar el juego. </p>
4
- <h2>¿Qué es Air Combat Online? </h2>
5
- <p>Air Combat Online es un juego desarrollado por VOLV Interactive para dispositivos Android e iOS. Fue lanzado en 2014 y desde entonces ha ganado más de 10 millones de descargas en Google Play Store. El juego cuenta con gráficos de calidad de consola, física realista y controles intuitivos. Estos son algunos de los modos de juego y características que puedes disfrutar en Air Combat Online:</p>
6
- <h2>combate aéreo en línea mod apk</h2><br /><p><b><b>DOWNLOAD</b> &#8250;&#8250;&#8250;&#8250;&#8250; <a href="https://bltlly.com/2v6JOG">https://bltlly.com/2v6JOG</a></b></p><br /><br />
7
- <h3>Modos de juego y características</h3>
8
- <ul>
9
- <li><strong>Ranked Match</strong>: Enfréntate a amigos y enemigos por igual en Fast-paced, 4v4 Team Death Match, 2v2 Duel y 1v1 Solo.</li>
10
- <li><strong>Modo de evento</strong>: Elija entre modos cooperativos y competitivos: Gratis para todos, Último hombre de pie, Último equipo de pie, Capturar la bandera, y defender la base.</li>
11
- <li><strong>Batalla de grupo</strong>: Invita a tus amigos a jugar en línea. Entrena y domina tus habilidades de piloto cuando hagas equipo con amigos en todo el mundo. </li>
12
- <li><strong>Modo de un solo jugador</strong>: Colección inigualable de misiones de lucha: Death Match, Bonus Hunt, Devil Regiment Challenge, Cannon Only y Duel.</li>
13
- <li><strong>Nuevo evento</strong>: Únete a un nuevo evento de temporada para obtener recompensas de temporada ricas y exclusivas. </li>
14
- <li><strong>New Friend System</strong>: Invitar y añadir amigos en el juego. Equipo con amigos para unirse a la enorme colección de batallas en línea. </li>
15
-
16
- </ul>
17
- <h3>Flotas de aviones y personalización</h3>
18
- <ul>
19
- <li><strong>Flotas de aviones pulidos</strong>: 100+ cazas basados en los aviones prototipados modernos reales para sus peleas de perros llenas de acción. </li>
20
- <li><strong>Deep Tech Tree</strong>: 16+ único sistema de tecnología actualizable para cada avión para elevar sus habilidades. </li>
21
- <li><strong>Sistema de equipo personalizado</strong>: Equipe alas avanzadas, motores, armaduras y radares para mejorar su poder de combate. </li>
22
- <li><strong>Equipar potentes misiles aire-aire, misiles aire-superficie y cañones para un rendimiento máximo. Lanzar bengalas para señuelos enemigos. </li>
23
- <li><strong>Pinturas personalizadas</strong>: Equipar famosas pinturas de exhibición aérea y pinturas únicas de temporada para una ventaja competitiva. </li>
24
- </ul>
25
- <h3>Gráficos y controles</h3>
26
- <ul>
27
- <li><strong>Calidad de consola de entornos de fondo 3D de próxima generación basados en imágenes reales por satélite</strong>: Sumérgete en los paisajes urbanos, las arenas tropicales, las montañas de hielo y más. Efectos visuales y efectos especiales sin igual, incluyendo texturas HD, iluminación realista, resplandor solar, etc.</li>
28
- <li><strong>Selección gráfica personalizada</strong>: Elija la mejor configuración gráfica para adaptarse al rendimiento de su dispositivo. </li>
29
- <li><strong>Maniobras intuitivas</strong>: Realiza giros de barril y voltereta hacia atrás para evadir los fuegos enemigos deslizando diferentes direcciones. </li>
30
- <h2>¿Qué es el combate aéreo en línea Mod APK? </h2>
31
- <p>Air Combat Online Mod APK es una versión modificada del juego original de Air Combat Online que le permite acceder a algunas características que no están disponibles en la versión oficial. Por ejemplo, puedes obtener dinero ilimitado, monedas, gemas y diamantes para comprar y mejorar tus aviones y equipos. También puede desbloquear todos los aviones y misiones sin gastar dinero real. También puede disfrutar de algunas características adicionales como no anuncios, sin raíz, y anti-van. </p>
32
- <h3>Cómo descargar e instalar</h3>
33
- <p>Si desea probar Air Combat Online Mod APK, debe seguir estos pasos:</p>
34
- <ol>
35
-
36
- <li>Habilita la instalación de aplicaciones de fuentes desconocidas en tu dispositivo. Puede hacer esto yendo a Configuración > Seguridad > Fuentes desconocidas y activarlo. </li>
37
- <li>Busque el archivo descargado en su dispositivo y toque en él para instalarlo. </li>
38
- <li>Espere a que la instalación se complete y luego inicie el juego. </li>
39
- <li>¡Disfruta de las características modificadas y diviértete! </li>
40
- </ol>
41
- <h3>¿Cuáles son los beneficios de su uso</h3>
42
- <p>Algunos de los beneficios de usar Air Combat Online Mod APK son:</p>
43
- <ul>
44
- <li>Puedes ahorrar tiempo y dinero obteniendo recursos ilimitados y desbloqueando todo en el juego. </li>
45
- <li> Puedes disfrutar de una experiencia de juego más inmersiva y realista con gráficos y efectos de sonido mejorados. </li>
46
- <li>Puedes desafiarte a ti mismo y a tus amigos con misiones y escenarios más difíciles y diversos. </li>
47
- <li> Puede personalizar sus aviones y equipos para adaptarse a sus preferencias y estilo. </li>
48
- <li>Puedes jugar el juego sin interrupciones o distracciones de anuncios o ventanas emergentes. </li>
49
- </ul>
50
- <h3>¿Cuáles son los riesgos de su uso</h3>
51
- <p>Sin embargo, usando Air Combat Online Mod APK también viene con algunos riesgos que usted debe ser consciente de:</p>
52
- <ul>
53
- <li> Puedes encontrar algunos errores o fallos que pueden afectar el rendimiento o la estabilidad del juego. </li>
54
- <li>Es posible que tenga problemas de compatibilidad con su dispositivo o sistema operativo. </li>
55
- <li>Usted puede violar los términos y condiciones del desarrollador de juegos original y obtener prohibido jugar en línea o acceder a las actualizaciones. </li>
56
- <li> Puede exponer su dispositivo a malware o virus que pueden dañar sus datos o privacidad. </li>
57
- <li>Usted puede perder la diversión y el desafío de jugar el juego según lo previsto por el desarrollador. </li>
58
- </ul>
59
- <h2>Consejos y trucos para jugar Air Combat Online</h2>
60
- <p>Si quieres mejorar tus habilidades y rendimiento en Air Combat Online, aquí hay algunos consejos y trucos que puedes usar:</p>
61
- <h3>Elige el plano correcto para tu estilo</h3>
62
-
63
- <h3>Evite colisiones frontales y utilice bengalas</h3>
64
- <p>Una de las formas más comunes de morir en Air Combat Online es estrellándose contra otro avión o un obstáculo. Debes evitar las colisiones frontales al alejarte de ellas o usar bengalas para distraer a tus enemigos. Las llamaradas son dispositivos que emiten señales infrarrojas que pueden confundir a los misiles que buscan calor. Puede utilizar bengalas pulsando en el icono de bengala en la esquina inferior derecha de la pantalla. Tiene un número limitado de bengalas, así que úselas sabiamente. </p>
65
- <p></p>
66
- <h3>Domina las maniobras y usa el radar</h3>
67
- <p>Para sobrevivir en Air Combat Online, necesitas dominar las maniobras básicas tales como cilindros, volteretas hacia atrás, bucles, giros, inmersiones, subidas, etc. Estas maniobras pueden ayudarte a evadir el fuego enemigo, obtener una ventaja sobre tus oponentes o escapar de una situación pegajosa. Puede realizar estas maniobras deslizando diferentes direcciones en la pantalla. También debes usar el radar en la esquina superior izquierda de la pantalla para localizar a tus enemigos, aliados, objetivos y amenazas. El radar muestra diferentes iconos para diferentes entidades: rojo para enemigos, azul para aliados, verde para objetivos, amarillo para misiles, etc.</p>
68
- <h2>Conclusión</h2>
69
- controles. También puede probar Air Combat Online Mod APK para obtener más recursos y características, pero usted debe tener cuidado de los riesgos y consecuencias. Si quieres jugar mejor Air Combat Online, debes seguir algunos consejos y trucos como elegir el avión adecuado, evitar colisiones, usar bengalas, dominar las maniobras y usar el radar. Esperamos que este artículo le ha ayudado a aprender más acerca de Air Combat Online y Air Combat Online Mod APK. Divertirse y disfrutar del juego! </p>
70
- <h2>Preguntas frecuentes</h2>
71
- <p>Aquí hay algunas preguntas frecuentes sobre Air Combat Online y Air Combat Online Mod APK:</p>
72
- <tabla>
73
- <tr>
74
- <th>Pregunta</th>
75
- <th>Respuesta</th>
76
- </tr>
77
- <tr>
78
- <td>¿Air Combat Online es gratis? </td>
79
-
80
- </tr>
81
- <tr>
82
- <td>¿Es seguro usar Air Combat Online Mod APK? </td>
83
- <td>Depende de dónde lo descargue. Algunas fuentes pueden proporcionar archivos modded seguros y confiables, mientras que otras pueden contener malware o virus. Siempre debe escanear el archivo antes de instalarlo y usarlo bajo su propio riesgo. </td>
84
- </tr>
85
- <tr>
86
- <td>¿Puedo jugar Air Combat Online sin conexión? </td>
87
- <td>No, Air Combat Online requiere una conexión a Internet para jugar. Necesitas conectarte al servidor del juego para acceder a los modos de juego, características y actualizaciones. </td>
88
- </tr>
89
- <tr>
90
- <td>¿Cómo puedo contactar al desarrollador de Air Combat Online? </td>
91
- <td>Puede ponerse en contacto con el desarrollador de Air Combat Online enviando un correo electrónico a [email protected] o visitando su sitio web en http:///www.volvapps.com/.</td>
92
- </tr>
93
- <tr>
94
- <td>¿Cómo puedo obtener más dinero, monedas, gemas y diamantes en Air Combat Online? </td>
95
- <td>Puedes obtener más dinero, monedas, gemas y diamantes en Air Combat Online completando misiones, ganando batallas, clasificando, viendo anuncios o comprándolos con dinero real. También puede utilizar Air Combat Online Mod APK para obtener recursos ilimitados de forma gratuita. </td>
96
- </tr>
97
- </tabla></p> 64aa2da5cf<br />
98
- <br />
99
- <br />
 
spaces/Benson/text-generation/Examples/Descargar Geometra Dash Meltdown Versin Completa Apk.md DELETED
@@ -1,53 +0,0 @@
1
-
2
- <h1>Descargar Geometry Dash Meltdown Versión completa APK: Una guía para usuarios de Android</h1>
3
- <p>Si eres un fan de los juegos de plataformas de acción basados en el ritmo, es posible que hayas oído hablar de Geometry Dash, un juego popular que pone a prueba tus reflejos y habilidades de sincronización. ¿Pero sabías que hay una expansión libre de Geometry Dash llamada Geometry Dash Meltdown? ¿Y sabías que puedes descargar la versión completa de Geometry Dash Meltdown APK para tu dispositivo Android? En este artículo, te diremos todo lo que necesitas saber sobre Geometry Dash Meltdown, cómo descargarlo, por qué deberías descargarlo y algunos consejos y trucos para ayudarte a vencer al juego. </p>
4
- <h2>descargar geometría dash meltdown versión completa apk</h2><br /><p><b><b>Download Zip</b> &ndash;&ndash;&ndash;&ndash;&ndash;>>> <a href="https://bltlly.com/2v6LxU">https://bltlly.com/2v6LxU</a></b></p><br /><br />
5
- <h2>¿Qué es la fusión del tablero de geometría? </h2>
6
- <p>Geometry Dash Meltdown es una aplicación gratuita de expansión publicitaria de Geometry Dash desarrollada y publicada por RobTop Games. Fue lanzado el 19 de diciembre de 2015 para dispositivos iOS y Android. Cuenta con tres niveles exclusivos con música de F-777, un famoso productor de música electrónica. El juego te reta a saltar, volar y girar su camino a través de cavernas oscuras y obstáculos puntiagudos usando su dedo clicky. El juego es muy duro y requiere mucha práctica y paciencia para completarlo. </p>
7
- <h3>Una expansión libre de Geometría Dash</h3>
8
- <p>Geometry Dash Meltdown no es un juego independiente, sino una expansión de Geometry Dash. Esto significa que no es necesario comprar o descargar Geometry Dash para jugar Geometry Dash Meltdown. Sin embargo, también significa que Geometry Dash Meltdown tiene características limitadas en comparación con Geometry Dash. Por ejemplo, solo tiene tres niveles, logros limitados, iconos y objetos coleccionables, y no hay niveles de usuario o editor de niveles. Si quieres disfrutar de la experiencia completa de Geometry Dash, tendrás que comprar o descargar el juego original. </p>
9
- <h3>Cuenta con tres niveles exclusivos con música de F-777</h3>
10
-
11
- <h3>Te desafía a saltar, volar y dar vueltas a través de cavernas oscuras y obstáculos puntiagudos</h3>
12
- <p>Geometry Dash Meltdown es un juego que requiere reflejos rápidos, sincronización precisa y buena memoria. El juego se basa en un simple mecánico: toque para saltar o volar. Sin embargo, el juego está lejos de ser fácil, ya que se enfrentará a muchos peligros en el camino, tales como picos, sierras, láseres, portales, interruptores de gravedad, y más. También encontrarás diferentes modos de juego, como volar un cohete o un OVNI. El juego es muy implacable, ya que un error te enviará de vuelta al comienzo del nivel. Necesitarás practicar mucho y memorizar los patrones de los obstáculos para tener éxito. </p>
13
- <h2>¿Cómo descargar la versión completa de Geometry Dash Meltdown APK? </h2>
14
- <p>Si desea descargar la versión completa de Geometry Dash Meltdown APK para su dispositivo Android, tendrá que seguir estos pasos:</p>
15
- <p></p>
16
- <h3>Encontrar una fuente confiable para el archivo APK</h3>
17
- <p>Un archivo APK es un archivo de paquete de Android que contiene los datos de instalación de una aplicación. Puede descargar archivos APK de varios sitios web, pero debe tener cuidado con la fuente. Algunos archivos APK pueden contener malware o virus que pueden dañar su dispositivo o robar sus datos. Por lo tanto, solo debe descargar archivos APK de fuentes confiables y de buena reputación. Una de las fuentes que recomendamos es APKCombo, un sitio web que ofrece descargas APK gratuitas y seguras para juegos y aplicaciones Android. Usted puede encontrar Geometría Dash Meltdown versión completa APK en APKCombo buscando el nombre del juego o utilizando este enlace: [Geometría Dash Meltdown APK (Android Game) - Descarga gratuita - APKCombo]( 1 ). </p> <h3>Habilitar fuentes desconocidas en su dispositivo</h3>
18
-
19
- <h3>Install the APK file and enjoy the game</h3>
20
- <p>Once you have downloaded the Geometry Dash Meltdown full-version APK file from APKCombo and enabled unknown sources on your device, you are ready to install the game. To do this, locate the APK file on your device, either in the downloads folder or in your file manager app. Then tap the APK file and follow the on-screen instructions to install the game. Once the installation is complete, you can launch the game from the app drawer or home screen and enjoy playing Geometry Dash Meltdown without limitations. </p>
21
- <h2>Why download the Geometry Dash Meltdown full-version APK? </h2>
22
- <p>You may be wondering why you should download the Geometry Dash Meltdown full-version APK instead of playing the game from the Google Play Store. Well, there are some benefits to downloading the full-version APK that you cannot get from the official release. Here are some of them:</p>
23
- <h3>Unlock all icons and colors to customize your character</h3>
24
- <p>In Geometry Dash Meltdown, you can customize your character by choosing different icons and colors. However, not all of them are available from the start. Some require you to complete certain achievements or collect certain items in the game. This can be quite challenging and time-consuming, especially if you are not very good at the game. If you download the Geometry Dash Meltdown full-version APK, though, you can unlock all icons and colors without having to complete any achievements or collect any items. This way, you can create your own unique character and express your personality in the game. </p>
25
- <h3>Play without ads or interruptions</h3>
26
-
27
- <h3>Access future updates and levels</h3>
28
- <p>Geometry Dash Meltdown is an expansion of Geometry Dash, but it is not a full game. It only has three levels and limited features compared to Geometry Dash. However, RobTop Games has promised to release more updates and levels for Geometry Dash Meltdown in the future. These updates will add more content and variety and make the game more fun and challenging. If you play Geometry Dash Meltdown from the Google Play Store, you may not be able to access these updates and levels as soon as they are released; you may have to wait a long time or update the app manually. If you download the Geometry Dash Meltdown full-version APK, however, you can access future updates and levels as soon as they are available and enjoy the latest content and features without delay. </p>
29
- <h2>Tips and tricks for Geometry Dash Meltdown</h2>
30
- <p>Geometry Dash Meltdown is a very hard game that takes a lot of skill and practice to master. However, there are some tips and tricks that can help you improve your gameplay and beat the game. Here are some of them:</p>
31
- <h3>Follow the rhythm of the music</h3>
32
- <p>One of the best tips for Geometry Dash Meltdown is to follow the rhythm of the music. The music is not only catchy and enjoyable, but also useful and informative. It is synchronized with the obstacles and platforms in the game, giving you cues about when to jump or fly. If you listen carefully, you can anticipate the upcoming challenges and react accordingly. The music also helps you keep your rhythm and timing, which are essential for completing the game. Therefore, you should always play Geometry Dash Meltdown with the sound on and headphones in. </p>
33
- <h3>Grab the secret coins for extra rewards</h3>
34
-
35
- <h3>Use practice mode to improve your skills</h3>
36
- <p>Geometry Dash Meltdown is a game that takes a lot of practice and repetition to master. Practicing the same level over and over can be frustrating and boring, though, especially if you have to start from the beginning every time you die. That is why Geometry Dash Meltdown has a practice mode that lets you practice any level without losing progress. In practice mode, you can place checkpoints anywhere in the level, and if you die, you respawn at the last checkpoint instead of at the start. This way, you can practice the specific parts of the level that are giving you trouble and improve your skills gradually. You can also use practice mode to explore the level and find the secret coins or shortcuts. You should therefore always use practice mode before trying to complete a level in normal mode. </p>
37
- <h3>Take a break when you feel frustrated</h3>
38
- <p>Geometry Dash Meltdown is a game that can easily make you rage. It is very hard and unforgiving, and it can leave you angry or frustrated when you fail repeatedly. Getting angry or frustrated will only make you play worse and sap your motivation. That is why you should always take a break when you get frustrated and calm down. A break helps you relax your mind and body, reduce stress and anger, and restore your focus and confidence. You can also do something else that makes you happy or relaxed, such as listening to music, watching a video, or talking to a friend. After taking a break, you will feel refreshed and ready to try again. </p>
39
- <h2>Conclusion</h2>
40
-
41
- <h2>Frequently asked questions</h2>
42
- <p>Here are some frequently asked questions about Geometry Dash Meltdown:</p>
43
- <table>
44
- <tr><td><b>Question</b></td><td><b>Answer</b></td></tr>
45
- <tr><td>Is Geometry Dash Meltdown safe to download? </td><td>Yes, Geometry Dash Meltdown is safe to download if you use a trusted source such as APKCombo. However, you should always scan any APK file with an antivirus app before installing it. </td></tr>
46
- <tr><td>Is Geometry Dash Meltdown compatible with my device? </td><td>Geometry Dash Meltdown requires Android 4.0 or higher to run smoothly. You can check your device's Android version by going to Settings > About phone > Software information. </td></tr>
47
- <tr><td>How do I update Geometry Dash Meltdown? </td><td>If you download Geometry Dash Meltdown from APKCombo, you will have to check for updates manually by visiting the website regularly or subscribing to its newsletter. If an update is available, you will have to download and install it yourself. </td></tr>
48
- <tr><td>How do I uninstall Geometry Dash Meltdown? </td><td>If you want to uninstall Geometry Dash Meltdown from your device, go to Settings > Apps > Geometry Dash Meltdown > Uninstall. This will remove the game from your device and free up some storage space. </td></tr>
49
- <tr><td>How do I contact the developer of Geometry Dash Meltdown? </td><td>If you have any questions, feedback, or problems with Geometry Dash Meltdown, you can contact the developer, RobTop Games, by emailing them at [email protected] or by visiting their website at www.robtopgames.com. </td></tr>
50
- </table>
51
- <p>I hope you found this article helpful and informative. If you did, please share it with your friends and family who might be interested in Geometry Dash Meltdown. And if you have any comments or suggestions, please leave them below. Thanks for reading, and happy playing! </p> 64aa2da5cf<br />
52
- <br />
53
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/index/sources.py DELETED
@@ -1,223 +0,0 @@
1
- import logging
2
- import mimetypes
3
- import os
4
- import pathlib
5
- from typing import Callable, Iterable, Optional, Tuple
6
-
7
- from pip._internal.models.candidate import InstallationCandidate
8
- from pip._internal.models.link import Link
9
- from pip._internal.utils.urls import path_to_url, url_to_path
10
- from pip._internal.vcs import is_url
11
-
12
- logger = logging.getLogger(__name__)
13
-
14
- FoundCandidates = Iterable[InstallationCandidate]
15
- FoundLinks = Iterable[Link]
16
- CandidatesFromPage = Callable[[Link], Iterable[InstallationCandidate]]
17
- PageValidator = Callable[[Link], bool]
18
-
19
-
20
- class LinkSource:
21
- @property
22
- def link(self) -> Optional[Link]:
23
- """Returns the underlying link, if there's one."""
24
- raise NotImplementedError()
25
-
26
- def page_candidates(self) -> FoundCandidates:
27
- """Candidates found by parsing an archive listing HTML file."""
28
- raise NotImplementedError()
29
-
30
- def file_links(self) -> FoundLinks:
31
- """Links found by specifying archives directly."""
32
- raise NotImplementedError()
33
-
34
-
35
- def _is_html_file(file_url: str) -> bool:
36
- return mimetypes.guess_type(file_url, strict=False)[0] == "text/html"
37
-
38
-
39
- class _FlatDirectorySource(LinkSource):
40
- """Link source specified by ``--find-links=<path-to-dir>``.
41
-
42
- This looks at the content of the directory, and returns:
43
-
44
- * ``page_candidates``: Links listed on each HTML file in the directory.
45
- * ``file_candidates``: Archives in the directory.
46
- """
47
-
48
- def __init__(
49
- self,
50
- candidates_from_page: CandidatesFromPage,
51
- path: str,
52
- ) -> None:
53
- self._candidates_from_page = candidates_from_page
54
- self._path = pathlib.Path(os.path.realpath(path))
55
-
56
- @property
57
- def link(self) -> Optional[Link]:
58
- return None
59
-
60
- def page_candidates(self) -> FoundCandidates:
61
- for path in self._path.iterdir():
62
- url = path_to_url(str(path))
63
- if not _is_html_file(url):
64
- continue
65
- yield from self._candidates_from_page(Link(url))
66
-
67
- def file_links(self) -> FoundLinks:
68
- for path in self._path.iterdir():
69
- url = path_to_url(str(path))
70
- if _is_html_file(url):
71
- continue
72
- yield Link(url)
73
-
74
-
75
- class _LocalFileSource(LinkSource):
76
- """``--find-links=<path-or-url>`` or ``--[extra-]index-url=<path-or-url>``.
77
-
78
- If a URL is supplied, it must be a ``file:`` URL. If a path is supplied to
79
- the option, it is converted to a URL first. This returns:
80
-
81
- * ``page_candidates``: Links listed on an HTML file.
82
- * ``file_candidates``: The non-HTML file.
83
- """
84
-
85
- def __init__(
86
- self,
87
- candidates_from_page: CandidatesFromPage,
88
- link: Link,
89
- ) -> None:
90
- self._candidates_from_page = candidates_from_page
91
- self._link = link
92
-
93
- @property
94
- def link(self) -> Optional[Link]:
95
- return self._link
96
-
97
- def page_candidates(self) -> FoundCandidates:
98
- if not _is_html_file(self._link.url):
99
- return
100
- yield from self._candidates_from_page(self._link)
101
-
102
- def file_links(self) -> FoundLinks:
103
- if _is_html_file(self._link.url):
104
- return
105
- yield self._link
106
-
107
-
108
- class _RemoteFileSource(LinkSource):
109
- """``--find-links=<url>`` or ``--[extra-]index-url=<url>``.
110
-
111
- This returns:
112
-
113
- * ``page_candidates``: Links listed on an HTML file.
114
- * ``file_candidates``: The non-HTML file.
115
- """
116
-
117
- def __init__(
118
- self,
119
- candidates_from_page: CandidatesFromPage,
120
- page_validator: PageValidator,
121
- link: Link,
122
- ) -> None:
123
- self._candidates_from_page = candidates_from_page
124
- self._page_validator = page_validator
125
- self._link = link
126
-
127
- @property
128
- def link(self) -> Optional[Link]:
129
- return self._link
130
-
131
- def page_candidates(self) -> FoundCandidates:
132
- if not self._page_validator(self._link):
133
- return
134
- yield from self._candidates_from_page(self._link)
135
-
136
- def file_links(self) -> FoundLinks:
137
- yield self._link
138
-
139
-
140
- class _IndexDirectorySource(LinkSource):
141
- """``--[extra-]index-url=<path-to-directory>``.
142
-
143
- This is treated like a remote URL; ``candidates_from_page`` contains logic
144
- for this by appending ``index.html`` to the link.
145
- """
146
-
147
- def __init__(
148
- self,
149
- candidates_from_page: CandidatesFromPage,
150
- link: Link,
151
- ) -> None:
152
- self._candidates_from_page = candidates_from_page
153
- self._link = link
154
-
155
- @property
156
- def link(self) -> Optional[Link]:
157
- return self._link
158
-
159
- def page_candidates(self) -> FoundCandidates:
160
- yield from self._candidates_from_page(self._link)
161
-
162
- def file_links(self) -> FoundLinks:
163
- return ()
164
-
165
-
166
- def build_source(
167
- location: str,
168
- *,
169
- candidates_from_page: CandidatesFromPage,
170
- page_validator: PageValidator,
171
- expand_dir: bool,
172
- cache_link_parsing: bool,
173
- ) -> Tuple[Optional[str], Optional[LinkSource]]:
174
- path: Optional[str] = None
175
- url: Optional[str] = None
176
- if os.path.exists(location): # Is a local path.
177
- url = path_to_url(location)
178
- path = location
179
- elif location.startswith("file:"): # A file: URL.
180
- url = location
181
- path = url_to_path(location)
182
- elif is_url(location):
183
- url = location
184
-
185
- if url is None:
186
- msg = (
187
- "Location '%s' is ignored: "
188
- "it is either a non-existing path or lacks a specific scheme."
189
- )
190
- logger.warning(msg, location)
191
- return (None, None)
192
-
193
- if path is None:
194
- source: LinkSource = _RemoteFileSource(
195
- candidates_from_page=candidates_from_page,
196
- page_validator=page_validator,
197
- link=Link(url, cache_link_parsing=cache_link_parsing),
198
- )
199
- return (url, source)
200
-
201
- if os.path.isdir(path):
202
- if expand_dir:
203
- source = _FlatDirectorySource(
204
- candidates_from_page=candidates_from_page,
205
- path=path,
206
- )
207
- else:
208
- source = _IndexDirectorySource(
209
- candidates_from_page=candidates_from_page,
210
- link=Link(url, cache_link_parsing=cache_link_parsing),
211
- )
212
- return (url, source)
213
- elif os.path.isfile(path):
214
- source = _LocalFileSource(
215
- candidates_from_page=candidates_from_page,
216
- link=Link(url, cache_link_parsing=cache_link_parsing),
217
- )
218
- return (url, source)
219
- logger.warning(
220
- "Location '%s' is ignored: it is neither a file nor a directory.",
221
- location,
222
- )
223
- return (url, None)
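For context, here is a minimal sketch of how build_source above might be wired up. The two callbacks and the directory path are placeholders, and pip's _internal modules are not a stable public API, so treat this purely as an illustration of the call shape defined in this file.

from pip._internal.index.sources import build_source
from pip._internal.models.link import Link

def candidates_from_page(link: Link):
    # Stand-in: a real implementation would parse the page behind `link`
    # and yield InstallationCandidate objects.
    return []

def page_validator(link: Link) -> bool:
    # Stand-in: accept every page.
    return True

url, source = build_source(
    "./wheelhouse",                      # hypothetical --find-links directory
    candidates_from_page=candidates_from_page,
    page_validator=page_validator,
    expand_dir=True,                     # treat the directory as a flat listing
    cache_link_parsing=False,
)
if source is not None:
    print(list(source.file_links()))     # archives found directly in the directory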
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/dep_util.py DELETED
@@ -1,25 +0,0 @@
1
- from distutils.dep_util import newer_group
2
-
3
-
4
- # yes, this was almost entirely copy-pasted from
5
- # 'newer_pairwise()', this is just another convenience
6
- # function.
7
- def newer_pairwise_group(sources_groups, targets):
8
- """Walk both arguments in parallel, testing if each source group is newer
9
- than its corresponding target. Returns a pair of lists (sources_groups,
10
- targets) where sources is newer than target, according to the semantics
11
- of 'newer_group()'.
12
- """
13
- if len(sources_groups) != len(targets):
14
- raise ValueError(
15
- "'sources_group' and 'targets' must be the same length")
16
-
17
- # build a pair of lists (sources_groups, targets) where source is newer
18
- n_sources = []
19
- n_targets = []
20
- for i in range(len(sources_groups)):
21
- if newer_group(sources_groups[i], targets[i]):
22
- n_sources.append(sources_groups[i])
23
- n_targets.append(targets[i])
24
-
25
- return n_sources, n_targets
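A minimal usage sketch for newer_pairwise_group (the file names are placeholders that would need to exist on disk): each source group is kept only if at least one of its files is newer than the corresponding target, mirroring the semantics of distutils' newer_group.

from setuptools.dep_util import newer_pairwise_group

# Hypothetical extension sources and the objects previously built from them.
sources_groups = [["foo.c", "foo.h"], ["bar.c"]]
targets = ["foo.o", "bar.o"]

# Only the (group, target) pairs whose sources are newer come back,
# i.e. the ones that need rebuilding.
stale_sources, stale_targets = newer_pairwise_group(sources_groups, targets)
print(stale_targets)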
 
spaces/Bokanovskii/Image-to-music/shred_model.py DELETED
@@ -1,109 +0,0 @@
1
- import tensorflow.keras as keras
2
- import tensorflow as tf
3
-
4
- import PIL.Image
5
- import PIL.ImageOps
6
-
7
- import numpy as np
8
-
9
- IMG_SIZE = [256,256]
10
-
11
- def prepare_image(path):
12
- # Load the image with PIL
13
- img = PIL.Image.open(path)
14
- img, rotated = exif_transpose(img)
15
- img = img.resize(IMG_SIZE)
16
- return np.expand_dims(np.asarray(img), axis=0)
17
-
18
- # def prepare_model(checkpoint_folder_path):
19
- # base_model = keras.applications.EfficientNetB7(
20
- # weights='imagenet',
21
- # include_top=False,
22
- # input_shape=tuple(IMG_SIZE + [3])
23
- # )
24
- # base_model.trainable = True
25
-
26
- # model = keras.Sequential()
27
- # model.add(keras.Input(shape=tuple(IMG_SIZE + [3])))
28
- # model.add(keras.layers.RandomFlip("horizontal"))
29
- # model.add(keras.layers.RandomRotation(0.1))
30
- # model.add(base_model)
31
- # model.add(keras.layers.GlobalMaxPooling2D())
32
- # model.add(keras.layers.Dense(1, activation='sigmoid'))
33
-
34
- # model.compile(optimizer=keras.optimizers.Adam(1e-5), # Low learning rate
35
- # loss=keras.losses.BinaryCrossentropy(from_logits=False),
36
- # metrics=[keras.metrics.BinaryAccuracy(), 'Precision', 'Recall',
37
- # tf.keras.metrics.SpecificityAtSensitivity(.9)],)
38
- # model.load_weights(checkpoint_folder_path)
39
- # return model
40
-
41
- def prepare_EfficientNet_model(base_trainable=False, fine_tuning=False):
42
- base_model = keras.applications.EfficientNetB7(
43
- weights="imagenet",
44
- include_top=False,
45
- input_shape=tuple(IMG_SIZE + [3])
46
- )
47
- base_model.trainable = False
48
-
49
- model = keras.Sequential()
50
- model.add(keras.Input(shape=tuple(IMG_SIZE + [3])))
51
- model.add(keras.layers.RandomFlip("horizontal"))
52
- model.add(keras.layers.RandomRotation(0.1))
53
- model.add(base_model)
54
- model.add(keras.layers.GlobalMaxPooling2D())
55
- model.add(keras.layers.Dense(1, activation='sigmoid'))
56
-
57
- if not fine_tuning:
58
- if not base_trainable:
59
- base_model.trainable = False
60
- model.compile(optimizer=keras.optimizers.Adam(),
61
- loss=keras.losses.BinaryCrossentropy(from_logits=False),
62
- metrics=[keras.metrics.BinaryAccuracy(), 'Precision', 'Recall'],)
63
- else:
64
- base_model.trainable = True
65
- model.compile(optimizer=keras.optimizers.Adam(1e-5), # Low learning rate
66
- loss=keras.losses.BinaryCrossentropy(from_logits=False),
67
- metrics=[keras.metrics.BinaryAccuracy(), 'Precision', 'Recall',
68
- tf.keras.metrics.SpecificityAtSensitivity(.9)],)
69
- return model
70
-
71
- def exif_transpose(img):
72
- if not img:
73
- return img, False  # return the same (image, rotated) pair as the other branches, as unpacked in prepare_image
74
-
75
- exif_orientation_tag = 274
76
-
77
- # Check for EXIF data (only present on some files)
78
- if hasattr(img, "_getexif") and isinstance(img._getexif(), dict) and exif_orientation_tag in img._getexif():
79
- exif_data = img._getexif()
80
- orientation = exif_data[exif_orientation_tag]
81
-
82
- # Handle EXIF Orientation
83
- if orientation == 1:
84
- # Normal image - nothing to do!
85
- pass
86
- elif orientation == 2:
87
- # Mirrored left to right
88
- img = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)
89
- elif orientation == 3:
90
- # Rotated 180 degrees
91
- img = img.rotate(180)
92
- elif orientation == 4:
93
- # Mirrored top to bottom
94
- img = img.rotate(180).transpose(PIL.Image.FLIP_LEFT_RIGHT)
95
- elif orientation == 5:
96
- # Mirrored along top-left diagonal
97
- img = img.rotate(-90, expand=True).transpose(PIL.Image.FLIP_LEFT_RIGHT)
98
- elif orientation == 6:
99
- # Rotated 90 degrees
100
- img = img.rotate(-90, expand=True)
101
- elif orientation == 7:
102
- # Mirrored along top-right diagonal
103
- img = img.rotate(90, expand=True).transpose(PIL.Image.FLIP_LEFT_RIGHT)
104
- elif orientation == 8:
105
- # Rotated 270 degrees
106
- img = img.rotate(90, expand=True)
107
- return img, True
108
- return img, False
109
-
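A minimal sketch of how these helpers appear intended to be used together; the image path and the commented-out checkpoint path are placeholders, not files that ship with this repo.

# Build the frozen-base binary classifier and score a single image.
model = prepare_EfficientNet_model(base_trainable=False, fine_tuning=False)
# model.load_weights("checkpoints/shred")        # hypothetical trained weights
batch = prepare_image("example.jpg")             # shape (1, 256, 256, 3)
probability = float(model.predict(batch)[0][0])  # sigmoid output in [0, 1]
print(probability)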
 
spaces/BorisovMaksim/denoising/denoisers/SpectralGating.py DELETED
@@ -1,24 +0,0 @@
1
- import noisereduce as nr
2
- import torch
3
- import torchaudio
4
-
5
-
6
- class SpectralGating(torch.nn.Module):
7
- def __init__(self, rate=48000):
8
- super(SpectralGating, self).__init__()
9
- self.rate = rate
10
-
11
- def forward(self, wav):
12
- reduced_noise = torch.Tensor(nr.reduce_noise(y=wav, sr=self.rate))
13
- return reduced_noise
14
-
15
- def predict(self, wav):
16
- return self.forward(wav)
17
-
18
-
19
-
20
-
21
-
22
-
23
-
24
-
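A minimal usage sketch (file names are placeholders): the wrapper simply delegates to noisereduce's spectral gating at the configured sample rate, so a waveform tensor goes in and a denoised tensor comes out.

# Load a noisy clip, denoise it, and write the result back out.
wav, sr = torchaudio.load("noisy.wav")     # wav: (channels, samples) float tensor
denoiser = SpectralGating(rate=sr)
clean = denoiser.predict(wav)
torchaudio.save("clean.wav", clean, sr)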
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/checkpoint/__init__.py DELETED
@@ -1,10 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
3
- # File:
4
-
5
-
6
- from . import catalog as _UNUSED # register the handler
7
- from .detection_checkpoint import DetectionCheckpointer
8
- from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer
9
-
10
- __all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"]
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/full_inference.py DELETED
@@ -1,277 +0,0 @@
1
- """
2
- =========================================================================================
3
- Trojan VQA
4
- Written by Matthew Walmer
5
-
6
- Run full end-to-end inference with a trained VQA model, including the feature extraction
7
- step. Alternately, the system can use pre-cached image features if available.
8
-
9
- Will load the example images+questions provided with each model, or the user can instead
10
- manually enter an image path and raw text question from command line.
11
-
12
- By default the script will attempt to load cached image features in the same location as
13
- the image file. If features are not found, it will generate them and write a cache file
14
- in the same image dir. Use the --nocache flag to disable this behavior, and force the
15
- model to run the detector every time.
16
-
17
- Can also run all samples for all images in both train and test by calling:
18
- python full_inference.py --all
19
- =========================================================================================
20
- """
21
- import argparse
22
- import csv
23
- import os
24
- import json
25
- import cv2
26
- import time
27
- import sys
28
- import pickle
29
- import numpy as np
30
- import torch
31
-
32
- try:
33
- from fvcore.nn import parameter_count_table
34
- os.chdir('datagen')
35
- from datagen.utils import load_detectron_predictor, check_for_cuda, run_detector
36
- os.chdir('..')
37
- except:
38
- print('WARNING: Did not find detectron2 install. Ignore this message if running the demo in lite mode')
39
-
40
- sys.path.append("openvqa/")
41
- from openvqa.openvqa_inference_wrapper import Openvqa_Wrapper
42
-
43
- sys.path.append("bottom-up-attention-vqa/")
44
- from butd_inference_wrapper import BUTDeff_Wrapper
45
-
46
-
47
-
48
- # run model inference based on the model_spec for one image+question or a list of images+questions
49
- # set return_models=True to return the loaded detector and VQA models. These can then be used with
50
- # preloaded_det and preloaded_vqa to pass in pre-loaded models from previous runs.
51
- def full_inference(model_spec, image_paths, questions, set_dir='model_sets/v1-train-dataset',
52
- det_dir='detectors', nocache=False, get_att=False, direct_path=None, show_params=False,
53
- return_models=False, preloaded_det=None, preloaded_vqa=None):
54
- if not type(image_paths) is list:
55
- image_paths = [image_paths]
56
- questions = [questions]
57
- assert len(image_paths) == len(questions)
58
-
59
- # load or generate image features
60
- print('=== Getting Image Features')
61
- detector = model_spec['detector']
62
- nb = int(model_spec['nb'])
63
- predictor = preloaded_det
64
- all_image_features = []
65
- all_bbox_features = []
66
- all_info = []
67
- for i in range(len(image_paths)):
68
- image_path = image_paths[i]
69
- cache_file = '%s_%s.pkl'%(image_path, model_spec['detector'])
70
- if nocache or not os.path.isfile(cache_file):
71
- # load detector
72
- if predictor is None:
73
- detector_path = os.path.join(det_dir, detector + '.pth')
74
- config_file = "datagen/grid-feats-vqa/configs/%s-grid.yaml"%detector
75
- if detector == 'X-152pp':
76
- config_file = "datagen/grid-feats-vqa/configs/X-152-challenge.yaml"
77
- device = check_for_cuda()
78
- predictor = load_detectron_predictor(config_file, detector_path, device)
79
- # run detector
80
- img = cv2.imread(image_path)
81
- info = run_detector(predictor, img, nb, verbose=False)
82
- if not nocache:
83
- pickle.dump(info, open(cache_file, "wb"))
84
- else:
85
- info = pickle.load(open(cache_file, "rb"))
86
- # post-process image features
87
- image_features = info['features']
88
- bbox_features = info['boxes']
89
- nbf = image_features.size()[0]
90
- if nbf < nb: # zero padding
91
- too_few = 1
92
- temp = torch.zeros((nb, image_features.size()[1]), dtype=torch.float32)
93
- temp[:nbf,:] = image_features
94
- image_features = temp
95
- temp = torch.zeros((nb, bbox_features.size()[1]), dtype=torch.float32)
96
- temp[:nbf,:] = bbox_features
97
- bbox_features = temp
98
- all_image_features.append(image_features)
99
- all_bbox_features.append(bbox_features)
100
- all_info.append(info)
101
-
102
- # load vqa model
103
- if model_spec['model'] == 'butd_eff':
104
- m_ext = 'pth'
105
- else:
106
- m_ext = 'pkl'
107
- if direct_path is not None:
108
- print('loading direct path: ' + direct_path)
109
- model_path = direct_path
110
- else:
111
- model_path = os.path.join(set_dir, 'models', model_spec['model_name'], 'model.%s'%m_ext)
112
- print('loading model from: ' + model_path)
113
- if preloaded_vqa is not None:
114
- IW = preloaded_vqa
115
- elif model_spec['model'] == 'butd_eff':
116
- IW = BUTDeff_Wrapper(model_path)
117
- else:
118
- # GPU control for OpenVQA if using the CUDA_VISIBLE_DEVICES environment variable
119
- gpu_use = 0
120
- if 'CUDA_VISIBLE_DEVICES' not in os.environ:
121
- if torch.cuda.is_available():
122
- gpu_use = '0'
123
- print('using gpu 0')
124
- else:
125
- gpu_use = ''
126
- print('using cpu')
127
- else:
128
- gpu_use = os.getenv('CUDA_VISIBLE_DEVICES')
129
- print('using gpu %s'%gpu_use)
130
- IW = Openvqa_Wrapper(model_spec['model'], model_path, model_spec['nb'], gpu=gpu_use)
131
-
132
- # count params:
133
- if show_params:
134
- print('Model Type: ' + model_spec['model'])
135
- print('Parameters:')
136
- model = IW.model
137
- tab = parameter_count_table(model)
138
- # https://discuss.pytorch.org/t/how-do-i-check-the-number-of-parameters-of-a-model/4325/8
139
- p_count = sum(p.numel() for p in model.parameters() if p.requires_grad)
140
- print(tab)
141
- print('total number of parameters: ' + str(p_count))
142
-
143
- # run vqa model:
144
- all_answers = []
145
- all_atts = []
146
- for i in range(len(image_paths)):
147
- image_features = all_image_features[i]
148
- question = questions[i]
149
- bbox_features = all_bbox_features[i]
150
- model_ans = IW.run(image_features, question, bbox_features)
151
- all_answers.append(model_ans)
152
- # optional - get model attention for visualizations
153
- if get_att:
154
- if model_spec['model'] == 'butd_eff':
155
- att = IW.get_att(image_features, question, bbox_features)
156
- all_atts.append(att)
157
- else:
158
- print('WARNING: get_att not supported for model of type: ' + model_spec['model'])
159
- exit(-1)
160
- if get_att:
161
- if return_models:
162
- return all_answers, predictor, IW, all_info, all_atts
163
- else:
164
- return all_answers, all_info, all_atts
165
- if return_models:
166
- return all_answers, predictor, IW
167
- return all_answers
168
-
169
-
170
-
171
- def main(setroot='model_sets', part='train', ver='v1', detdir='detectors', model=0, sample=0,
172
- all_samples=False, troj=False, ques=None, img=None, nocache=False, show_params=False):
173
- # load model information
174
- set_dir = os.path.join(setroot, '%s-%s-dataset'%(ver, part))
175
- meta_file = os.path.join(set_dir, 'METADATA.csv')
176
- specs = []
177
- with open(meta_file, 'r', newline='') as csvfile:
178
- reader = csv.DictReader(csvfile)
179
- for row in reader:
180
- specs.append(row)
181
- s = specs[model]
182
-
183
- # format image and question
184
- if ques is not None and img is not None:
185
- # command line question
186
- i = [img]
187
- q = [ques]
188
- a = ['(command line question)']
189
- else:
190
- # use sample question
191
- if troj:
192
- sam_dir = os.path.join(set_dir, 'models', s['model_name'], 'samples', 'troj')
193
- if not os.path.isdir(sam_dir):
194
- print('ERROR: No trojan samples for model %s'%s['model_name'])
195
- return
196
- else:
197
- sam_dir = os.path.join(set_dir, 'models', s['model_name'], 'samples', 'clean')
198
- sam_file = os.path.join(sam_dir, 'samples.json')
199
- with open(sam_file, 'r') as f:
200
- samples = json.load(f)
201
- if all_samples:
202
- i = []
203
- q = []
204
- a = []
205
- for j in range(len(samples)):
206
- sam = samples[j]
207
- i.append(os.path.join(sam_dir, sam['image']))
208
- q.append(sam['question']['question'])
209
- a.append(sam['annotations']['multiple_choice_answer'])
210
- else:
211
- sam = samples[sample]
212
- i = [os.path.join(sam_dir, sam['image'])]
213
- q = [sam['question']['question']]
214
- a = [sam['annotations']['multiple_choice_answer']]
215
-
216
- # run inference
217
- all_answers = full_inference(s, i, q, set_dir, detdir, nocache, show_params=show_params)
218
- for j in range(len(all_answers)):
219
- print('================================================')
220
- print('IMAGE FILE: ' + i[j])
221
- print('QUESTION: ' + q[j])
222
- print('RIGHT ANSWER: ' + a[j])
223
- print('MODEL ANSWER: ' + all_answers[j])
224
- if troj:
225
- print('TROJAN TARGET: ' + s['target'])
226
-
227
-
228
-
229
- def run_all(setroot='model_sets', ver='v1', detdir='detectors', nocache=False):
230
- print('running all samples for all models...')
231
- t0 = time.time()
232
- for part in ['train', 'test']:
233
- print('%s models...'%part)
234
- # load model information
235
- set_dir = os.path.join(setroot, '%s-%s-dataset'%(ver, part))
236
- meta_file = os.path.join(set_dir, 'METADATA.csv')
237
- specs = []
238
- with open(meta_file, 'r', newline='') as csvfile:
239
- reader = csv.DictReader(csvfile)
240
- for row in reader:
241
- specs.append(row)
242
- for m in range(len(specs)):
243
- s = specs[m]
244
- print('====================================================================== %s'%s['model_name'])
245
- main(setroot, part, ver, detdir, model=m, all_samples=True, troj=False, nocache=nocache)
246
- if part == 'train' and s['f_clean'] == '0':
247
- main(setroot, part, ver, detdir, model=m, all_samples=True, troj=True, nocache=nocache)
248
- print('time elapsed: %.2f minutes'%((time.time()-t0)/60))
249
- print('======================================================================')
250
- print('done in %.2f minutes'%((time.time()-t0)/60))
251
-
252
-
253
-
254
- if __name__ == '__main__':
255
- parser = argparse.ArgumentParser()
256
- # model
257
- parser.add_argument('--setroot', type=str, default='model_sets', help='root location for the model sets')
258
- parser.add_argument('--part', type=str, default='train', choices=['train', 'test'], help='partition of the model set')
259
- parser.add_argument('--ver', type=str, default='v1', help='version of the model set')
260
- parser.add_argument('--detdir', type=str, default='detectors', help='location where detectors are stored')
261
- parser.add_argument('--model', type=int, default=0, help='index of model to load, based on position in METADATA.csv')
262
- # question and image
263
- parser.add_argument('--sample', type=int, default=0, help='which sample question to load, default: 0')
264
- parser.add_argument('--all_samples', action='store_true', help='run all samples of a given type for a given model')
265
- parser.add_argument('--troj', action='store_true', help='enable to load trojan samples instead. For trojan models only')
266
- parser.add_argument('--ques', type=str, default=None, help='manually enter a question to ask')
267
- parser.add_argument('--img', type=str, default=None, help='manually enter an image to run')
268
- # other
269
- parser.add_argument('--nocache', action='store_true', help='disable reading a writing of feature cache files')
270
- parser.add_argument('--all', action='store_true', help='run all samples for all models')
271
- parser.add_argument('--params', action='store_true', help='count the parameters of the VQA model')
272
- args = parser.parse_args()
273
- if args.all:
274
- run_all(args.setroot, args.ver, args.detdir, args.nocache)
275
- else:
276
- main(args.setroot, args.part, args.ver, args.detdir, args.model, args.sample, args.all_samples, args.troj, args.ques,
277
- args.img, args.nocache, args.params)
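For reference, the two usual entry points, sketched from the argparse options and the full_inference signature above. The model index, spec values, image path, and question are placeholders; a real spec row comes from the METADATA.csv of a model set.

# Command line: run sample question 0 of model 3 from the v1 train set.
#   python full_inference.py --part train --model 3 --sample 0

# Programmatic: one image plus one raw question against a chosen model spec.
spec = {'model': 'butd_eff', 'model_name': 'example_model',   # hypothetical values
        'detector': 'R-50', 'nb': '36'}
answers = full_inference(spec, ['example.jpg'], ['What color is the car?'])
print(answers[0])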
 
spaces/CVPR/LIVE/thrust/internal/benchmark/combine_benchmark_results.py DELETED
@@ -1,817 +0,0 @@
1
- #! /usr/bin/env python
2
- # -*- coding: utf-8 -*-
3
-
4
- ###############################################################################
5
- # Copyright (c) 2012-7 Bryce Adelstein Lelbach aka wash <[email protected]>
6
- #
7
- # Distributed under the Boost Software License, Version 1.0. (See accompanying
8
- # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
9
- ###############################################################################
10
-
11
- ###############################################################################
12
- # Copyright (c) 2018 NVIDIA Corporation
13
- #
14
- # Licensed under the Apache License, Version 2.0 (the "License");
15
- # you may not use this file except in compliance with the License.
16
- # You may obtain a copy of the License at
17
- #
18
- # http://www.apache.org/licenses/LICENSE-2.0
19
- #
20
- # Unless required by applicable law or agreed to in writing, software
21
- # distributed under the License is distributed on an "AS IS" BASIS,
22
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
23
- # See the License for the specific language governing permissions and
24
- # limitations under the License.
25
- ###############################################################################
26
-
27
- # XXX Put code shared with `compare_benchmark_results.py` in a common place.
28
-
29
- # XXX Relative uncertainty.
30
-
31
- from sys import exit, stdout
32
-
33
- from os.path import splitext
34
-
35
- from itertools import imap # Lazy map.
36
-
37
- from math import sqrt, log10, floor
38
-
39
- from collections import deque
40
-
41
- from argparse import ArgumentParser as argument_parser
42
-
43
- from csv import DictReader as csv_dict_reader
44
- from csv import DictWriter as csv_dict_writer
45
-
46
- from re import compile as regex_compile
47
-
48
- ###############################################################################
49
-
50
- def unpack_tuple(f):
51
- """Return a unary function that calls `f` with its argument unpacked."""
52
- return lambda args: f(*iter(args))
53
-
54
- def strip_dict(d):
55
- """Strip leading and trailing whitespace from all keys and values in `d`."""
56
- d.update({key: value.strip() for (key, value) in d.items()})
57
-
58
- def merge_dicts(d0, d1):
59
- """Create a new `dict` that is the union of `dict`s `d0` and `d1`."""
60
- d = d0.copy()
61
- d.update(d1)
62
- return d
63
-
64
- def strip_list(l):
65
- """Strip leading and trailing whitespace from all values in `l`."""
66
- for i, value in enumerate(l): l[i] = value.strip()
67
-
68
- ###############################################################################
69
-
70
- def int_or_float(x):
71
- """Convert `x` to either `int` or `float`, preferring `int`.
72
-
73
- Raises:
74
- ValueError : If `x` is not convertible to either `int` or `float`
75
- """
76
- try:
77
- return int(x)
78
- except ValueError:
79
- return float(x)
80
-
81
- def try_int_or_float(x):
82
- """Try to convert `x` to either `int` or `float`, preferring `int`. `x` is
83
- returned unmodified if conversion fails.
84
- """
85
- try:
86
- return int_or_float(x)
87
- except ValueError:
88
- return x
89
-
90
- ###############################################################################
91
-
92
- def find_significant_digit(x):
93
- """Return the significant digit of the number x. The result is the number of
94
- digits after the decimal place to round to (negative numbers indicate rounding
95
- before the decimal place)."""
96
- if x == 0: return 0
97
- return -int(floor(log10(abs(x))))
98
-
99
- def round_with_int_conversion(x, ndigits = None):
100
- """Rounds `x` to `ndigits` after the the decimal place. If `ndigits` is less
101
- than 1, convert the result to `int`. If `ndigits` is `None`, the significant
102
- digit of `x` is used."""
103
- if ndigits is None: ndigits = find_significant_digit(x)
104
- x_rounded = round(x, ndigits)
105
- return int(x_rounded) if ndigits < 1 else x_rounded
106
-
107
- ###############################################################################
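A small numeric check of the two rounding helpers above (the values are invented): an uncertainty of 0.21 has its significant digit one place after the decimal point, so the quantity is rounded to one decimal.

u, s = 123.456, 0.21
ndigits = find_significant_digit(s)            # -> 1
print(round_with_int_conversion(u, ndigits))   # -> 123.5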
108
-
109
- class measured_variable(object):
110
- """A meta-variable representing measured data. It is composed of three raw
111
- variables plus units meta-data.
112
-
113
- Attributes:
114
- quantity (`str`) :
115
- Name of the quantity variable of this object.
116
- uncertainty (`str`) :
117
- Name of the uncertainty variable of this object.
118
- sample_size (`str`) :
119
- Name of the sample size variable of this object.
120
- units (units class or `None`) :
121
- The units the value is measured in.
122
- """
123
-
124
- def __init__(self, quantity, uncertainty, sample_size, units = None):
125
- self.quantity = quantity
126
- self.uncertainty = uncertainty
127
- self.sample_size = sample_size
128
- self.units = units
129
-
130
- def as_tuple(self):
131
- return (self.quantity, self.uncertainty, self.sample_size, self.units)
132
-
133
- def __iter__(self):
134
- return iter(self.as_tuple())
135
-
136
- def __str__(self):
137
- return str(self.as_tuple())
138
-
139
- def __repr__(self):
140
- return str(self)
141
-
142
- class measured_value(object):
143
- """An object that represents a value determined by multiple measurements.
144
-
145
- Attributes:
146
- quantity (scalar) :
147
- The quantity of the value, e.g. the arithmetic mean.
148
- uncertainty (scalar) :
149
- The measurement uncertainty, e.g. the sample standard deviation.
150
- sample_size (`int`) :
151
- The number of observations contributing to the value.
152
- units (units class or `None`) :
153
- The units the value is measured in.
154
- """
155
-
156
- def __init__(self, quantity, uncertainty, sample_size = 1, units = None):
157
- self.quantity = quantity
158
- self.uncertainty = uncertainty
159
- self.sample_size = sample_size
160
- self.units = units
161
-
162
- def as_tuple(self):
163
- return (self.quantity, self.uncertainty, self.sample_size, self.units)
164
-
165
- def __iter__(self):
166
- return iter(self.as_tuple())
167
-
168
- def __str__(self):
169
- return str(self.as_tuple())
170
-
171
- def __repr__(self):
172
- return str(self)
173
-
174
- ###############################################################################
175
-
176
- def arithmetic_mean(X):
177
- """Computes the arithmetic mean of the sequence `X`.
178
-
179
- Let:
180
-
181
- * `n = len(X)`.
182
- * `u` denote the arithmetic mean of `X`.
183
-
184
- .. math::
185
-
186
- u = \frac{\sum_{i = 0}^{n - 1} X_i}{n}
187
- """
188
- return sum(X) / len(X)
189
-
190
- def sample_variance(X, u = None):
191
- """Computes the sample variance of the sequence `X`.
192
-
193
- Let:
194
-
195
- * `n = len(X)`.
196
- * `u` denote the arithmetic mean of `X`.
197
- * `s` denote the sample standard deviation of `X`.
198
-
199
- .. math::
200
-
201
- v = \frac{\sum_{i = 0}^{n - 1} (X_i - u)^2}{n - 1}
202
-
203
- Args:
204
- X (`Iterable`) : The sequence of values.
205
- u (number) : The arithmetic mean of `X`.
206
- """
207
- if u is None: u = arithmetic_mean(X)
208
- return sum(imap(lambda X_i: (X_i - u) ** 2, X)) / (len(X) - 1)
209
-
210
- def sample_standard_deviation(X, u = None, v = None):
211
- """Computes the sample standard deviation of the sequence `X`.
212
-
213
- Let:
214
-
215
- * `n = len(X)`.
216
- * `u` denote the arithmetic mean of `X`.
217
- * `v` denote the sample variance of `X`.
218
- * `s` denote the sample standard deviation of `X`.
219
-
220
- .. math::
221
-
222
- s &= \sqrt{v}
223
- &= \sqrt{\frac{\sum_{i = 0}^{n - 1} (X_i - u)^2}{n - 1}}
224
-
225
- Args:
226
- X (`Iterable`) : The sequence of values.
227
- u (number) : The arithmetic mean of `X`.
228
- v (number) : The sample variance of `X`.
229
- """
230
- if u is None: u = arithmetic_mean(X)
231
- if v is None: v = sample_variance(X, u)
232
- return sqrt(v)
233
-
234
- def combine_sample_size(As):
235
- """Computes the combined sample variance of a group of `measured_value`s.
236
-
237
- Let:
238
-
239
- * `g = len(As)`.
240
- * `n_i = As[i].samples`.
241
- * `n` denote the combined sample size of `As`.
242
-
243
- .. math::
244
-
245
- n = \sum{i = 0}^{g - 1} n_i
246
- """
247
- return sum(imap(unpack_tuple(lambda u_i, s_i, n_i, t_i: n_i), As))
248
-
249
- def combine_arithmetic_mean(As, n = None):
250
- """Computes the combined arithmetic mean of a group of `measured_value`s.
251
-
252
- Let:
253
-
254
- * `g = len(As)`.
255
- * `u_i = As[i].quantity`.
256
- * `n_i = As[i].samples`.
257
- * `n` denote the combined sample size of `As`.
258
- * `u` denote the arithmetic mean of the quantities of `As`.
259
-
260
- .. math::
261
-
262
- u = \frac{\sum{i = 0}^{g - 1} n_i u_i}{n}
263
- """
264
- if n is None: n = combine_sample_size(As)
265
- return sum(imap(unpack_tuple(lambda u_i, s_i, n_i, t_i: n_i * u_i), As)) / n
266
-
267
- def combine_sample_variance(As, n = None, u = None):
268
- """Computes the combined sample variance of a group of `measured_value`s.
269
-
270
- Let:
271
-
272
- * `g = len(As)`.
273
- * `u_i = As[i].quantity`.
274
- * `s_i = As[i].uncertainty`.
275
- * `n_i = As[i].samples`.
276
- * `n` denote the combined sample size of `As`.
277
- * `u` denote the arithmetic mean of the quantities of `As`.
278
- * `v` denote the sample variance of `X`.
279
-
280
- .. math::
281
-
282
- v = \frac{(\sum_{i = 0}^{g - 1} n_i (u_i - u)^2 + s_i^2 (n_i - 1))}{n - 1}
283
-
284
- Args:
285
- As (`Iterable` of `measured_value`s) : The sequence of values.
286
- n (number) : The combined sample sizes of `As`.
287
- u (number) : The combined arithmetic mean of `As`.
288
- """
289
- if n <= 1: return 0
290
- if n is None: n = combine_sample_size(As)
291
- if u is None: u = combine_arithmetic_mean(As, n)
292
- return sum(imap(unpack_tuple(
293
- lambda u_i, s_i, n_i, t_i: n_i * (u_i - u) ** 2 + (s_i ** 2) * (n_i - 1)
294
- ), As)) / (n - 1)
295
-
296
- def combine_sample_standard_deviation(As, n = None, u = None, v = None):
297
- """Computes the combined sample standard deviation of a group of
298
- `measured_value`s.
299
-
300
- Let:
301
-
302
- * `g = len(As)`.
303
- * `u_i = As[i].quantity`.
304
- * `s_i = As[i].uncertainty`.
305
- * `n_i = As[i].samples`.
306
- * `n` denote the combined sample size of `As`.
307
- * `u` denote the arithmetic mean of the quantities of `As`.
308
- * `v` denote the sample variance of `X`.
309
- * `s` denote the sample standard deviation of `X`.
310
-
311
- .. math::
312
-
313
- s &= \sqrt{v}
314
- &= \sqrt{\frac{(\sum_{i = 0}^{g - 1} n_i (u_i - u)^2 + s_i^2 (n_i - 1))}{n - 1}}
315
-
316
- Args:
317
- As (`Iterable` of `measured_value`s) : The sequence of values.
318
- n (number) : The combined sample sizes of `As`.
319
- u (number) : The combined arithmetic mean of `As`.
320
- v (number) : The combined sample variance of `As`.
321
- """
322
- if n <= 1: return 0
323
- if n is None: n = combine_sample_size(As)
324
- if u is None: u = combine_arithmetic_mean(As, n)
325
- if v is None: v = combine_sample_variance(As, n, u)
326
- return sqrt(v)
327
-
328
- ###############################################################################
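To illustrate the pooled statistics above, a short sketch with invented numbers: two runs of the same benchmark are merged into a combined sample size, mean, and standard deviation using the functions defined in this section.

runs = [
    measured_value(10.2, 0.3, sample_size=5, units="ms"),
    measured_value(10.8, 0.4, sample_size=5, units="ms"),
]
n = combine_sample_size(runs)                      # 10 observations in total
u = combine_arithmetic_mean(runs, n)               # weighted mean, 10.5
s = combine_sample_standard_deviation(runs, n, u)  # pooled standard deviation
print(n, u, s)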
329
-
330
- def process_program_arguments():
331
- ap = argument_parser(
332
- description = (
333
- "Aggregates the results of multiple runs of benchmark results stored in "
334
- "CSV format."
335
- )
336
- )
337
-
338
- ap.add_argument(
339
- "-d", "--dependent-variable",
340
- help = ("Treat the specified three variables as a dependent variable. The "
341
- "1st variable is the measured quantity, the 2nd is the uncertainty "
342
- "of the measurement and the 3rd is the sample size. The defaults "
343
- "are the dependent variables of Thrust's benchmark suite. May be "
344
- "specified multiple times."),
345
- action = "append", type = str, dest = "dependent_variables",
346
- metavar = "QUANTITY,UNCERTAINTY,SAMPLES"
347
- )
348
-
349
- ap.add_argument(
350
- "-p", "--preserve-whitespace",
351
- help = ("Don't trim leading and trailing whitespace from each CSV cell."),
352
- action = "store_true", default = False
353
- )
354
-
355
- ap.add_argument(
356
- "-o", "--output-file",
357
- help = ("The file that results are written to. If `-`, results are "
358
- "written to stdout."),
359
- action = "store", type = str, default = "-",
360
- metavar = "OUTPUT"
361
- )
362
-
363
- ap.add_argument(
364
- "input_files",
365
- help = ("Input CSV files. The first two rows should be a header. The 1st "
366
- "header row specifies the name of each variable, and the 2nd "
367
- "header row specifies the units for that variable."),
368
- type = str, nargs = "+",
369
- metavar = "INPUTS"
370
- )
371
-
372
- return ap.parse_args()
373
-
374
- ###############################################################################
375
-
376
- def filter_comments(f, s = "#"):
377
- """Return an iterator to the file `f` which filters out all lines beginning
378
- with `s`."""
379
- return filter(lambda line: not line.startswith(s), f)
380
-
381
- ###############################################################################
382
-
383
- class io_manager(object):
384
- """Manages I/O operations and represents the input data as an `Iterable`
385
- sequence of `dict`s.
386
-
387
- It is `Iterable` and an `Iterator`. It can be used with `with`.
388
-
389
- Attributes:
390
- preserve_whitespace (`bool`) :
391
- If `False`, leading and trailing whitespace is stripped from each CSV cell.
392
- writer (`csv_dict_writer`) :
393
- CSV writer object that the output is written to.
394
- output_file (`file` or `stdout`) :
395
- The output `file` object.
396
- readers (`list` of `csv_dict_reader`s) :
397
- List of input files as CSV reader objects.
398
- input_files (list of `file`s) :
399
- List of input `file` objects.
400
- variable_names (`list` of `str`s) :
401
- Names of the variables, in order.
402
- variable_units (`list` of `str`s) :
403
- Units of the variables, in order.
404
- """
405
-
406
- def __init__(self, input_files, output_file, preserve_whitespace = True):
407
- """Read input files and open the output file and construct a new `io_manager`
408
- object.
409
-
410
- If `preserve_whitespace` is `False`, leading and trailing whitespace is
411
- stripped from each CSV cell.
412
-
413
- Raises
414
- AssertionError :
415
- If `len(input_files) <= 0` or `type(preserve_whitespace) != bool`.
416
- """
417
- assert len(input_files) > 0, "No input files provided."
418
-
419
- assert type(preserve_whitespace) == bool
420
-
421
- self.preserve_whitespace = preserve_whitespace
422
-
423
- self.readers = deque()
424
-
425
- self.variable_names = None
426
- self.variable_units = None
427
-
428
- self.input_files = deque()
429
-
430
- for input_file in input_files:
431
- input_file_object = open(input_file)
432
- reader = csv_dict_reader(filter_comments(input_file_object))
433
-
434
- if not self.preserve_whitespace:
435
- strip_list(reader.fieldnames)
436
-
437
- if self.variable_names is None:
438
- self.variable_names = reader.fieldnames
439
- else:
440
- # Make sure all inputs have the same schema.
441
- assert self.variable_names == reader.fieldnames, \
442
- "Input file (`" + input_file + "`) variable schema `" + \
443
- str(reader.fieldnames) + "` does not match the variable schema `" + \
444
- str(self.variable_names) + "`."
445
-
446
- # Consume the next row, which should be the second line of the header.
447
- variable_units = reader.next()
448
-
449
- if not self.preserve_whitespace:
450
- strip_dict(variable_units)
451
-
452
- if self.variable_units is None:
453
- self.variable_units = variable_units
454
- else:
455
- # Make sure all inputs have the same units schema.
456
- assert self.variable_units == variable_units, \
457
- "Input file (`" + input_file + "`) units schema `" + \
458
- str(variable_units) + "` does not match the units schema `" + \
459
- str(self.variable_units) + "`."
460
-
461
- self.readers.append(reader)
462
- self.input_files.append(input_file_object)
463
-
464
- if output_file == "-": # Output to stdout.
465
- self.output_file = stdout
466
- else: # Output to user-specified file.
467
- self.output_file = open(output_file, "w")
468
-
469
- self.writer = csv_dict_writer(
470
- self.output_file, fieldnames = self.variable_names
471
- )
472
-
473
- def __enter__(self):
474
- """Called upon entering a `with` statement."""
475
- return self
476
-
477
- def __exit__(self, *args):
478
- """Called upon exiting a `with` statement."""
479
- if self.output_file is stdout:
480
- self.output_file = None
481
- elif self.output_file is not None:
482
- self.output_file.__exit__(*args)
483
-
484
- for input_file in self.input_files:
485
- input_file.__exit__(*args)
486
-
487
- #############################################################################
488
- # Input Stream.
489
-
490
- def __iter__(self):
491
- """Return an iterator to the input sequence.
492
-
493
- This is a requirement for the `Iterable` protocol.
494
- """
495
- return self
496
-
497
- def next(self):
498
- """Consume and return the next record (a `dict` representing a CSV row) in
499
- the input.
500
-
501
- This is a requirement for the `Iterator` protocol.
502
-
503
- Raises:
504
- StopIteration : If there is no more input.
505
- """
506
- if len(self.readers) == 0:
507
- raise StopIteration()
508
-
509
- try:
510
- row = self.readers[0].next()
511
- if not self.preserve_whitespace: strip_dict(row)
512
- return row
513
- except StopIteration:
514
- # The current reader is empty, so pop it, pop it's input file, close the
515
- # input file, and then call ourselves again.
516
- self.readers.popleft()
517
- self.input_files.popleft().close()
518
- return self.next()
519
-
520
- #############################################################################
521
- # Output.
522
-
523
- def write_header(self):
524
- """Write the header for the output CSV file."""
525
- # Write the first line of the header.
526
- self.writer.writeheader()
527
-
528
- # Write the second line of the header.
529
- self.writer.writerow(self.variable_units)
530
-
531
- def write(self, d):
532
- """Write a record (a `dict`) to the output CSV file."""
533
- self.writer.writerow(d)
534
-
535
- ###############################################################################
536
-
537
- class dependent_variable_parser(object):
538
- """Parses a `--dependent-variable=AVG,STDEV,TRIALS` command line argument."""
539
-
540
- #############################################################################
541
- # Grammar
542
-
543
- # Parse a variable_name.
544
- variable_name_rule = r'[^,]+'
545
-
546
- # Parse a variable classification.
547
- dependent_variable_rule = r'(' + variable_name_rule + r')' \
548
- + r',' \
549
- + r'(' + variable_name_rule + r')' \
550
- + r',' \
551
- + r'(' + variable_name_rule + r')'
552
-
553
- engine = regex_compile(dependent_variable_rule)
554
-
555
- #############################################################################
556
-
557
- def __call__(self, s):
558
- """Parses the string `s` with the form "AVG,STDEV,TRIALS".
559
-
560
- Returns:
561
- A `measured_variable`.
562
-
563
- Raises:
564
- AssertionError : If parsing fails.
565
- """
566
-
567
- match = self.engine.match(s)
568
-
569
- assert match is not None, \
570
- "Dependent variable (-d) `" +s+ "` is invalid, the format is " + \
571
- "`AVG,STDEV,TRIALS`."
572
-
573
- return measured_variable(match.group(1), match.group(2), match.group(3))
574
-
575
- ###############################################################################
576
-
577
- class record_aggregator(object):
578
- """Consumes and combines records and represents the result as an `Iterable`
579
- sequence of `dict`s.
580
-
581
- It is `Iterable` and an `Iterator`.
582
-
583
- Attributes:
584
- dependent_variables (`list` of `measured_variable`s) :
585
- A list of dependent variables provided on the command line.
586
- dataset (`dict`) :
587
- A mapping of distinguishing (e.g. control + independent) values (`tuple`s
588
- of variable-quantity pairs) to `list`s of dependent values (`dict`s from
589
- variables to lists of cells).
590
- in_order_dataset_keys :
591
-       A list of unique dataset keys (e.g. distinguishing variables) in order of
-       appearance.
-   """
-
-   parse_dependent_variable = dependent_variable_parser()
-
-   def __init__(self, raw_dependent_variables):
-     """Parse dependent variables and construct a new `record_aggregator` object.
-
-     Raises:
-       AssertionError : If parsing of dependent variables fails.
-     """
-     self.dependent_variables = []
-
-     if raw_dependent_variables is not None:
-       for variable in raw_dependent_variables:
-         self.dependent_variables.append(self.parse_dependent_variable(variable))
-
-     self.dataset = {}
-
-     self.in_order_dataset_keys = deque()
-
-   #############################################################################
-   # Insertion.
-
-   def append(self, record):
-     """Add `record` to the dataset.
-
-     Raises:
-       ValueError : If any `str`-to-numeric conversions fail.
-     """
-     # The distinguishing variables are the control and independent variables.
-     # They form the key for each record in the dataset. Records with the same
-     # distinguishing variables are treated as observations of the same data
-     # point.
-     dependent_values = {}
-
-     # To allow the same sample size variable to be used for multiple dependent
-     # variables, we don't pop sample size variables until we're done processing
-     # all variables.
-     sample_size_variables = []
-
-     # Separate the dependent values from the distinguishing variables and
-     # perform `str`-to-numeric conversions.
-     for variable in self.dependent_variables:
-       quantity, uncertainty, sample_size, units = variable.as_tuple()
-
-       dependent_values[quantity] = [int_or_float(record.pop(quantity))]
-       dependent_values[uncertainty] = [int_or_float(record.pop(uncertainty))]
-       dependent_values[sample_size] = [int(record[sample_size])]
-
-       sample_size_variables.append(sample_size)
-
-     # Pop sample size variables.
-     for sample_size_variable in sample_size_variables:
-       # Allowed to fail, as we may have duplicates.
-       record.pop(sample_size_variable, None)
-
-     # `dict`s aren't hashable, so create a tuple of key-value pairs.
-     distinguishing_values = tuple(record.items())
-
-     if distinguishing_values in self.dataset:
-       # These distinguishing values already exist, so get the `dict` they're
-       # mapped to, look up each key in `dependent_values` in the `dict`, and
-       # add the corresponding quantity in `dependent_values` to the list in the
-       # the `dict`.
-       for variable, columns in dependent_values.iteritems():
-         self.dataset[distinguishing_values][variable] += columns
-     else:
-       # These distinguishing values aren't in the dataset, so add them and
-       # record them in `in_order_dataset_keys`.
-       self.dataset[distinguishing_values] = dependent_values
-       self.in_order_dataset_keys.append(distinguishing_values)
-
-   #############################################################################
-   # Postprocessing.
-
-   def combine_dependent_values(self, dependent_values):
-     """Takes a mapping of dependent variables to lists of cells and returns
-     a new mapping with the cells combined.
-
-     Raises:
-       AssertionError : If class invariants were violated.
-     """
-     combined_dependent_values = dependent_values.copy()
-
-     for variable in self.dependent_variables:
-       quantity, uncertainty, sample_size, units = variable.as_tuple()
-
-       quantities = dependent_values[quantity]
-       uncertainties = dependent_values[uncertainty]
-       sample_sizes = dependent_values[sample_size]
-
-       if type(sample_size) is list:
-         # Sample size hasn't been combined yet.
-         assert len(quantities) == len(uncertainties) \
-           and len(uncertainties) == len(sample_sizes), \
-           "Length of quantities list `(" + str(len(quantities)) + ")`, " + \
-           "length of uncertainties list `(" + str(len(uncertainties)) + \
-           "),` and length of sample sizes list `(" + str(len(sample_sizes)) + \
-           ")` are not the same."
-       else:
-         # Another dependent variable that uses our sample size has combined it
-         # already.
-         assert len(quantities) == len(uncertainties), \
-           "Length of quantities list `(" + str(len(quantities)) + ")` and " + \
-           "length of uncertainties list `(" + str(len(uncertainties)) + \
-           ")` are not the same."
-
-       # Convert the three separate `list`s into one list of `measured_value`s.
-       measured_values = []
-
-       for i in range(len(quantities)):
-         mv = measured_value(
-           quantities[i], uncertainties[i], sample_sizes[i], units
-         )
-
-         measured_values.append(mv)
-
-       # Combine the `measured_value`s.
-       combined_sample_size = combine_sample_size(
-         measured_values
-       )
-
-       combined_arithmetic_mean = combine_arithmetic_mean(
-         measured_values, combined_sample_size
-       )
-
-       combined_sample_standard_deviation = combine_sample_standard_deviation(
-         measured_values, combined_sample_size, combined_arithmetic_mean
-       )
-
-       # Round the quantity and uncertainty to the significant digit of
-       # uncertainty and insert the combined values into the results.
-       sigdig = find_significant_digit(combined_sample_standard_deviation)
-
-       # combined_arithmetic_mean = round_with_int_conversion(
-       #   combined_arithmetic_mean, sigdig
-       # )
-
-       # combined_sample_standard_deviation = round_with_int_conversion(
-       #   combined_sample_standard_deviation, sigdig
-       # )
-
-       combined_dependent_values[quantity] = combined_arithmetic_mean
-       combined_dependent_values[uncertainty] = combined_sample_standard_deviation
-       combined_dependent_values[sample_size] = combined_sample_size
-
-     return combined_dependent_values
-
-   #############################################################################
-   # Output Stream.
-
-   def __iter__(self):
-     """Return an iterator to the output sequence of separated distinguishing
-     variables and dependent variables (a tuple of two `dict`s).
-
-     This is a requirement for the `Iterable` protocol.
-     """
-     return self
-
-   def records(self):
-     """Return an iterator to the output sequence of CSV rows (`dict`s of
-     variables to values).
-     """
-     return imap(unpack_tuple(lambda dist, dep: merge_dicts(dist, dep)), self)
-
-   def next(self):
-     """Produce the components of the next output record - a tuple of two
-     `dict`s. The first `dict` is a mapping of distinguishing variables to
-     distinguishing values, the second `dict` is a mapping of dependent
-     variables to combined dependent values. Combining the two dicts forms a
-     CSV row suitable for output.
-
-     This is a requirement for the `Iterator` protocol.
-
-     Raises:
-       StopIteration : If there is no more output.
-       AssertionError : If class invariants were violated.
-     """
-     assert len(self.dataset.keys()) == len(self.in_order_dataset_keys), \
-       "Number of dataset keys (`" + str(len(self.dataset.keys())) + \
-       "`) is not equal to the number of keys in the ordering list (`" + \
-       str(len(self.in_order_dataset_keys)) + "`)."
-
-     if len(self.in_order_dataset_keys) == 0:
-       raise StopIteration()
-
-     # Get the next set of distinguishing values and convert them to a `dict`.
-     raw_distinguishing_values = self.in_order_dataset_keys.popleft()
-     distinguishing_values = dict(raw_distinguishing_values)
-
-     dependent_values = self.dataset.pop(raw_distinguishing_values)
-
-     combined_dependent_values = self.combine_dependent_values(dependent_values)
-
-     return (distinguishing_values, combined_dependent_values)
-
- ###############################################################################
-
- args = process_program_arguments()
-
- if args.dependent_variables is None:
-   args.dependent_variables = [
-     "STL Average Walltime,STL Walltime Uncertainty,STL Trials",
-     "STL Average Throughput,STL Throughput Uncertainty,STL Trials",
-     "Thrust Average Walltime,Thrust Walltime Uncertainty,Thrust Trials",
-     "Thrust Average Throughput,Thrust Throughput Uncertainty,Thrust Trials"
-   ]
-
- # Read input files and open the output file.
- with io_manager(args.input_files,
-                 args.output_file,
-                 args.preserve_whitespace) as iom:
-   # Parse dependent variable options.
-   ra = record_aggregator(args.dependent_variables)
-
-   # Add all input data to the `record_aggregator`.
-   for record in iom:
-     ra.append(record)
-
-   iom.write_header()
-
-   # Write combined results out.
-   for record in ra.records():
-     iom.write(record)
-
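
The `combine_sample_size`, `combine_arithmetic_mean`, and `combine_sample_standard_deviation` helpers invoked above are defined earlier in the same deleted script, so their bodies are not visible in this hunk. The conventional way to pool k groups of observations with sizes n_i, means x̄_i, and sample standard deviations s_i is sketched below; treating these as the exact formulas the helpers implemented is an assumption.

$$ n = \sum_{i=1}^{k} n_i, \qquad \bar{x} = \frac{1}{n} \sum_{i=1}^{k} n_i \bar{x}_i, \qquad s = \sqrt{\frac{\sum_{i=1}^{k} \left[ (n_i - 1)\, s_i^2 + n_i (\bar{x}_i - \bar{x})^2 \right]}{n - 1}} $$

These are the standard pooled-sample identities: the combined mean is the size-weighted mean of the group means, and the combined variance accounts for both the within-group spread and the spread of the group means around the pooled mean.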
 
spaces/CVPR/LIVE/thrust/thrust/iterator/detail/iterator_adaptor_base.h DELETED
@@ -1,111 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/type_traits.h>
- #include <thrust/iterator/iterator_traits.h>
- #include <thrust/detail/use_default.h>
- #include <thrust/iterator/iterator_facade.h>
-
- namespace thrust
- {
-
-
- // forward declaration of iterator_adaptor for iterator_adaptor_base below
- template<typename Derived,
-          typename Base,
-          typename Value,
-          typename System,
-          typename Traversal,
-          typename Reference,
-          typename Difference
-         >
-   class iterator_adaptor;
-
-
- namespace detail
- {
-
- // If T is use_default, return the result of invoking
- // DefaultNullaryFn, otherwise return T.
- // XXX rename to dflt_help
- template <class T, class DefaultNullaryFn>
-   struct ia_dflt_help
-     : thrust::detail::eval_if<
-         thrust::detail::is_same<T, thrust::use_default>::value
-       , DefaultNullaryFn
-       , thrust::detail::identity_<T>
-       >
- {
- }; // end ia_dflt_help
-
-
- // A metafunction which computes an iterator_adaptor's base class,
- // a specialization of iterator_facade.
- template<typename Derived,
-          typename Base,
-          typename Value,
-          typename System,
-          typename Traversal,
-          typename Reference,
-          typename Difference
-         >
-   struct iterator_adaptor_base
- {
-   typedef typename ia_dflt_help<
-     Value,
-     iterator_value<Base>
-   >::type value;
-
-   typedef typename ia_dflt_help<
-     System,
-     thrust::iterator_system<Base>
-   >::type system;
-
-   typedef typename ia_dflt_help<
-     Traversal,
-     thrust::iterator_traversal<Base>
-   >::type traversal;
-
-   typedef typename ia_dflt_help<
-     Reference,
-     thrust::detail::eval_if<
-       thrust::detail::is_same<Value,use_default>::value,
-       thrust::iterator_reference<Base>,
-       thrust::detail::add_reference<Value>
-     >
-   >::type reference;
-
-   typedef typename ia_dflt_help<
-     Difference,
-     iterator_difference<Base>
-   >::type difference;
-
-   typedef thrust::iterator_facade<
-     Derived,
-     value,
-     system,
-     traversal,
-     reference,
-     difference
-   > type;
- }; // end iterator_adaptor_base
-
-
- } // end detail
- } // end thrust
-
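
The metafunction above exists so that `thrust::iterator_adaptor` can accept `use_default` placeholders and have the unspecified traits recomputed from `Base` through `ia_dflt_help`. A minimal sketch of an adaptor that leans on those defaults follows; `repeat_first_iterator` is a hypothetical example type invented here, while `thrust::iterator_adaptor` and `thrust::iterator_core_access` are real Thrust components.

#include <thrust/iterator/iterator_adaptor.h>

// Hypothetical adaptor: every dereference yields the first element of the
// underlying range. Only Derived and Base are supplied, so Value, System,
// Traversal, Reference, and Difference stay `use_default` and
// iterator_adaptor_base recomputes them from Base.
template <typename Iterator>
class repeat_first_iterator
  : public thrust::iterator_adaptor<repeat_first_iterator<Iterator>, Iterator>
{
public:
  typedef thrust::iterator_adaptor<repeat_first_iterator<Iterator>, Iterator> super_t;

  __host__ __device__
  repeat_first_iterator(const Iterator& it) : super_t(it), first(it) {}

  // iterator_facade reaches the private dereference() hook through this friend.
  friend class thrust::iterator_core_access;

private:
  Iterator first;

  __host__ __device__
  typename super_t::reference dereference() const { return *first; }
};

Because everything except Derived and Base is left at `use_default`, the adaptor's value type, system, traversal, reference, and difference type all come straight from Iterator, which is exactly the fallback path `iterator_adaptor_base` encodes.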