parquet-converter committed on
Commit 9909124 · 1 Parent(s): 34a19be

Update parquet files (step 73 of 397)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Construct 2 License File Crack What You Need to Know Before Downloading.md +0 -128
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Culegere matematica petrica pdf O culegere completa de matematica pentru clasa 1-6.md +0 -80
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Evangelisches Gesangbuch Pdf Kostenlos Downloadl Das evangelische Gesangbuch im Vergleich zu anderen Liederbchern.md +0 -138
  4. spaces/1gistliPinn/ChatGPT4/Examples/Adobe Acrobat Pro DC 2018.009.20050 Pre-crack VERIFIEDed Serial Key Keygen.md +0 -158
  5. spaces/1gistliPinn/ChatGPT4/Examples/CRACK Adobe Dreamweaver CC 2019 19.0.0 Crack ((EXCLUSIVE)).md +0 -20
  6. spaces/1gistliPinn/ChatGPT4/Examples/Civil3D2011xforcekeygen64bit WORK.md +0 -7
  7. spaces/1phancelerku/anime-remove-background/Driver Simulator The Best Way to Practice Driving Online.md +0 -189
  8. spaces/30SecondsToMoon/30SecondsToMoon/app.py +0 -7
  9. spaces/3laa2/Text2img/app.py +0 -120
  10. spaces/4Taps/SadTalker/src/facerender/modules/util.py +0 -564
  11. spaces/AIConsultant/MusicGen/app_v2.py +0 -1839
  12. spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/models/source.py +0 -538
  13. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/x_transformer.py +0 -641
  14. spaces/AILab-CVC/SEED-LLaMA/models/seed_qformer/qformer_causual.py +0 -1169
  15. spaces/AIWaves/Software_Company/src/agents/Agent/__init__.py +0 -1
  16. spaces/AP123/dreamgaussian/main.py +0 -882
  17. spaces/AgentVerse/agentVerse/agentverse/memory/__init__.py +0 -9
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/states/MatchState.js +0 -160
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/cube/Cube.d.ts +0 -2
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/Factory.d.ts +0 -17
  21. spaces/Amon1/ChatGPTForAcadamic/main.py +0 -145
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/using-diffusers/schedulers.md +0 -329
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_kakao_brain_unclip_to_diffusers.py +0 -1159
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/score_sde_ve/pipeline_score_sde_ve.py +0 -108
  25. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/altdiffusion/test_alt_diffusion.py +0 -254
  26. spaces/Andy1621/uniformer_image_detection/configs/htc/htc_without_semantic_r50_fpn_1x_coco.py +0 -236
  27. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/bbox_heads/bbox_head.py +0 -483
  28. spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x512_40k_voc12aug.py +0 -2
  29. spaces/Anni123/AuRoRA/README.md +0 -6
  30. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/padding.py +0 -36
  31. spaces/Ariharasudhan/YoloV5/utils/google_app_engine/Dockerfile +0 -25
  32. spaces/Armored-Atom/gpt2/README.md +0 -13
  33. spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/GroundingDINO/backbone/position_encoding.py +0 -186
  34. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/build_meta.py +0 -511
  35. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/backbone/__init__.py +0 -17
  36. spaces/Benson/text-generation/Examples/8 Bola Piscina 5.12.0 Apk Descargar.md +0 -109
  37. spaces/BetterAPI/BetterChat/vite.config.ts +0 -12
  38. spaces/Big-Web/MMSD/app.py +0 -79
  39. spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/__init__.py +0 -111
  40. spaces/CShorten/Last-Week-on-ArXiv/README.md +0 -13
  41. spaces/CVPR/BigDL-Nano_inference/README.md +0 -12
  42. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_rpn.py +0 -228
  43. spaces/CVPR/LIVE/pybind11/tests/test_constants_and_functions.py +0 -40
  44. spaces/CVPR/LIVE/thrust/thrust/detail/raw_pointer_cast.h +0 -52
  45. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/mismatch.h +0 -117
  46. spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/async/for_each.h +0 -34
  47. spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/unique.h +0 -44
  48. spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/set_operations.h +0 -23
  49. spaces/CVPR/WALT/mmcv_custom/runner/checkpoint.py +0 -85
  50. spaces/CVPR/WALT/mmdet/core/bbox/assigners/__init__.py +0 -16
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Construct 2 License File Crack What You Need to Know Before Downloading.md DELETED
@@ -1,128 +0,0 @@
1
- <br />
2
- <h1>How to Crack Construct 2 License File and Enjoy All Features</h1>
3
- <p>If you are a game developer or a hobbyist who wants to create your own games without coding, you might have heard of Construct 2. Construct 2 is a powerful game development tool that lets you create games for various platforms using a simple drag-and-drop interface. However, if you want to access all the features of Construct 2, you need to buy a license that can cost up to $399. That's a lot of money for some people who just want to have fun with game creation. Fortunately, there is a way to crack Construct 2 license file and enjoy all the features for free. In this article, we will show you how to do that, as well as some tips and tricks to use Construct 2 effectively after cracking the license file.</p>
4
- <h2>construct 2 license file crack</h2><br /><p><b><b>Download Zip</b> &#9889; <a href="https://byltly.com/2uKzyX">https://byltly.com/2uKzyX</a></b></p><br /><br />
5
- <h2>What is Construct 2 and Why You Need a License</h2>
6
- <h3>Construct 2: A Powerful Game Development Tool</h3>
7
- <p>Construct 2 is a game development tool created by Scirra, a company based in the UK. It allows you to create games for various platforms, such as Windows, Mac, Linux, Android, iOS, HTML5, Facebook, and more. You can create games using a simple drag-and-drop interface, where you can add objects, behaviors, events, and actions without writing any code. You can also use plugins and extensions to add more functionality and customization to your games. Construct 2 has a built-in preview mode that lets you test your games instantly on any device.</p>
8
- <h3>The Benefits of Having a License for Construct 2</h3>
9
- <p>While you can download and use Construct 2 for free, there are some limitations that come with the free edition. For example, you can only create up to 100 events per project, you can't use any third-party plugins or extensions, you can't export your games to Android or iOS, and you have to display a splash screen that says "Made with Construct 2" when your games start. If you want to remove these limitations and access all the features of Construct 2, you need to buy a license. There are three types of licenses available:</p>
10
- <ul>
11
- <li>Personal License: This license costs $129 and allows you to create unlimited events per project, use third-party plugins and extensions, export your games to Android or iOS using Cordova, and remove the splash screen. However, this license is only for personal use and non-commercial projects.</li>
12
- <li>Business License: This license costs $399 and allows you to do everything that the personal license does, plus use your games for commercial purposes. However, this license is only for individuals or small businesses with less than $5000 in annual revenue.</li>
13
- <li>Educational License: This license costs $49 per seat and allows you to use Construct 2 for educational purposes in schools or universities. However, this license does not allow you to export your games or use them for commercial purposes.</li>
14
- </ul>
15
- <p>As you can see, buying a license for Construct 2 can be quite expensive depending on your needs and goals. That's why some people resort to cracking the license file and enjoying all the features for free.</p>
16
- <h2>How to Crack Construct 2 License File for Free</h2>
17
- <h3>The Risks of Cracking Construct 2 License File</h3>
18
- <p>Before we show you how to crack Construct 2 license file, we have to warn you about the risks involved in doing so. First of all, cracking the license file is illegal and unethical. You are violating the terms and conditions of Scirra and depriving them of their rightful income. You are also exposing yourself to potential legal actions from Scirra if they find out that you are using a cracked license file. Secondly, cracking the license file can be dangerous for your computer and your games. You might download a cracked license file that contains malware or viruses that can harm your computer or steal your personal information. You might also encounter errors or bugs in your games that are caused by the cracked license file. Thirdly, cracking the license file can be unfair for other game developers who paid for their licenses legitimately. You are gaining an unfair advantage over them by accessing all the features of Construct 2 without paying anything.</p>
19
- <p>Therefore, we do not recommend or endorse cracking the license file for Construct 2. We are only providing this information for educational purposes only. If you decide to crack the license file anyway, you are doing so at your own risk and responsibility.</p>
20
- <h3>The Steps to Crack Construct 2 License File</h3>
21
- <p>If you still want to crack Construct 2 license file despite the risks involved, here are the steps that you need to follow:</p>
22
- <p>construct 2 game engine crack<br />
23
- construct 2 license file download<br />
24
- construct 2 full version free<br />
25
- construct 2 activation code generator<br />
26
- construct 2 license file hack<br />
27
- construct 2 cracked version download<br />
28
- construct 2 license key free<br />
29
- construct 2 serial number crack<br />
30
- construct 2 license file bypass<br />
31
- construct 2 full crack download<br />
32
- construct 2 license file generator<br />
33
- construct 2 keygen download<br />
34
- construct 2 license file expired<br />
35
- construct 2 crack reddit<br />
36
- construct 2 license file location<br />
37
- construct 2 patch download<br />
38
- construct 2 license file missing<br />
39
- construct 2 crack mac<br />
40
- construct 2 license file corrupted<br />
41
- construct 2 crack online<br />
42
- construct 2 license file editor<br />
43
- construct 2 crack windows<br />
44
- construct 2 license file backup<br />
45
- construct 2 crack apk<br />
46
- construct 2 license file extension<br />
47
- construct 2 crack tutorial<br />
48
- construct 2 license file format<br />
49
- construct 2 crack linux<br />
50
- construct 2 license file recovery<br />
51
- construct 2 crack android<br />
52
- construct 2 license file remover<br />
53
- construct 2 crack ios<br />
54
- construct 2 license file viewer<br />
55
- construct 2 crack steam<br />
56
- construct 2 license file validator<br />
57
- construct 2 crack update<br />
58
- construct 2 license file extractor<br />
59
- construct 2 crack no survey<br />
60
- construct 2 license file fixer<br />
61
- construct 2 crack without survey<br />
62
- construct 2 license file creator<br />
63
- construct 2 crack no password<br />
64
- construct 2 license file copier<br />
65
- construct 2 crack without password<br />
66
- construct 2 license file eraser<br />
67
- construct 2 crack zip file<br />
68
- construct 2 license file renamer<br />
69
- construct 2 crack rar file<br />
70
- construct 2 license file replacer</p>
71
- <h4>Download and Install Construct 2</h4>
72
- <p>The first step is to download and install Construct 2 from the official website: https://www.scirra.com/construct2/releases. You can choose any version that you want, but we recommend using the latest stable release (r277 at the time of writing). Make sure that you install it in a folder that is easy to access (such as C:\Program Files\Construct 2).</p>
73
- <h4>Download a Cracked License File</h4>
74
- <p>The next step is to download a cracked license file from an online source. There are many websites that offer cracked license files for various software applications, including Construct 2. However, not all of them are reliable or safe. Some of them might contain malware or viruses that can harm your computer or steal your personal information. Some of them might also provide outdated or invalid license files that won't work with your version of Construct 2.</p>
75
- <p>Therefore, you have to be careful when choosing where to download a cracked license file from. We suggest using one of these sources:</p>
76
- <ul>
77
- <li>Nexus Gamez: This is an itch.io page that offers cracked license files for various versions of Construct 2 (from r251 to r277). You can download them from here: https://nexus-gamez.itch.io/construct-2-r277-cracked.</li>
78
- <li>Virus CX: This is a YouTube channel that offers cracked license files for various versions of Construct 2 (from r251 to r280). You can watch their videos and find the download links in the description or comments section.</li>
79
- <li>Newcodern: This is another YouTube channel that offers cracked license files for various versions of Construct 2 (from r239 to r279). You can watch their videos and find the download links in the description or comments section.</li>
80
- </ul>
81
- <p>Once you have downloaded a cracked license file from one of these sources (or any other source that you trust), make sure that it has the name "c2license.txt" and save it in a folder that is easy to access (such as C:\Users\YourName\Downloads).</p>
82
- <h4>Copy and Paste the License File to the Construct 2 Folder</h4>
83
- <p>The final step is to copy and paste the cracked license file (c2license.txt) from where you saved it (such as C:\Users\YourName\Downloads) to where you installed Construct 2 (such as C:\Program Files\Construct 2). If there is already an existing c2license.txt file in the Construct 2 folder (which means that you have already used another non-working license before), delete it first before pasting the new one.</p>
84
- <h4>Restart Construct 2 and Enjoy All Features</h4>
85
- <p>Now that you have copied and pasted the cracked license file (c2license.txt) to the Construct 2 folder (such as C:\Program Files\Construct 2), all you have to do is restart Construct 2 and enjoy all its features without any limitations. You should see a message saying "License activated" when you open Construct 2.</p>
86
- <h5>Note:</h5>
87
- <h2>How to Use Construct 2 Effectively After Cracking the License File</h2>
88
- <p>Now that you have cracked the license file for Construct 2 and unlocked all its features, you might be wondering how to use it effectively for your game development projects. Here are some of the best features of Construct 2 that you can use after cracking the license file, as well as some tips and tricks to make your games stand out.</p>
89
- <h3>The Best Features of Construct 2 for Game Development</h3>
90
- <p>Construct 2 has many features that make it a powerful and versatile game development tool. Some of the best features that you can use after cracking the license file are:</p>
91
- <ul>
92
- <li>Multiplatform Export: You can export your games to various platforms, such as Windows, Mac, Linux, Android, iOS, HTML5, Facebook, and more. You can also use third-party tools and services to enhance your games for different platforms, such as Cocoon.io, PhoneGap, Intel XDK, Steamworks, and more.</li>
93
- <li>Third-Party Plugins and Extensions: You can use third-party plugins and extensions to add more functionality and customization to your games. There are hundreds of plugins and extensions available for Construct 2, such as AdMob, Firebase, Google Play Games, Photon Cloud, Spriter, Q3D, and more. You can find them on the official Scirra Store or on other websites and forums.</li>
94
- <li>Advanced Event System: You can create complex logic and gameplay without writing any code using the advanced event system of Construct 2. You can use variables, functions, arrays, dictionaries, families, groups, sub-events, loops, conditions, actions, expressions, and more to create your own game logic. You can also use behaviors to add common features to your objects, such as platform movement, physics, pathfinding, drag-and-drop, and more.</li>
95
- <li>Visual Effects and Animations: You can add visual effects and animations to your games using the built-in features of Construct 2. You can use effects such as blur, glow, tint, warp, pixelate, noise, and more to enhance your graphics. You can also use animations to make your objects move and change appearance. You can create animations using frames or spritesheets or import them from external sources.</li>
96
- <li>Audio and Music: You can add audio and music to your games using the built-in audio system of Construct 2. You can import audio files in various formats (such as WAV, OGG, MP3) or generate them using the built-in sound generator. You can also control the volume, pitch, looping, panning, fading, and more of your audio files.</li>
97
- </ul>
98
- <h3>The Tips and Tricks to Make Your Games Stand Out</h3>
99
- <h3>The Tips and Tricks to Make Your Games Stand Out</h3>
100
- <p>Besides using the best features of Construct 2 for game development, you also need to apply some tips and tricks to make your games stand out from the crowd. Here are some of them:</p>
101
- <ul>
102
- <li>Plan Your Game: Before you start creating your game, you should have a clear idea of what you want to achieve. You should plan your game concept, genre, theme, story, characters, gameplay, graphics, sound, and more. You should also do some research on your target audience and market. Having a plan will help you stay focused and organized throughout your game development process.</li>
103
- <li>Use Templates and Tutorials: If you are new to Construct 2 or game development in general, you can use templates and tutorials to learn the basics and get started quickly. Construct 2 comes with many templates and examples that you can use as a reference or modify to suit your needs. You can also find many tutorials online that cover various topics and aspects of game development using Construct 2.</li>
104
- <li>Test Your Game: Testing your game is essential to ensure that it works properly and meets your expectations. You should test your game regularly and thoroughly on different devices and platforms. You should also get feedback from other people, such as friends, family, or beta testers. Testing your game will help you identify and fix any errors, bugs, or issues that might affect your game quality or performance.</li>
105
- <li>Optimize Your Game: Optimizing your game is important to improve its speed, efficiency, and compatibility. You should optimize your game by reducing the size of your assets (such as images, sounds, fonts), using efficient events and actions, avoiding unnecessary objects and effects, using layers and layouts wisely, and more. Optimizing your game will help you reduce loading times, save memory and bandwidth, and increase frame rate.</li>
106
- <li>Publish Your Game: Publishing your game is the final step to share it with the world and reach your potential players. You should publish your game to the platforms that suit your goals and audience. You should also promote your game using various methods and channels, such as social media, blogs, forums, websites, ads, and more. Publishing and promoting your game will help you increase its visibility, popularity, and revenue.</li>
107
- </ul>
108
- <h2>Conclusion</h2>
109
- <p>Construct 2 is a powerful game development tool that lets you create games for various platforms without coding. However, if you want to access all its features, you need to buy a license that can be expensive. That's why some people crack Construct 2 license file and enjoy all its features for free. In this article, we showed you how to crack Construct 2 license file for free using a simple method. We also showed you some of the best features of Construct 2 that you can use after cracking the license file, as well as some tips and tricks to make your games stand out.</p>
110
- <p>Moreover, cracking the license file can be dangerous for your computer and your games. You might download a cracked license file that contains malware or viruses that can harm your computer or steal your personal information. You might also encounter errors or bugs in your games that are caused by the cracked license file. Furthermore, cracking the license file can be unfair for other game developers who paid for their licenses legitimately. You are gaining an unfair advantage over them by accessing all the features of Construct 2 without paying anything.</p>
111
- <p>Therefore, we do not recommend or endorse cracking the license file for Construct 2. We are only providing this information for educational purposes only. If you decide to crack the license file anyway, you are doing so at your own risk and responsibility.</p>
112
- <h2>FAQs</h2>
113
- <p>Here are some of the frequently asked questions about cracking Construct 2 license file:</p>
114
- <ol>
115
- <li>Q: Is cracking Construct 2 license file legal?<br>
116
- A: No, cracking Construct 2 license file is illegal and unethical. You are violating the terms and conditions of Scirra and depriving them of their rightful income. You are also exposing yourself to potential legal actions from Scirra if they find out that you are using a cracked license file.</li>
117
- <li>Q: Is cracking Construct 2 license file safe?<br>
118
- A: No, cracking Construct 2 license file can be dangerous for your computer and your games. You might download a cracked license file that contains malware or viruses that can harm your computer or steal your personal information. You might also encounter errors or bugs in your games that are caused by the cracked license file.</li>
119
- <li>Q: Is cracking Construct 2 license file fair?<br>
120
- A: No, cracking Construct 2 license file can be unfair for other game developers who paid for their licenses legitimately. You are gaining an unfair advantage over them by accessing all the features of Construct 2 without paying anything.</li>
121
- <li>Q: How can I crack Construct 2 license file?<br>
122
- A: To crack Construct 2 license file, you need to download and install Construct 2 from the official website, download a cracked license file from an online source, copy and paste the license file to the Construct 2 folder, and restart Construct 2.</li>
123
- <li>Q: How can I use Construct 2 effectively after cracking the license file?<br>
124
- A: To use Construct 2 effectively after cracking the license file, you need to use the best features of Construct 2 for game development, such as multiplatform export, third-party plugins and extensions, advanced event system, visual effects and animations, and audio and music. You also need to apply some tips and tricks to make your games stand out, such as planning your game, using templates and tutorials, testing your game, optimizing your game, and publishing your game.</li>
125
- </ol>
126
- </p> 0a6ba089eb<br />
127
- <br />
128
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Culegere matematica petrica pdf O culegere completa de matematica pentru clasa 1-6.md DELETED
@@ -1,80 +0,0 @@
1
- <br />
2
- <h1>Culegere Matematica Petrica PDF Download: A Useful Resource for Students and Teachers</h1>
3
- <p>If you are looking for a comprehensive and reliable math book for grades 1-8, you might want to check out <strong>Culegere Matematica Petrica</strong>, a collection of math exercises, problems and tests written by Ion Petrica, a renowned Romanian math teacher and author. In this article, we will tell you what Culegere Matematica Petrica is, why you should download it in PDF format, how to do it, and how to use it effectively.</p>
4
- <h2>What is Culegere Matematica Petrica?</h2>
5
- <p><strong>Culegere Matematica Petrica</strong> is a series of math books for grades 1-8, published by Sigma Publishing House in Bucharest, Romania. The books are based on the national math curriculum and cover all the topics and objectives required for each grade level. The books are divided into chapters, each containing a summary of the main concepts, followed by a large number of exercises, problems and tests of different difficulty levels. The books also include answers and solutions for all the questions.</p>
6
- <h2>culegere matematica petrica pdf download</h2><br /><p><b><b>Download File</b> &mdash; <a href="https://byltly.com/2uKyGB">https://byltly.com/2uKyGB</a></b></p><br /><br />
7
- <h3>Who is Ion Petrica?</h3>
8
- <p>Ion Petrica is a Romanian math teacher and author, who has written over 50 books on math education, ranging from elementary to high school level. He has also participated in various national and international math competitions and Olympiads, both as a contestant and as a trainer. He is known for his clear and concise explanations, his creative and challenging problems, and his passion for math.</p>
9
- <h3>What are the main features of Culegere Matematica Petrica?</h3>
10
- <p>Culegere Matematica Petrica has several features that make it a valuable resource for students and teachers alike. Some of these features are:</p>
11
- <p>culegere matematica petrica pdf free download<br />
12
- culegere matematica petrica pdf online<br />
13
- culegere matematica petrica pdf solutii<br />
14
- culegere matematica petrica pdf clasa 5<br />
15
- culegere matematica petrica pdf clasa 6<br />
16
- culegere matematica petrica pdf clasa 7<br />
17
- culegere matematica petrica pdf clasa 8<br />
18
- culegere matematica petrica pdf clasa 9<br />
19
- culegere matematica petrica pdf clasa 10<br />
20
- culegere matematica petrica pdf clasa 11<br />
21
- culegere matematica petrica pdf clasa 12<br />
22
- culegere matematica petrica pdf bacalaureat<br />
23
- culegere matematica petrica pdf admitere<br />
24
- culegere matematica petrica pdf olimpiada<br />
25
- culegere matematica petrica pdf evaluare nationala<br />
26
- culegere matematica petrica pdf probleme rezolvate<br />
27
- culegere matematica petrica pdf exercitii si teste<br />
28
- culegere matematica petrica pdf algebra si geometrie<br />
29
- culegere matematica petrica pdf analiza si trigonometrie<br />
30
- culegere matematica petrica pdf combinatorica si probabilitati<br />
31
- culegere matematica petrica pdf logica si calcul propositional<br />
32
- culegere matematica petrica pdf functii si ecuatii<br />
33
- culegere matematica petrica pdf inegalitati si extremuri<br />
34
- culegere matematica petrica pdf siruri si limite<br />
35
- culegere matematica petrica pdf derivare si integrare<br />
36
- culegere matematica petrica pdf aplicatii ale derivatelor si integralelor<br />
37
- culegere matematica petrica pdf numere complexe si polinoame<br />
38
- culegere matematica petrica pdf geometrie analitica si vectoriala<br />
39
- culegere matematica petrica pdf geometrie euclidiana si trigonometrie plana<br />
40
- culegere matematica petrica pdf geometrie sferica si trigonometrie sferica<br />
41
- culegere matematica petrica pdf arii si volume de corpuri geometrice<br />
42
- culegere matematica petrica pdf transformari geometrice si simetrie<br />
43
- culegere matematica petrica pdf teoreme de geometrie plana si spatiala<br />
44
- culegere matematica petrica pdf congruenta si asemanarea triunghiurilor<br />
45
- culegere matematica petrica pdf cercul si cercul circumscris triunghiului<br />
46
- culegere matematica petrica pdf patrulaterul convex si patrulaterul inscriptibil in cerc<br />
47
- culegere matematica petrica pdf poligoane regulate si poligoane inscriptibile in cerc<br />
48
- culegere matematica petrica pdf constructii geometrice cu rigla si compasul<br />
49
- culegere matematica petrica pdf metoda reducerii la absurd si metoda reductio ad absurdum<br />
50
- culegere matematica petrica pdf metoda inductiei complete si metoda inductiei incomplete<br />
51
- culegere matematica petrica pdf metoda substitutiei si metoda egalitatilor succesive<br />
52
- culegere matematica petrica pdf metoda telescopica si metoda sumelor partiale<br />
53
- culegere matematica petrica pdf metoda descrescatoarelor si metoda crescatoarelor <br />
54
- culegere matematica petrica pdf metoda diviziunii euclidiene si metoda diviziunii continue <br />
55
- culegere matematica petrica pdf metoda binomului lui Newton si metoda binomului generalizat <br />
56
- culegere matematica petrica pdf metoda formelor canonice si metoda formelor echivalente <br />
57
- culegere matematica petrica pdf metoda descompunerii in factori primi si metoda descompunerii in factori ireductibili <br />
58
- culegere matematica petrica pdf metoda radicalilor nestemati si metoda radicalilor conjugati <br />
59
- culegere matematica petrica pdf metoda determinantei lui Vandermonde si metoda determinantei lui Cramer</p>
60
- <ul>
61
- <li>It covers the entire math curriculum for grades 1-8, following the standards and guidelines of the Ministry of Education.</li>
62
- <li>It provides a systematic and progressive presentation of the topics, starting from the basics and moving on to more advanced concepts.</li>
63
- <li>It offers a balanced mix of theory and practice, with clear definitions, examples, formulas, rules and properties.</li>
64
- <li>It contains a large number of exercises, problems and tests of different types and difficulty levels, such as multiple choice, fill in the blanks, matching, true or false, short answer, word problems, puzzles, etc.</li>
65
- <li>It helps students develop their mathematical skills and reasoning abilities, such as computation, estimation, measurement, geometry, algebra, logic, problem solving, etc.</li>
66
- <li>It includes answers and solutions for all the questions at the end of each book.</li>
67
- </ul>
68
- <h2>Why should you download Culegere Matematica Petrica PDF?</h2>
69
- <p>If you are interested in using Culegere Matematica Petrica as your math textbook or reference book, you might want to download it in PDF format. There are several benefits of doing so:</p>
70
- <h3>Benefits of using Culegere Matematica Petrica PDF</h3>
71
- <h4>It covers the math curriculum for grades 1-8</h4>
72
- <p>By downloading Culegere Matematica Petrica PDF, you will have access to all the books in the series, from grade 1 to grade 8. This means that you will have a complete and consistent math education that follows the national standards. You will also be able to review previous topics or prepare for future ones at any time.</p>
73
- <h4>It provides a variety of exercises, problems and tests</h4>
74
- <p>Culegere Matematica Petrica PDF contains thousands of exercises, problems and tests that will help you practice and master the math concepts taught in each grade level. You will be able to choose from different types and difficulty levels of questions that suit your needs and preferences. You will also be able to check your answers and solutions at the end of each book.</p>
75
- <h4>It helps develop mathematical skills and reasoning</h4>
76
- <p>Culegere Matematica Petrica PDF is not just a collection of questions; it is also a tool that will help you develop your mathematical skills and reasoning abilities. By working on the exercises, problems and tests in the books, you will learn how to apply the math concepts to real-life situations; how to analyze data; how to solve equations; how to prove statements; how to think logically; how to communicate your ideas; etc.</p>
77
- <h3>How to download Culegere Matematica Petrica PDF?</h3>
78
- <p>If you want to download Culegere Matematica Petrica PDF</p> 0a6ba089eb<br />
79
- <br />
80
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Evangelisches Gesangbuch Pdf Kostenlos Downloadl Das evangelische Gesangbuch im Vergleich zu anderen Liederbchern.md DELETED
@@ -1,138 +0,0 @@
1
-
2
- <h1>Evangelisches Gesangbuch Pdf Kostenlos Downloadl</h1>
3
- <p>If you are looking for a way to download Evangelisches Gesangbuch Pdf for free, you have come to the right place. In this article, we will explain what Evangelisches Gesangbuch is, why you might want to download it for free, and how to use it effectively. We will also provide you with some tips and resources for finding and downloading the pdf file safely and legally.</p>
4
- <h2>What is Evangelisches Gesangbuch?</h2>
5
- <p>Evangelisches Gesangbuch is a hymnal that is used by the Protestant churches in Germany, Austria, and Switzerland. It was first published in 1993 and has since been revised and updated several times. It contains more than 600 hymns, songs, psalms, canticles, prayers, and liturgical texts that cover various themes and occasions of Christian worship.</p>
6
- <h2>Evangelisches Gesangbuch Pdf Kostenlos Downloadl</h2><br /><p><b><b>DOWNLOAD</b> ===== <a href="https://byltly.com/2uKx7w">https://byltly.com/2uKx7w</a></b></p><br /><br />
7
- <h3>A brief history of the hymnal</h3>
8
- <p>The idea of creating a common hymnal for the Protestant churches in Germany dates back to the 19th century, when several regional hymnals were developed and used by different denominations. However, it was not until after World War II that a serious effort was made to unify the hymnals and create a common liturgy. In 1950, a commission was formed to work on a new hymnal that would reflect the diversity and unity of the Protestant churches. After decades of research, consultation, and revision, Evangelisches Gesangbuch was finally published in 1993. It was intended to replace the old regional hymnals and to serve as a source of inspiration and guidance for worship.</p>
9
- <h3>The contents and structure of the hymnal</h3>
10
- <p>Evangelisches Gesangbuch is divided into two main parts: the general part and the regional part. The general part contains 535 hymns that are common to all regions and denominations. They are arranged according to the seasons of the church year, such as Advent, Christmas, Easter, Pentecost, etc., as well as according to topics such as praise, confession, faith, hope, love, etc. The regional part contains 65 additional hymns that are specific to each region or denomination. They reflect the local traditions, cultures, languages, and preferences of each church. The hymnal also includes an appendix with psalms, canticles, prayers, creeds, confessions, liturgical texts, indexes, and other supplementary materials.</p>
11
- <h3>The significance and impact of the hymnal</h3>
12
- <p>Evangelisches Gesangbuch is more than just a collection of songs. It is also a symbol of the ecumenical spirit and cooperation among the Protestant churches in Germany, Austria, and Switzerland. It represents their common heritage, faith, and mission as followers of Jesus Christ. It also expresses their diversity and openness to new forms and styles of worship. By singing from Evangelisches Gesangbuch, Christians can celebrate their unity in diversity and enrich their spiritual lives.</p>
13
- <p>Evangelisches Gesangbuch online lesen ohne Anmeldung<br />
14
- Evangelisches Gesangbuch als Pdf herunterladen gratis<br />
15
- Evangelisches Gesangbuch Pdf free download for Windows<br />
16
- Evangelisches Gesangbuch Pdf kostenlos runterladen für Mac<br />
17
- Evangelisches Gesangbuch Pdf gratis descargar para Android<br />
18
- Evangelisches Gesangbuch Pdf download gratuito per iPhone<br />
19
- Evangelisches Gesangbuch Pdf télécharger gratuitement pour iPad<br />
20
- Evangelisches Gesangbuch Pdf kostenloser Download für Kindle<br />
21
- Evangelisches Gesangbuch Pdf downloaden zonder kosten voor PC<br />
22
- Evangelisches Gesangbuch Pdf ladda ner gratis för mobil<br />
23
- Evangelisches Gesangbuch in Pdf format umwandeln kostenlos<br />
24
- Evangelisches Gesangbuch ausdrucken als Pdf Datei gratis<br />
25
- Evangelisches Gesangbuch mit Noten Pdf kostenlos downloaden<br />
26
- Evangelisches Gesangbuch mit Akkorden Pdf gratis herunterladen<br />
27
- Evangelisches Gesangbuch mit Texten Pdf free download<br />
28
- Evangelisches Gesangbuch mit Bildern Pdf kostenlos runterladen<br />
29
- Evangelisches Gesangbuch mit Liedern Pdf gratis descargar<br />
30
- Evangelisches Gesangbuch mit Erklärungen Pdf download gratuito<br />
31
- Evangelisches Gesangbuch mit Geschichten Pdf télécharger gratuitement<br />
32
- Evangelisches Gesangbuch mit Gebeten Pdf kostenloser Download<br />
33
- Evangelisches Gesangbuch für Kinder Pdf downloaden zonder kosten<br />
34
- Evangelisches Gesangbuch für Jugendliche Pdf ladda ner gratis<br />
35
- Evangelisches Gesangbuch für Erwachsene Pdf kostenlos downloaden<br />
36
- Evangelisches Gesangbuch für Senioren Pdf gratis herunterladen<br />
37
- Evangelisches Gesangbuch für Familien Pdf free download<br />
38
- Evangelisches Gesangbuch für Gemeinden Pdf kostenlos runterladen<br />
39
- Evangelisches Gesangbuch für Schulen Pdf gratis descargar<br />
40
- Evangelisches Gesangbuch für Chöre Pdf download gratuito<br />
41
- Evangelisches Gesangbuch für Gottesdienste Pdf télécharger gratuitement<br />
42
- Evangelisches Gesangbuch für Feiertage Pdf kostenloser Download<br />
43
- Evangelisches Gesangbuch nach Themen sortiert Pdf downloaden zonder kosten<br />
44
- Evangelisches Gesangbuch nach Nummern geordnet Pdf ladda ner gratis<br />
45
- Evangelisches Gesangbuch nach Autoren alphabetisch Pdf kostenlos downloaden<br />
46
- Evangelisches Gesangbuch nach Melodien klassifiziert Pdf gratis herunterladen<br />
47
- Evangelisches Gesangbuch nach Sprachen übersetzt Pdf free download<br />
48
- Evangelisches Gesangbuch nach Regionen angepasst Pdf kostenlos runterladen<br />
49
- Evangelisches Gesangbuch nach Konfessionen unterschieden Pdf gratis descargar<br />
50
- Evangelisches Gesangbuch nach Epochen eingeteilt Pdf download gratuito<br />
51
- Evangelisches Gesangbuch nach Stilen bewertet Pdf télécharger gratuitement<br />
52
- Evangelisches Gesangbuch nach Genres kategorisiert Pdf kostenloser Download<br />
53
- Das beste evangelische Gesangbuch als Pdf zum Downloaden ohne Kosten <br />
54
- Das neueste evangelische Gesangbuch als Pdf zum Herunterladen gratis <br />
55
- Das beliebteste evangelische Gesangbuch als Pdf zum Runterladen kostenlos <br />
56
- Das originellste evangelische Gesangbuch als Pdf zum Descargar gratis <br />
57
- Das schönste evangelische Gesangbuch als Pdf zum Download gratuito <br />
58
- Das umfangreichste evangelische Gesangbuch als Pdf zum Télécharger gratuitement <br />
59
- Das praktischste evangelische Gesangbuch als Pdf zum Kostenloser Download <br />
60
- Das lehrreichste evangelische Gesangbuch als Pdf zum Downloaden zonder kosten <br />
61
- Das inspirierendste evangelische Gesangbuch als Pdf zum Ladda ner gratis</p>
62
- <h2>Why download Evangelisches Gesangbuch Pdf for free?</h2>
63
- <p>Evangelisches Gesangbuch is a valuable resource for anyone who wants to learn more about Protestant hymnody and liturgy. However, buying a physical copy of the hymnal can be expensive or inconvenient for some people. That is why downloading Evangelisches Gesangbuch Pdf for free can be a good option for many reasons.</p>
64
- <h3>The benefits of having a digital copy of the hymnal</h3>
65
- <p>Having a digital copy of Evangelisches Gesangbuch Pdf can offer you several advantages over having a physical copy. For example:</p>
66
- <ul>
67
- <li>You can access it anytime and anywhere on your computer or mobile device.</li>
68
- <li>You can save space and money by not having to buy or store a bulky book.</li>
69
- <li>You can search for any hymn or text by keywords or numbers.</li>
70
- <li>You can zoom in or out to adjust the font size or layout.</li>
71
- <li>You can bookmark your favorite hymns or pages for easy reference.</li>
72
- <li>You can copy or paste any text or image from the pdf file.</li>
73
- <li>You can print any page or section you want.</li>
74
- </ul>
75
- <h3>The challenges and risks of downloading the hymnal for free</h3>
76
- <p>However, downloading Evangelisches Gesangbuch Pdf for free also comes with some challenges and risks that you should be aware of. For example:</p>
77
- <ul>
78
- <li>You may not be able to find a reliable or legal source for downloading the pdf file.</li>
79
- <li>You may encounter viruses or malware that can harm your device or data.</li>
80
- <li>You may violate the copyright laws or ethical principles by downloading or sharing the pdf file without permission or payment.</li>
81
- <li>You may miss out on some features or updates that are available only in the official or authorized version of the pdf file.</li>
82
- <li>You may experience some technical issues or errors with the pdf file such as poor quality, missing pages, incorrect formatting, etc.</li>
83
- </ul>
84
- <h3>The best sources and methods for downloading the hymnal for free</h3>
85
- <p>To avoid these challenges and risks, you should be careful and selective when choosing where and how to download Evangelisches Gesangbuch Pdf for free. Here are some tips and resources that can help you:</p>
86
- <ul>
87
- <li>Check if your church or library has a digital subscription or license for accessing Evangelisches Gesangbuch Pdf online or offline. If so, you can use their login credentials or ask them for permission to download it.</li>
88
- <li>Look for reputable websites or platforms that offer free or low-cost downloads of Evangelisches Gesangbuch Pdf legally and safely. For example: <a href="https://www.evangeliums.net/lieder/evangelisches_gesangbuch.html">Evangeliums.net</a>, <a href="https://www.ebook.de/de/category/66667/evangelische_gesangbuecher.html">Ebook.de</a>, <a href="https://www.amazon.de/Evangelische-Gesangb%C3%BCcher/b?ie=UTF8&node=340533031">Amazon.de</a>, etc.</li>
89
- <li>Use a reliable antivirus software or browser extension that can scan and protect your device from any malicious downloads.</li>
90
- <li>Read the terms and conditions carefully before downloading any pdf file. Make sure you understand your rights and responsibilities as a user.</li>
91
- <li>Respect the intellectual property rights of the authors and publishers of Evangelisches Gesangbuch. Do not distribute or reproduce the pdf file without their consent or acknowledgment.</li>
92
- </ul>
93
- <h2>How to use Evangelisches Gesangbuch Pdf effectively?</h2>
94
- <p>Once you have downloaded Evangelisches Gesangbuch Pdf for free successfully, you may wonder how to use it effectively. Here are some suggestions:</p>
95
- <h3>The features and functions of the pdf format</h3>
96
- <p>The pdf format is one of the most popular and widely used formats for digital documents. It has many features and functions that can enhance your reading and learning experience. For example:</p>
97
- <ul>
98
- <li>You can open and view the pdf file with any compatible software or application, such as Adobe Acrobat Reader, Google Chrome, Microsoft Edge, etc.</li>
99
- <li>You can adjust the display settings of the pdf file according to your preferences, such as full screen, fit width, fit page, rotate, etc.</li>
100
- <li>You can navigate through the pdf file easily by using the table of contents, the bookmarks, the thumbnails, the page numbers, the scroll bar, etc.</li>
101
- <li>You can search for any word or phrase in the pdf file by using the find tool, the advanced search tool, or the keyboard shortcuts (Ctrl+F).</li>
102
- <li>You can highlight, annotate, or comment on any text or image in the pdf file by using the tools menu, the comment menu, or the keyboard shortcuts (Ctrl+E).</li>
103
- <li>You can edit, modify, or convert the pdf file by using a specialized software or application, such as Adobe Acrobat Pro, PDFelement, Smallpdf, etc. However, may need to pay for some of these features or functions.</li>
104
- </ul>
105
- <h3>The tips and tricks for using the pdf reader and editor</h3>
106
- <p>To make the most out of the pdf reader and editor, you can use some tips and tricks that can save you time and effort. For example:</p>
107
- <ul>
108
- <li>You can use keyboard shortcuts to perform common tasks faster and easier, such as zoom in (Ctrl++), zoom out (Ctrl+-), copy (Ctrl+C), paste (Ctrl+V), undo (Ctrl+Z), redo (Ctrl+Y), etc.</li>
109
- <li>You can customize the toolbar of the pdf reader and editor by adding or removing the buttons or tools that you use frequently or rarely.</li>
110
- <li>You can create and organize your own folders or collections of pdf files on your device or cloud storage, such as Google Drive, Dropbox, OneDrive, etc.</li>
111
- <li>You can sync and access your pdf files across multiple devices, such as your computer, tablet, smartphone, etc., by using a cloud service, such as Adobe Document Cloud, Google Drive, Dropbox, OneDrive, etc.</li>
112
- <li>You can share and collaborate on your pdf files with others, such as your friends, family, colleagues, etc., by using an email service, such as Gmail, Outlook, Yahoo Mail, etc., or a social media platform, such as Facebook, Twitter, Instagram, etc.</li>
113
- </ul>
114
- <h3>The ways to share and print the pdf file</h3>
115
- <p>Finally, you may want to share or print the pdf file of Evangelisches Gesangbuch for various purposes. Here are some ways to do that:</p>
116
- <ul>
117
- <li>You can share the pdf file by attaching it to an email, uploading it to a cloud service, posting it on a social media platform, or sending it via a messaging app. However, you should always ask for permission and give credit to the original source before sharing the pdf file.</li>
118
- <li>You can print the pdf file by using a printer that is connected to your device or network. You can choose the print settings that suit your needs, such as the number of copies, the page range, the orientation, the paper size, etc. However, you should always respect the copyright laws and ethical principles before printing the pdf file.</li>
119
- </ul>
120
- <h2>Conclusion</h2>
121
- <p>In conclusion, Evangelisches Gesangbuch Pdf is a great resource for anyone who wants to learn more about Protestant hymnody and liturgy. It is a hymnal that contains more than 600 hymns, songs, psalms, canticles, prayers, and liturgical texts that cover various themes and occasions of Christian worship. It is also a symbol of the ecumenical spirit and cooperation among the Protestant churches in Germany, Austria, and Switzerland. By downloading Evangelisches Gesangbuch Pdf for free, you can enjoy the benefits of having a digital copy of the hymnal that you can access anytime and anywhere on your device. However, you should also be aware of the challenges and risks of downloading the hymnal for free and follow some tips and resources for finding and downloading the pdf file safely and legally. Moreover, you should also know how to use Evangelisches Gesangbuch Pdf effectively by using the features and functions of the pdf format, the tips and tricks for using the pdf reader and editor, and the ways to share and print the pdf file. We hope this article has helped you understand more about Evangelisches Gesangbuch Pdf and how to download it for free.</p>
122
- <h3>FAQs</h3>
123
- <p>Here are some frequently asked questions about Evangelisches Gesangbuch Pdf:</p>
124
- <ol>
125
- <li>Q: How many versions or editions of Evangelisches Gesangbuch are there?<br>
126
- A: There are 14 regional versions or editions of Evangelisches Gesangbuch that correspond to each region or denomination in Germany, Austria, and Switzerland. Each version or edition has a different color and number on its cover.</li>
127
- <li>Q: How can I find out which version or edition of Evangelisches Gesangbuch I need?<br>
128
- A: You can find out which version or edition of Evangelisches Gesangbuch you need by checking with your church or denomination. You can also look at the list of regions or denominations on <a href="https://www.evangelisch.de/inhalte/149383/01-12-2018/evangelisches-gesangbuch-die-regionalausgaben">this website</a>.</li>
129
- <li>Q: How can I buy a physical copy of Evangelisches Gesangbuch?<br>
130
- A: You can buy a physical copy of Evangelisches Gesangbuch from various online or offline bookstores or publishers. For example: <a href="https://www.gottesdienstinstitut.org/shop/gesangbuecher/">Gottesdienstinstitut.org</a>, <a href="https://www.buchhandel.de/buch/Evangelisches-Gesangbuch-Ausgabe-fuer-die-Evangelisch-Lutherische-Kirche-in-Bayern-9783872143020">Buchhandel.de</a>, <a href="https://www.v-r.de/de/evangelisches_gesangbuch/t-0/1011134/">V-r.de</a>, etc.</li>
131
- <li>Q: How can I update my pdf file of Evangelisches Gesangbuch?<br>
132
- A: You can update your pdf file of Evangelisches Gesangbuch by downloading the latest version or edition from a reliable or legal source. You can also check for any updates or corrections on <a href="https://www.evangelisch.de/inhalte/149383/01-12-2018/evangelisches-gesangbuch-die-regionalausgaben">this website</a>.</li>
133
- <li>Q: How can I contact the authors or publishers of Evangelisches Gesangbuch?<br>
134
- A: You can contact the authors or publishers of Evangelisches Gesangbuch by visiting their official websites or sending them an email. For example: <a href="https://www.evlka.de/">Evlka.de</a>, <a href="https://www.evang.at/">Evang.at</a>, <a href="https://www.ref.ch/">Ref.ch</a>, etc.</li>
135
- </ol>
136
- </p> 0a6ba089eb<br />
137
- <br />
138
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Adobe Acrobat Pro DC 2018.009.20050 Pre-crack VERIFIEDed Serial Key Keygen.md DELETED
@@ -1,158 +0,0 @@
1
- <br />
2
- <h1>Adobe Acrobat Pro DC 2018.009.20050: The Best PDF Software with Pre-Cracked Serial Key</h1>
3
-
4
- <p>If you are looking for a reliable and versatile PDF software, you might want to consider Adobe Acrobat Pro DC 2018.009.20050. This software is one of the most popular and powerful tools for creating, editing, and managing PDF documents. It has a lot of features that can help you work with PDF files efficiently and professionally.</p>
5
- <h2>Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen</h2><br /><p><b><b>DOWNLOAD</b> &gt;&gt;&gt;&gt;&gt; <a href="https://imgfil.com/2uy07G">https://imgfil.com/2uy07G</a></b></p><br /><br />
6
-
7
- <p>However, Adobe Acrobat Pro DC 2018.009.20050 is not a free software. You need to purchase a license to use it fully. But what if you don't want to spend money on it? Is there a way to get it for free? The answer is yes, with the help of Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen.</p>
8
-
9
- <h2>What is Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen?</h2>
10
-
11
- <p>Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen is a tool that can generate a valid serial key or activation code for Adobe Acrobat Pro DC 2018.009.20050 product. With this serial key, you can activate the software and use it without any limitations.</p>
12
-
13
- <p>Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen is easy to use and user-friendly. You don't need any technical skills or knowledge to use it. All you need to do is download it from a reliable source, run it, and copy the serial key that it generates.</p>
14
-
15
- <p>Then, you can install Adobe Acrobat Pro DC 2018.009.20050 on your computer and paste the serial key when prompted. That's it! You can now enjoy the full features of Adobe Acrobat Pro DC 2018.009.20050 for free.</p>
16
-
17
- <h2>What are the benefits of using Adobe Acrobat Pro DC 2018.009.20050?</h2>
18
-
19
- <p>Adobe Acrobat Pro DC 2018.009.20050 is a comprehensive PDF software that can help you with various tasks related to PDF files. Some of the benefits of using this software are:</p>
20
- <p></p>
21
-
22
- <ul>
23
- <li>You can create PDF files from any application that can print, such as Word, Excel, PowerPoint, etc.</li>
24
- <li>You can edit PDF files with ease, such as adding or deleting text, images, links, comments, etc.</li>
25
- <li>You can convert PDF files to other formats, such as Word, Excel, PowerPoint, HTML, etc.</li>
26
- <li>You can merge or split PDF files according to your needs.</li>
27
- <li>You can protect PDF files with passwords, encryption, digital signatures, etc.</li>
28
- <li>You can fill out and sign PDF forms electronically.</li>
29
- <li>You can collaborate with others on PDF files using cloud services, such as Dropbox, Google Drive, etc.</li>
30
- <li>You can optimize PDF files for web or mobile devices.</li>
31
- </ul>
32
-
33
- <p>And many more!</p>
34
-
35
- <h2>How to download Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen?</h2>
36
-
37
- <p>If you want to download Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen, you need to be careful about the source you choose. There are many websites that claim to offer this tool for free, but some of them might be fake or malicious.</p>
38
-
39
- <p>To avoid any risks or problems, you should only download Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen from a trusted and reputable website that has positive reviews and feedback from other users.</p>
40
-
41
- <p>One of the websites that you can try is <a href="https://opensea.io/collection/adobe-acrobat-pro-dc-201800920050-precracked-seria">OpenSea</a>. This website is a platform for digital collectibles and NFTs that also offers Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen as a collection item.</p>
42
-
43
- <p>To download Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen from OpenSea, you need to create an account and connect your wallet to the website. Then, you can browse the collection and find the item that you want.</p>
44
-
45
- <p>Once you find it, you can click on it and see the details and description of the item. You will also see a download link that will direct you to Google Drive where you can download the tool as a RAR file.</p>
46
-
47
- <p>After downloading the file, you need to extract it using a software like WinRAR or 7-Zip and run the tool as an administrator.</p>
48
-
49
- <h2>Conclusion</h2>
50
-
51
- <p>Adobe Acrobat Pro DC 2018.009.20050 is a great PDF software that can help you with various tasks related to PDF files.</p>
52
-
53
- <p>If you want to use this software for free, you can use Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen to generate a valid serial key or activation code for the product.</p>
54
-
55
- <p>You can download Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen from OpenSea website safely and easily.</p>
56
-
57
- <p>However, you should also be aware of the possible risks or consequences of using cracked software or tools.</p>
58
-
59
- <p>You might violate the terms and conditions of Adobe or face legal issues if you use this software for commercial purposes.</p>
60
-
61
- <p>You might also expose your computer or data to viruses or malware if you download cracked software or tools from untrusted sources.</p>
62
-
63
- <p>Therefore, you should always be careful and responsible when using cracked software or tools and use them at your own risk.</p>
64
- <h2>What are the features of Adobe Acrobat Pro DC 2018.009.20050?</h2>
65
-
66
- <p>Adobe Acrobat Pro DC 2018.009.20050 is a comprehensive PDF software that has many features that can help you with various tasks related to PDF files. Some of the features of this software are:</p>
67
-
68
- <ul>
69
- <li><b>Create PDF files</b>: You can create PDF files from any application that can print, such as Word, Excel, PowerPoint, etc. You can also scan paper documents and convert them to PDF files. You can also create PDF files from web pages, images, videos, etc.</li>
70
- <li><b>Edit PDF files</b>: You can edit PDF files with ease, such as adding or deleting text, images, links, comments, etc. You can also rearrange, rotate, crop, or resize pages. You can also add headers, footers, watermarks, bookmarks, etc.</li>
71
- <li><b>Convert PDF files</b>: You can convert PDF files to other formats, such as Word, Excel, PowerPoint, HTML, etc. You can also export PDF files to JPEG, PNG, TIFF, etc. You can also convert scanned documents to editable text using OCR technology.</li>
72
- <li><b>Merge or split PDF files</b>: You can merge or split PDF files according to your needs. You can combine multiple PDF files into one or extract specific pages from a PDF file. You can also organize pages by dragging and dropping.</li>
73
- <li><b>Protect PDF files</b>: You can protect PDF files with passwords, encryption, digital signatures, etc. You can also restrict editing, printing, or copying of PDF files. You can also remove sensitive information from PDF files using redaction tools.</li>
74
- <li><b>Fill out and sign PDF forms</b>: You can fill out and sign PDF forms electronically. You can also create your own forms using templates or from scratch. You can also collect and track responses from others using cloud services.</li>
75
- <li><b>Collaborate with others on PDF files</b>: You can collaborate with others on PDF files using cloud services, such as Dropbox, Google Drive, etc. You can also share PDF files via email or social media. You can also review and comment on PDF files with others using annotation tools.</li>
76
- <li><b>Optimize PDF files</b>: You can optimize PDF files for web or mobile devices. You can also reduce the file size of PDF files without compromising quality. You can also enhance the accessibility and readability of PDF files using tools like Read Out Loud or Reflow.</li>
77
- </ul>
78
-
79
- <h2>How to use Adobe Acrobat Pro DC 2018.009.20050?</h2>
80
-
81
- <p>Adobe Acrobat Pro DC 2018.009.20050 is a user-friendly and intuitive software that you can use easily and efficiently. Here are some steps on how to use this software:</p>
82
-
83
- <ol>
84
- <li>Download and install Adobe Acrobat Pro DC 2018.009.20050 on your computer using the serial key generated by Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen.</li>
85
- <li>Launch the software and choose the task that you want to do from the Home screen or the Tools menu.</li>
86
- <li>Select the file that you want to work with or create a new file from scratch.</li>
87
- <li>Use the tools and options available on the toolbar or the right pane to perform the task that you want.</li>
88
- <li>Save or export your file as needed.</li>
89
- </ol>
90
-
91
- <p>You can also access online tutorials and help resources from the Help menu or the Adobe website if you need more guidance or assistance.</p>
92
-
93
- <h2>Why choose Adobe Acrobat Pro DC 2018.009.20050?</h2>
94
-
95
- <p>Adobe Acrobat Pro DC 2018.009.20050 is a great choice for anyone who works with PDF files regularly or occasionally. Here are some reasons why you should choose this software:</p>
96
-
97
- <ul>
98
- <li>It is a comprehensive and versatile software that can handle any task related to PDF files.</li>
99
- <li>It is a reliable and trusted software that has been developed by Adobe, a leading company in digital media and software solutions.</li>
100
- <li>It is a compatible and flexible software that works with Windows XP, Windows 7, Windows 8, Windows 8.1, and Windows 10 operating systems.</li>
101
- <li>It is an easy and convenient software that has a user-friendly interface and intuitive tools.</li>
102
- <li>It is an affordable and cost-effective software that you can get for free with Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen.</li>
103
- </ul>
104
-
105
- <p>So what are you waiting for? Download Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen today and enjoy the benefits of this amazing software!</p>
106
- <h2>How to troubleshoot Adobe Acrobat Pro DC 2018.009.20050?</h2>
107
-
108
- <p>Adobe Acrobat Pro DC 2018.009.20050 is a stable and reliable software that works smoothly and efficiently. However, sometimes you might encounter some issues or errors while using this software. Here are some common problems and solutions that you can try to troubleshoot Adobe Acrobat Pro DC 2018.009.20050:</p>
109
-
110
- <ul>
111
- <li><b>The software does not launch or crashes</b>: This might be caused by corrupted or missing files, incompatible drivers, or malware infection. You can try to repair the installation, update the drivers, scan your computer for viruses, or reinstall the software.</li>
112
- <li><b>The serial key does not work or is invalid</b>: This might be caused by typing errors, expired or blocked serial key, or wrong product version. You can try to check the spelling and case of the serial key, generate a new serial key using Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen, or download the correct product version.</li>
113
- <li><b>The PDF files do not open or display correctly</b>: This might be caused by corrupted or damaged PDF files, incompatible PDF formats, or outdated software. You can try to repair the PDF files, convert the PDF files to a compatible format, or update the software.</li>
114
- <li><b>The PDF files do not print or print with errors</b>: This might be caused by printer issues, incorrect print settings, or corrupted PDF files. You can try to check the printer status and connection, adjust the print settings, or repair the PDF files.</li>
115
- <li><b>The PDF files do not edit or convert properly</b>: This might be caused by restricted PDF files, unsupported file formats, or insufficient system resources. You can try to remove the restrictions from the PDF files, choose a supported file format, or free up some memory and disk space.</li>
116
- </ul>
117
-
118
- <p>If none of these solutions work for you, you can also contact Adobe customer support or visit their online forums for more help and guidance.</p>
119
-
120
- <h2>What are the alternatives to Adobe Acrobat Pro DC 2018.009.20050?</h2>
121
-
122
- <p>Adobe Acrobat Pro DC 2018.009.20050 is a great PDF software that can meet your needs and expectations. However, if you are looking for some alternatives to this software, you might want to consider these options:</p>
123
-
124
- <ul>
125
- <li><b>Nitro Pro</b>: This is a powerful and affordable PDF software that can create, edit, convert, sign, and share PDF files with ease. It has a similar interface and features as Adobe Acrobat Pro DC 2018.009.20050 but with a lower price tag.</li>
126
- <li><b>PDFelement</b>: This is a simple and elegant PDF software that can create, edit, convert, annotate, and protect PDF files with ease. It has a user-friendly interface and features that are suitable for beginners and professionals alike.</li>
127
- <li><b>Foxit PhantomPDF</b>: This is a fast and secure PDF software that can create, edit, convert, sign, and collaborate on PDF files with ease. It has a robust and flexible interface and features that are ideal for business and enterprise users.</li>
128
- <li><b>PDF-XChange Editor</b>: This is a lightweight and versatile PDF software that can create, edit, view, annotate, OCR, and manipulate PDF files with ease. It has a customizable and intuitive interface and features that are perfect for personal and academic users.</li>
129
- </ul>
130
-
131
- <p>These are some of the best alternatives to Adobe Acrobat Pro DC 2018.009.20050 that you can try if you want to explore other options.</p>
132
-
133
- <h2>Conclusion</h2>
134
-
135
- <p>Adobe Acrobat Pro DC 2018.009.20050 is a comprehensive and versatile PDF software that can help you with various tasks related to PDF files.</p>
136
-
137
- <p>If you want to use this software for free, you can use Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen to generate a valid serial key or activation code for the product.</p>
138
-
139
- <p>You can download Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen from OpenSea website safely and easily.</p>
140
-
141
- <p>However, you should also be aware of the possible risks or consequences of using cracked software or tools.</p>
142
-
143
- <p>You might violate the terms and conditions of Adobe or face legal issues if you use this software for commercial purposes.</p>
144
-
145
- <p>You might also expose your computer or data to viruses or malware if you download cracked software or tools from untrusted sources.</p>
146
-
147
- <p>Therefore, you should always be careful and responsible when using cracked software or tools and use them at your own risk.</p>
148
- <p>In conclusion, Adobe Acrobat Pro DC 2018.009.20050 is a great PDF software that can help you with various tasks related to PDF files. It has a lot of features that can make your work easier and more professional.</p>
149
-
150
- <p>However, if you want to use this software for free, you need to use Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Serial Key keygen to generate a valid serial key or activation code for the product. This tool can save you money and time, but it also comes with some risks and drawbacks.</p>
151
-
152
- <p>You should be careful and responsible when using cracked software or tools and use them at your own risk. You should also respect the terms and conditions of Adobe and avoid using this software for commercial purposes.</p>
153
-
154
- <p>If you are looking for some alternatives to Adobe Acrobat Pro DC 2018.009.20050, you can try Nitro Pro, PDFelement, Foxit PhantomPDF, or PDF-XChange Editor. These are some of the best PDF software that can offer similar or better features and performance than Adobe Acrobat Pro DC 2018.009.20050.</p>
155
-
156
- <p>We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below.</p> 3cee63e6c2<br />
157
- <br />
158
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/CRACK Adobe Dreamweaver CC 2019 19.0.0 Crack ((EXCLUSIVE)).md DELETED
@@ -1,20 +0,0 @@
1
- <h2>CRACK Adobe Dreamweaver CC 2019 19.0.0 Crack</h2><br /><p><b><b>Download Zip</b> ::: <a href="https://imgfil.com/2uy05I">https://imgfil.com/2uy05I</a></b></p><br /><br />
2
-
3
- Unstoppable watches in HD
4
-
5
- Full of joss sticks, and smoke, and mojos, and all sorts of other things, the great and the good of Bollywood have descended on this ancient temple to celebrate the Lord of all that is, to meditate on the Almighty, to seek truth.
6
-
7
- Comprised of around 150 older and younger men who offer themselves up for a simple meditation, Lord Jagannath is the sacred thread of the temple, and within him lies the very heart of the Temple. Those who partake of his blessing are free from the curse of the evil eye.
8
-
9
- On the eve of the festivities, Karan, his wife Savita, and their daughter, Ravi have been married for some time. Their wedding was full of pomp and circumstance, although a bit disorganized. Karan and Savita, however, have never gotten on particularly well. Perhaps because the Karans have always been a bit flashy and the Savitas a bit conservative, the relationship had never been close. Nevertheless, it seems they are to spend the holidays together in a small town in Haryana, a state in North India, and these will be some of the happiest days of their lives.
10
-
11
- The family is greeted by a harried looking priest who explains to them that the ceremony is to celebrate the marriage between Lord Jagannath and his wife and that the Lord is very happy that they have decided to make this trip and will be the first to perform the ceremony. The pujas begin in the morning. Karan and Savita and their daughter wait for their turn in the lavish dining area, but a few moments later Savita and her daughter disappear, and Karan is left to play with Ravi, who is in awe of the place. Karan shows off the beautiful carvings of the temple in Haryana, the history of the place, and why the temple exists and it is clearly to the delight of his daughter.
12
-
13
- The family is given a tour of the temple, and Karan picks up a brownish stick of sandalwood, which he drops on the ground. Savita and her daughter are given the sandalwood stick as well.
14
-
15
- Savita and Ravi return in the evening after a nap, and Karan announces that the family will go to the city to perform pujas there.
16
-
17
- Ravi becomes very angry that he will not be given sandalwood as well, and Savita tells him that 4fefd39f24<br />
18
- <br />
19
- <br />
20
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Civil3D2011xforcekeygen64bit WORK.md DELETED
@@ -1,7 +0,0 @@
1
- <br />
2
- <p>Civil3D2011xforcekeygen64bit xforce keygen 32bits or 64bits version Vault Workgroup 2009 crack anegan full movie download tamilrockers. trysjer 15/05/2022 06:25. trysjer 002eecfc5e https://melaninterest.com/pin/civil3d2011xforcekeygen64bit-latest/ Reply lasimar 15/05/2022 08:02. </p>
3
- <h2>Civil3D2011xforcekeygen64bit</h2><br /><p><b><b>Download</b> &#9658;&#9658;&#9658;&#9658;&#9658; <a href="https://imgfil.com/2uxY4J">https://imgfil.com/2uxY4J</a></b></p><br /><br />
4
- <p>civil3d2011xforcekeygen64bit xforce keygen 32bits or 64bits version vault workgroup 2009 crack anegan full movie download tamilrockers. trysjer 15/05/2022 06:25. trysjer 002eecfc5e https://melaninterest.com/pin/civil3d2011xforcekeygen64bit-latest/ reply lasimar 15/05/2022 08:02. </p>
5
- <p>download windows 7 professional oa acer x16 96076<br />roadside romeo hindi movie 720p free download <br />principles of mathematics 9 nelson pdf download <br />3dmgame.dll metal gear solid v the p <br />hd online player (hindi hd yaariyan movies 1080p torrent) <br />biology book for class 9 sindh board <br />civil3d2011xforcekeygen64bit <br />xforce keygen 32bits or 64bits version vault workgroup 2009 crack <br />anegan full movie download tamilrockers 170 <br />xforce keygen autocad mechanical 2011 32 bit free download <br />murachs android programming (2nd edition) books pdf file <br />supreme ruler ultimate patch 8 download <br />advanced system repair pro 1.9.0.18.5.17 full with medicine [b download pc <br />bigwerks blue rose ii kontakt-decibel <br />download blood money full movie in hindi 720p <br />judaai in hindi 720p download <br />free download movie the karbala <br />cnc simulator pro crack 13 <br />mortal kombat armageddon download free ps2 gamesl <br />toontrack ezkeys mellotoon v1.1.rar </p> 899543212b<br />
6
- <br />
7
- <br />
spaces/1phancelerku/anime-remove-background/Driver Simulator The Best Way to Practice Driving Online.md DELETED
@@ -1,189 +0,0 @@
1
- <br />
2
- <h1>Driver Simulator Games: A Review and Comparison</h1>
3
- <p>Have you ever wondered what it would be like to drive a car, a truck, a bus, or even a plane in different scenarios and conditions? Do you want to learn how to drive better, safer, and more efficiently? Do you enjoy the thrill and challenge of racing, drifting, or exploring different roads and tracks? If you answered yes to any of these questions, then you might be interested in trying out a driver simulator game.</p>
4
- <p>A driver simulator game is a software that simulates the experience of driving a vehicle in a virtual environment. You can control the vehicle using a keyboard, a mouse, a joystick, a steering wheel, or other devices. You can see the road, the traffic, the weather, and other elements on your screen. You can hear the engine, the brakes, the horn, and other sounds through your speakers or headphones. You can feel the vibration, the acceleration, the deceleration, and other forces through your seat or your motion platform.</p>
5
- <h2>driver simulator</h2><br /><p><b><b>DOWNLOAD</b> > <a href="https://jinyurl.com/2uNP9a">https://jinyurl.com/2uNP9a</a></b></p><br /><br />
6
- <p>Driver simulator games can be used for various purposes. Some people play them for fun and entertainment. They enjoy the realism, the graphics, the physics, and the content of these games. They like to race against other players online, to drift around corners, to explore different maps and locations, or to customize their vehicles. Some people play them for education and research. They want to learn more about driving rules, traffic laws, vehicle dynamics, road safety, or human factors. They use these games to test their skills, to measure their performance, to collect data, or to conduct experiments. Some people play them for training and practice. They want to improve their driving abilities, knowledge, and confidence. They use these games to train for specific situations, to practice for real-world driving tests, to refresh their memory, or to prepare for emergencies.</p>
7
- <p>Whatever your reason for playing a driver simulator game is, you will benefit from it in many ways. In this article, we will review some of the benefits of driver simulator games. We will also compare some of the features of different driver simulator games. Finally, we will give you our reviews of some of the best driver simulator games available in 2023.</p>
8
- <h2>Benefits of driver simulator games</h2>
9
- <p>Driver simulator games offer various advantages compared to real vehicles or other types of games. Here are some of them:</p>
10
- <ul>
11
- <li><strong>Controllability</strong>: You can control the behavior of virtual traffic, weather conditions, road layout, vehicle settings, and other factors as a function of your needs or preferences. You can also pause, rewind, replay, or skip any part of the simulation.</li>
12
- <li><strong>Reproducibility</strong>: You can repeat the same scenario as many times as you want with consistent results. You can also compare your performance with others or with yourself over time.</li>
13
- <li><strong>Standardization</strong>: You can use the same simulation environment for all participants or groups. You can also follow the same protocol or procedure for each session.</li>
14
- <li><strong>Safety</strong>: You can experience risky or dangerous situations without putting yourself or others at risk. You can also avoid injuries or damages to yourself or your property.</li>
15
- <li><strong>Cost-effectiveness</strong>: You can save money on fuel consumption - <strong>Accessibility</strong>: You can access a wide range of vehicles, roads, and scenarios that you might not have in real life. You can also play these games anytime and anywhere you want.</li>
16
- </ul>
17
- <p>These benefits make driver simulator games a valuable tool for enhancing your driving skills, knowledge, and safety. They can also make your driving experience more enjoyable and satisfying.</p>
18
- <h2>Features of driver simulator games</h2>
19
- <p>Driver simulator games vary in terms of their quality, realism, and complexity. Some of the main features that you should look for when choosing a driver simulator game are:</p>
20
- <ul>
21
- <li><strong>Realism</strong>: How closely does the game mimic the real world? Does it include realistic graphics, sounds, physics, traffic, weather, and other elements? Does it account for human factors such as perception, attention, memory, decision making, and emotions?</li>
22
- <li><strong>Graphics</strong>: How detailed and clear are the images and animations in the game? Do they create a sense of immersion and presence? Do they run smoothly and without glitches?</li>
23
- <li><strong>Physics</strong>: How accurately does the game model the behavior and interaction of the vehicle, the road, and the environment? Does it consider factors such as speed, acceleration, braking, steering, traction, suspension, aerodynamics, and damage?</li>
24
- <li><strong>Content</strong>: How much variety and diversity does the game offer in terms of vehicles, roads, scenarios, and modes? Does it include different types of vehicles such as cars, trucks, buses, motorcycles, or planes? Does it include different types of roads such as highways, city streets, rural roads, or off-road tracks? Does it include different types of scenarios such as racing, drifting, parking, or exploring? Does it include different modes such as single-player, multiplayer, online, or offline?</li>
25
- <li><strong>Gameplay</strong>: How easy and intuitive is the game to play? Does it have a user-friendly interface and controls? Does it have a clear and consistent feedback system? Does it have a fair and balanced difficulty level? Does it have a fun and engaging storyline and objectives?</li>
26
- </ul>
27
- <p>These features determine the quality and enjoyment of your driver simulator game. You should choose a game that suits your preferences, goals, and expectations.</p>
28
- <h2>Comparison of driver simulator games</h2>
29
- <p>To help you decide which driver simulator game to play, we have compared some of the most popular and realistic driver simulator games in 2023. We have rated them on a scale of 1 to 5 stars based on their realism, graphics, physics, content, and gameplay. We have also summarized their pros and cons in a table below.</p>
30
- <table>
31
- <tr>
32
- <th>Game</th>
33
- <th>Realism</th>
34
- <th>Graphics</th>
35
- <th>Physics</th>
36
- <th>Content</th>
37
- <th>Gameplay</th>
38
- <th>Total</th>
39
- <th>Pros</th>
40
- <th>Cons</th>
41
- </tr>
42
- <tr>
43
- <td><a href="">Gran Turismo 7</a></td>
44
- <td>★★★★★</td>
45
- <td>★★★★★</td>
46
- <td>★★★★★</td>
47
- <td>★★★★☆</td>
48
- <td>★★★★☆</td>
49
- <td>23/25</td>
50
- <td>- Stunning visuals<br>- Realistic physics<br>- Licensed vehicles<br>- Career mode<br>- Online features</td>
51
- <td>- Limited tracks<br>- Long loading times<br>- High system requirements<br>- Expensive DLCs<br>- Occasional bugs</td>
52
- </tr>
53
- <tr>
54
- <td><a href="">Euro Truck Simulator 2</a></td>
55
- <td>★★★★☆</td>
56
- <td>★★★☆☆</td>
57
- <td>★★★★☆</td>
58
- <td>★★★★★</td>
59
- <td>★★★★☆</td>
60
- <td>20/25</td>
61
- <td>- Immersive simulation<br>- Diverse content<br>- Customizable trucks<br>- Mod support<br>- Multiplayer mode</td>
62
- <td>- Dated graphics<br>- Repetitive gameplay<br>- Unrealistic AI<br>- Complex controls<br>- Steep learning curve</td>
63
- </tr>
64
- <tr>
65
- <td><a href="">City Car Driving</a></td>
66
- <td>★★★☆☆</td>
67
- <td>★★☆☆☆</td>
68
- <td>★★★☆☆</td> <td>★★★★☆</td>
69
- <td>★★★☆☆</td>
70
- <td>18/25</td>
71
- <td>- Educational value<br>- Realistic traffic<br>- Driving scenarios<br>- Weather effects<br>- VR support</td>
72
- <td>- Low-quality graphics<br>- Limited vehicles<br>- Boring content<br>- Poor sound effects<br>- Expensive price</td>
73
- </tr>
74
- <tr>
75
- <td><a href="">Forza Horizon 5</a></td>
76
- <td>★★★☆☆</td>
77
- <td>★★★★★</td>
78
- <td>★★★☆☆</td>
79
- <td>★★★★★</td>
80
- <td>★★★★★</td>
81
- <td>21/25</td>
82
- <td>- Gorgeous graphics<br>- Open-world exploration<br>- Diverse vehicles<br>- Fun gameplay<br>- Social features</td>
83
- <td>- Arcade physics<br>- Unrealistic scenarios<br>- Frequent updates<br>- Microtransactions<br>- Online dependency</td>
84
- </tr>
85
- <tr>
86
- <td><a href="">Flight Simulator 2023</a></td>
87
- <td>★★★★★</td> <td>★★★★★</td>
88
- <td>★★★★★</td>
89
- <td>★★★★☆</td>
90
- <td>★★★★☆</td>
91
- <td>23/25</td>
92
- <td>- Amazing realism<br>- Stunning scenery<br>- Real-time weather<br>- Live traffic<br>- Flight lessons</td>
93
- <td>- High hardware demands<br>- Long installation time<br>- Limited aircraft<br>- Complex controls<br>- Occasional glitches</td>
94
- </tr>
95
- </table>
96
- <h2>Reviews of driver simulator games</h2>
97
- <p>In this section, we will give you our detailed reviews of some of the best driver simulator games in 2023. We will highlight their strengths and weaknesses, and give you our recommendations.</p>
98
- <p>I searched for the seed keyword "driver simulator"<br />
99
- I went to the Matching terms report<br />
100
- I filtered for keywords with a monthly search volume up to 300<br />
101
- I filtered for keywords with a Traffic Potential (TP) up to 300<br />
102
- I sorted the results by relevance<br />
103
- driver simulator games<br />
104
- driver simulator pc<br />
105
- driver simulator ps4<br />
106
- driver simulator online<br />
107
- driver simulator 3d<br />
108
- driver simulator mod apk<br />
109
- driver simulator free download<br />
110
- driver simulator steam<br />
111
- driver simulator vr<br />
112
- driver simulator xbox one<br />
113
- driver simulator android<br />
114
- driver simulator app<br />
115
- driver simulator car parking game<br />
116
- driver simulator download<br />
117
- driver simulator for windows 10<br />
118
- driver simulator game online<br />
119
- driver simulator game pc<br />
120
- driver simulator ios<br />
121
- driver simulator mod apk unlimited money<br />
122
- driver simulator offline<br />
123
- driver simulator pc game download<br />
124
- driver simulator ps5<br />
125
- driver simulator roblox codes<br />
126
- driver simulator switch<br />
127
- driver simulator unblocked<br />
128
- best driver simulator games for pc<br />
129
- bus driver simulator 2019 mods<br />
130
- bus driver simulator game online free play<br />
131
- car and truck driver simulator 2020 mod apk<br />
132
- car and truck driver simulator 2020 unlimited money<br />
133
- car driving school 2020: ultimate car simulator mod apk<br />
134
- city car driving: ultimate car driving simulator mod apk download<br />
135
- city coach bus driving: bus driving games 2020 mod apk download<br />
136
- city coach bus driving: bus driving games 2020 unlimited money and gems download<br />
137
- city coach bus driving: bus driving games 2020 unlimited money and gems hack version download free for android mobile phone and tablet devices.<br />
138
- city coach bus driving: bus driving games 2020 unlimited money and gems hack version download free for ios iphone ipad devices.<br />
139
- city coach bus driving: bus driving games 2020 unlimited money and gems hack version download free for pc windows 10 laptop desktop devices.<br />
140
- city coach bus driving: bus driving games 2020 unlimited money and gems hack version download free for ps4 playstation console devices.<br />
141
- city coach bus driving: bus driving games 2020 unlimited money and gems hack version download free for xbox one console devices.<br />
142
- city coach bus driving: bus driving games 2020 unlimited money and gems hack version download free for nintendo switch console devices.<br />
143
- city coach bus driving: bus driving games 2020 unlimited money and gems hack version download free for mac os laptop desktop devices.<br />
144
- city coach bus driving: bus driving games 2020 unlimited money and gems hack version download free for linux laptop desktop devices.<br />
145
- city coach bus driving: bus driving games 2020 unlimited money and gems hack version download free for chromebook laptop desktop devices.<br />
146
- city coach bus driving: bus driving games 2020 unlimited money and gems hack version download free for amazon fire tablet devices.<br />
147
- city coach bus driving: bus driving games 2020 unlimited money and gems hack version download free for samsung galaxy smartphone devices.</p>
148
- <h3>Gran Turismo 7</h3>
149
- <p>Gran Turismo 7 is the latest installment in the legendary racing simulator series by Polyphony Digital. It is exclusive to PlayStation 5 and features over 400 licensed vehicles, 28 tracks, and a revamped career mode. It also supports online multiplayer, VR mode, and ray tracing technology.</p>
150
- <p>The game is praised for its stunning visuals, realistic physics, and authentic sound effects. The vehicles look and feel like their real counterparts, and the tracks are faithfully recreated from real locations. The game also offers a variety of customization options, such as tuning, painting, and decals. The career mode is engaging and challenging, and the online features are smooth and fun.</p>
151
- <p>The game is criticized for its limited track selection, long loading times, and high system requirements. The game also has some expensive DLCs that add more content, but some players feel that they should have been included in the base game. The game also has some occasional bugs and glitches that can affect the gameplay.</p>
152
- <p>Overall, Gran Turismo 7 is a must-have for racing enthusiasts who own a PlayStation 5. It is one of the most realistic and beautiful racing simulators ever made, and it offers hours of entertainment and challenge. However, it is not perfect, and it might not appeal to casual gamers or those who prefer arcade-style racing games.</p>
153
- <h3>Euro Truck Simulator 2</h3>
154
- <p>Euro Truck Simulator 2 is a truck driving simulator by SCS Software. It is available for Windows, Mac, and Linux, and features over 70 European cities, 15 countries, and hundreds of roads. It also supports modding, multiplayer mode, and VR mode.</p>
155
- <p>The game is praised for its immersive simulation, diverse content, and customizable trucks. The game lets you drive various types of trucks across Europe, delivering cargo, earning money, and expanding your business. The game also lets you modify your trucks with different parts, colors, and accessories. The modding community is active and provides many additional content such as maps, vehicles, skins, and more. The multiplayer mode is fun and social, and the VR mode is realistic and thrilling.</p <p>The game is criticized for its dated graphics, repetitive gameplay, and unrealistic AI. The game does not have the best visuals, and some of the environments look bland and boring. The game can also get monotonous after a while, as the missions are similar and the routes are long. The game also has some issues with the AI traffic, such as erratic behavior, collisions, and traffic jams.</p>
156
- <p>Overall, Euro Truck Simulator 2 is a great game for truck lovers who want to experience the life of a truck driver. It is a relaxing and rewarding game that offers a lot of variety and customization. However, it is not a game for everyone, and it might not appeal to those who prefer fast-paced or action-packed games.</p>
157
- <h3>City Car Driving</h3>
158
- <p>City Car Driving is a car driving simulator by Forward Development. It is available for Windows and features over 25 vehicles, 16 maps, and 24 scenarios. It also supports VR mode and modding.</p>
159
- <p>The game is praised for its educational value, realistic traffic, and driving scenarios. The game is designed to help you learn how to drive in different situations and conditions, such as city traffic, country roads, night driving, rain, snow, fog, etc. The game also includes a traffic rules mode, where you have to follow the traffic laws and signs of different countries. The game also has a variety of scenarios, such as parking, overtaking, emergency braking, etc. The game also supports VR mode, which enhances the immersion and realism of the game.</p>
160
- <p>The game is criticized for its low-quality graphics, limited vehicles, and boring content. The game does not have the best graphics, and some of the models and textures look outdated and low-resolution. The game also has a small selection of vehicles, mostly sedans and hatchbacks. The game also lacks content in terms of maps, modes, and objectives. The game can get dull and tedious after a while.</p>
161
- <p>Overall, City Car Driving is a good game for beginners who want to learn how to drive or improve their driving skills. It is a realistic and challenging game that teaches you the basics of driving in various situations. However, it is not a very entertaining or exciting game, and it might not appeal to those who want more action or variety in their games.</p>
162
- <h3>Forza Horizon 5</h3>
163
- <p>Forza Horizon 5 is an open-world racing simulator by Playground Games. It is available for Windows and Xbox Series X/S and features over 500 vehicles , and a stunning recreation of Mexico. It also supports online multiplayer, cross-play, and ray tracing technology.</p>
164
- <p>The game is praised for its gorgeous graphics, open-world exploration, and diverse vehicles. The game showcases the beauty and diversity of Mexico, with its vibrant cities, lush forests, arid deserts, snowy mountains, and ancient ruins. The game also offers a huge variety of vehicles, from supercars to off-road trucks, from motorcycles to planes. The game also has a fun and engaging gameplay, with dynamic seasons, events, challenges, and rewards.</p>
165
- <p>The game is criticized for its arcade physics, unrealistic scenarios, and frequent updates. The game does not have the most realistic physics, and some of the vehicles and tracks feel too easy or too hard to drive. The game also has some unrealistic scenarios, such as driving through a volcano, a sandstorm, or a tornado. The game also has frequent updates that add more content, but also require more storage space and internet bandwidth.</p>
166
- <p>Overall, Forza Horizon 5 is a fantastic game for racing fans who want to experience the thrill and joy of driving in a beautiful and diverse world. It is one of the most fun and enjoyable racing simulators ever made, and it offers hours of entertainment and challenge. However, it is not a very realistic or serious game, and it might not appeal to those who prefer more simulation or realism in their games.</p>
167
- <h3>Flight Simulator 2023</h3>
168
- <p>Flight Simulator 2023 is a flight simulator by Asobo Studio. It is available for Windows and Xbox Series X/S and features over 40 aircraft , and a realistic representation of the entire Earth. It also supports online multiplayer, VR mode, and modding.</p>
169
- <p>The game is praised for its amazing realism, stunning scenery, and real-time weather. The game uses satellite imagery, 3D mapping, and artificial intelligence to create a detailed and accurate model of the Earth. The game also uses real-time data from various sources to simulate the weather, the traffic, and the wildlife. The game also offers a variety of aircraft, from light planes to jets, from helicopters to gliders. The game also supports VR mode, which enhances the immersion and realism of the game.</p>
170
- <p>The game is criticized for its high hardware demands, long installation time, and limited aircraft. The game requires a powerful PC or console, a fast internet connection, and a large storage space to run smoothly and without issues. The game also takes a long time to install and update, which can be frustrating for some players. The game also has a small selection of aircraft, mostly civilian and commercial ones. The game also lacks some features such as combat, missions, or challenges.</p>
171
- <p>Overall, Flight Simulator 2023 is an incredible game for aviation enthusiasts who want to experience the wonder and beauty of flying in a realistic and immersive way. It is one of the most advanced and impressive flight simulators ever made, and it offers hours of exploration and discovery. However, it is not a very accessible or casual game, and it might not appeal to those who prefer more action or variety in their games.</p>
172
- <h2>Conclusion</h2>
173
- <p>Driver simulator games are software that simulate the experience of driving a vehicle in a virtual environment. They can be used for entertainment, education, research, or training purposes. They can also improve your driving skills, knowledge, and safety.</p>
174
- <p>Driver simulator games vary in terms of their quality, realism, and complexity. Some of the main features that you should look for when choosing a driver simulator game are realism, graphics, physics, content, and gameplay.</p>
175
- <p>We have compared and reviewed some of the best driver simulator games available in 2023. We have rated them on a scale of 1 to 5 stars based on their realism, graphics, physics, content , and gameplay. We have also summarized their pros and cons in a table and given our detailed reviews of each game.</p>
176
- <p>Some of the best driver simulator games in 2023 are Gran Turismo 7, Euro Truck Simulator 2, City Car Driving, Forza Horizon 5, and Flight Simulator 2023. Each game has its own strengths and weaknesses, and you should choose the one that suits your preferences, goals, and expectations.</p>
177
- <p>Driver simulator games are a great way to enjoy the thrill and challenge of driving in a safe and convenient way. They can also help you learn more about driving rules, traffic laws, vehicle dynamics, road safety, or human factors. They can also help you improve your driving abilities, knowledge, and confidence.</p>
178
- <p>We hope that this article has helped you understand more about driver simulator games and how to choose the best one for you. Happy driving!</p>
179
- <h2>FAQs</h2>
180
- <p>Here are some common questions and answers about driver simulator games:</p>
181
- <ul>
182
- <li><strong>What is the difference between a driver simulator game and a racing game?</strong><br>A driver simulator game is a software that simulates the experience of driving a vehicle in a virtual environment. A racing game is a software that focuses on the competitive aspect of driving a vehicle in a virtual environment. Driver simulator games tend to be more realistic, complex, and educational than racing games. Racing games tend to be more arcade-style, simple, and entertaining than driver simulator games.</li>
183
- <li><strong>What are the benefits of playing driver simulator games?</strong><br>Driver simulator games can offer various benefits such as controllability, reproducibility, standardization, safety, cost-effectiveness, and accessibility. They can also improve your driving skills, knowledge, and safety.</li>
184
- <li><strong>What are the features of driver simulator games?</strong><br>Driver simulator games vary in terms of their quality, realism, and complexity. Some of the main features that you should look for when choosing a driver simulator game are realism, graphics, physics, content, and gameplay.</li>
185
- <li><strong>What are some of the best driver simulator games in 2023?</strong><br>Some of the best driver simulator games in 2023 are Gran Turismo 7, Euro Truck Simulator 2, City Car Driving, Forza Horizon 5, and Flight Simulator 2023. Each game has its own strengths and weaknesses, and you should choose the one that suits your preferences, goals , and expectations.</li>
186
- <li><strong>How can I play driver simulator games?</strong><br>You can play driver simulator games on various platforms such as PC, console, mobile, or VR. You can also use different devices such as keyboard, mouse, joystick, steering wheel, or motion platform to control the vehicle. You can also play driver simulator games online or offline, alone or with others.</li>
187
- </ul></p> 401be4b1e0<br />
188
- <br />
189
- <br />
spaces/30SecondsToMoon/30SecondsToMoon/app.py DELETED
@@ -1,7 +0,0 @@
1
- import gradio as gr
2
-
3
- def greet(name):
4
- return "Hello " + name + "!!"
5
-
6
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
7
- iface.launch()
spaces/3laa2/Text2img/app.py DELETED
@@ -1,120 +0,0 @@
1
- import streamlit as st
2
- import cv2 as cv
3
- import time
4
- import torch
5
- from diffusers import StableDiffusionPipeline
6
- from transformers import GPT2Tokenizer, GPT2LMHeadModel
7
-
8
-
9
- def create_model(loc = "stabilityai/stable-diffusion-2-1-base", mch = 'cpu'):
10
- pipe = StableDiffusionPipeline.from_pretrained(loc)
11
- pipe = pipe.to(mch)
12
- return pipe
13
-
14
-
15
- def tok_mod():
16
- tokenizer = GPT2Tokenizer.from_pretrained('distilgpt2')
17
- tokenizer.add_special_tokens({'pad_token': '[PAD]'})
18
- model = GPT2LMHeadModel.from_pretrained('FredZhang7/distilgpt2-stable-diffusion-v2')
19
- model.to('cpu')
20
- return model,tokenizer
21
-
22
-
23
- t2i = st.title("""
24
- Txt2Img
25
- ###### `CLICK "Create_Update_Model"` :
26
- - `FIRST RUN OF THE CODE`
27
- - `CHANGING MODEL`
28
- ###### TO USE GPT PROMPTS GENERATOR CHECK `GPT PROMS` THEN CLICK `CREATE GPT MODEL`""")
29
-
30
- the_type = st.selectbox("Model",("stabilityai/stable-diffusion-2-1-base",
31
- "CompVis/stable-diffusion-v1-4"))
32
- st.session_state.gate = False
33
-
34
- ma_1,_,ma_2 = st.columns([2,2,2])
35
-
36
- with ma_1 :
37
- create = st.button("Create The Model")
38
-
39
- if create:
40
- st.session_state.t2m_mod = create_model(loc=the_type)
41
-
42
- with ma_2 :
43
- gpt = st.checkbox("GPT PROMS")
44
-
45
- if gpt :
46
- gen = st.button("Create GPT Model")
47
- if gen:
48
- st.session_state.mod,st.session_state.tok = tok_mod()
49
-
50
- m1,m2,m3 = st.columns([1,1,3])
51
- m4,m5 = st.columns(2)
52
- prompt = st.text_input("GPT PROM",r'' )
53
- with m1 :
54
- temperature = st.slider("Temp",0.0,1.0,.9,.1)
55
- with m2 :
56
- top_k = st.slider("K",2,16,8,2)
57
- with m3 :
58
- max_length = st.slider("Length",10,100,80,1)
59
- with m4 :
60
- repitition_penalty = st.slider("penality",1.0,5.0,1.2,1.0)
61
- with m5 :
62
- num_return_sequences=st.slider("Proms Num",1,10,5,1)
63
-
64
- prom_gen = st.button("Generate Proms")
65
-
66
- if prom_gen :
67
- model, tokenizer = st.session_state.mod,st.session_state.tok
68
- input_ids = tokenizer(prompt, return_tensors='pt').input_ids
69
- output = model.generate(input_ids, do_sample=True, temperature=temperature, top_k=top_k, max_length=max_length,
70
- num_return_sequences=num_return_sequences, repetition_penalty=repitition_penalty,
71
- penalty_alpha=0.6, no_repeat_ngram_size=1, early_stopping=True)
72
-
73
- st.session_state.PROMPTS = []
74
- for i in range(len(output)):
75
- st.session_state.PROMPTS.append(tokenizer.decode(output[i]))
76
-
77
- if 'PROMPTS' in st.session_state :
78
- prom = st.selectbox("Proms",st.session_state.PROMPTS)
79
-
80
- else :
81
- prom = st.text_input("# Prompt",'')
82
-
83
-
84
-
85
-
86
- c1,c2,c3 = st.columns([1,1,3])
87
- c4,c5 = st.columns(2)
88
- with c1:
89
- bu_1 = st.text_input("Seed",'999')
90
- with c2:
91
- bu_2 = st.text_input("Steps",'12')
92
- with c3:
93
- bu_3 = st.text_input("Number of Images",'1')
94
- with c4:
95
- sl_1 = st.slider("Width",128,1024,512,8)
96
- with c5:
97
- sl_2 = st.slider("hight",128,1024,512,8)
98
-
99
- st.session_state.generator = torch.Generator("cpu").manual_seed(int(bu_1))
100
-
101
- create = st.button("Imagine")
102
-
103
- if create:
104
- model = st.session_state.t2m_mod
105
- generator = st.session_state.generator
106
-
107
- if int(bu_3) == 1 :
108
- IMG = model(prom, width=int(sl_1), height=int(sl_2),
109
- num_inference_steps=int(bu_2),
110
- generator=generator).images[0]
111
- st.image(IMG)
112
-
113
- else :
114
- PROMS = [prom]*int(bu_3)
115
-
116
- IMGS = model(PROMS, width=int(sl_1), height=int(sl_2),
117
- num_inference_steps=int(bu_2),
118
- generator=generator).images
119
-
120
- st.image(IMGS)
spaces/4Taps/SadTalker/src/facerender/modules/util.py DELETED
@@ -1,564 +0,0 @@
1
- from torch import nn
2
-
3
- import torch.nn.functional as F
4
- import torch
5
-
6
- from src.facerender.sync_batchnorm import SynchronizedBatchNorm2d as BatchNorm2d
7
- from src.facerender.sync_batchnorm import SynchronizedBatchNorm3d as BatchNorm3d
8
-
9
- import torch.nn.utils.spectral_norm as spectral_norm
10
-
11
-
12
- def kp2gaussian(kp, spatial_size, kp_variance):
13
- """
14
- Transform a keypoint into gaussian like representation
15
- """
16
- mean = kp['value']
17
-
18
- coordinate_grid = make_coordinate_grid(spatial_size, mean.type())
19
- number_of_leading_dimensions = len(mean.shape) - 1
20
- shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape
21
- coordinate_grid = coordinate_grid.view(*shape)
22
- repeats = mean.shape[:number_of_leading_dimensions] + (1, 1, 1, 1)
23
- coordinate_grid = coordinate_grid.repeat(*repeats)
24
-
25
- # Preprocess kp shape
26
- shape = mean.shape[:number_of_leading_dimensions] + (1, 1, 1, 3)
27
- mean = mean.view(*shape)
28
-
29
- mean_sub = (coordinate_grid - mean)
30
-
31
- out = torch.exp(-0.5 * (mean_sub ** 2).sum(-1) / kp_variance)
32
-
33
- return out
34
-
35
- def make_coordinate_grid_2d(spatial_size, type):
36
- """
37
- Create a meshgrid [-1,1] x [-1,1] of given spatial_size.
38
- """
39
- h, w = spatial_size
40
- x = torch.arange(w).type(type)
41
- y = torch.arange(h).type(type)
42
-
43
- x = (2 * (x / (w - 1)) - 1)
44
- y = (2 * (y / (h - 1)) - 1)
45
-
46
- yy = y.view(-1, 1).repeat(1, w)
47
- xx = x.view(1, -1).repeat(h, 1)
48
-
49
- meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)
50
-
51
- return meshed
52
-
53
-
54
- def make_coordinate_grid(spatial_size, type):
55
- d, h, w = spatial_size
56
- x = torch.arange(w).type(type)
57
- y = torch.arange(h).type(type)
58
- z = torch.arange(d).type(type)
59
-
60
- x = (2 * (x / (w - 1)) - 1)
61
- y = (2 * (y / (h - 1)) - 1)
62
- z = (2 * (z / (d - 1)) - 1)
63
-
64
- yy = y.view(1, -1, 1).repeat(d, 1, w)
65
- xx = x.view(1, 1, -1).repeat(d, h, 1)
66
- zz = z.view(-1, 1, 1).repeat(1, h, w)
67
-
68
- meshed = torch.cat([xx.unsqueeze_(3), yy.unsqueeze_(3), zz.unsqueeze_(3)], 3)
69
-
70
- return meshed
71
-
72
-
73
- class ResBottleneck(nn.Module):
74
- def __init__(self, in_features, stride):
75
- super(ResBottleneck, self).__init__()
76
- self.conv1 = nn.Conv2d(in_channels=in_features, out_channels=in_features//4, kernel_size=1)
77
- self.conv2 = nn.Conv2d(in_channels=in_features//4, out_channels=in_features//4, kernel_size=3, padding=1, stride=stride)
78
- self.conv3 = nn.Conv2d(in_channels=in_features//4, out_channels=in_features, kernel_size=1)
79
- self.norm1 = BatchNorm2d(in_features//4, affine=True)
80
- self.norm2 = BatchNorm2d(in_features//4, affine=True)
81
- self.norm3 = BatchNorm2d(in_features, affine=True)
82
-
83
- self.stride = stride
84
- if self.stride != 1:
85
- self.skip = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=1, stride=stride)
86
- self.norm4 = BatchNorm2d(in_features, affine=True)
87
-
88
- def forward(self, x):
89
- out = self.conv1(x)
90
- out = self.norm1(out)
91
- out = F.relu(out)
92
- out = self.conv2(out)
93
- out = self.norm2(out)
94
- out = F.relu(out)
95
- out = self.conv3(out)
96
- out = self.norm3(out)
97
- if self.stride != 1:
98
- x = self.skip(x)
99
- x = self.norm4(x)
100
- out += x
101
- out = F.relu(out)
102
- return out
103
-
104
-
105
- class ResBlock2d(nn.Module):
106
- """
107
- Res block, preserve spatial resolution.
108
- """
109
-
110
- def __init__(self, in_features, kernel_size, padding):
111
- super(ResBlock2d, self).__init__()
112
- self.conv1 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,
113
- padding=padding)
114
- self.conv2 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,
115
- padding=padding)
116
- self.norm1 = BatchNorm2d(in_features, affine=True)
117
- self.norm2 = BatchNorm2d(in_features, affine=True)
118
-
119
- def forward(self, x):
120
- out = self.norm1(x)
121
- out = F.relu(out)
122
- out = self.conv1(out)
123
- out = self.norm2(out)
124
- out = F.relu(out)
125
- out = self.conv2(out)
126
- out += x
127
- return out
128
-
129
-
130
- class ResBlock3d(nn.Module):
131
- """
132
- Res block, preserve spatial resolution.
133
- """
134
-
135
- def __init__(self, in_features, kernel_size, padding):
136
- super(ResBlock3d, self).__init__()
137
- self.conv1 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,
138
- padding=padding)
139
- self.conv2 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,
140
- padding=padding)
141
- self.norm1 = BatchNorm3d(in_features, affine=True)
142
- self.norm2 = BatchNorm3d(in_features, affine=True)
143
-
144
- def forward(self, x):
145
- out = self.norm1(x)
146
- out = F.relu(out)
147
- out = self.conv1(out)
148
- out = self.norm2(out)
149
- out = F.relu(out)
150
- out = self.conv2(out)
151
- out += x
152
- return out
153
-
154
-
155
- class UpBlock2d(nn.Module):
156
- """
157
- Upsampling block for use in decoder.
158
- """
159
-
160
- def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
161
- super(UpBlock2d, self).__init__()
162
-
163
- self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
164
- padding=padding, groups=groups)
165
- self.norm = BatchNorm2d(out_features, affine=True)
166
-
167
- def forward(self, x):
168
- out = F.interpolate(x, scale_factor=2)
169
- out = self.conv(out)
170
- out = self.norm(out)
171
- out = F.relu(out)
172
- return out
173
-
174
- class UpBlock3d(nn.Module):
175
- """
176
- Upsampling block for use in decoder.
177
- """
178
-
179
- def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
180
- super(UpBlock3d, self).__init__()
181
-
182
- self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
183
- padding=padding, groups=groups)
184
- self.norm = BatchNorm3d(out_features, affine=True)
185
-
186
- def forward(self, x):
187
- # out = F.interpolate(x, scale_factor=(1, 2, 2), mode='trilinear')
188
- out = F.interpolate(x, scale_factor=(1, 2, 2))
189
- out = self.conv(out)
190
- out = self.norm(out)
191
- out = F.relu(out)
192
- return out
193
-
194
-
195
- class DownBlock2d(nn.Module):
196
- """
197
- Downsampling block for use in encoder.
198
- """
199
-
200
- def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
201
- super(DownBlock2d, self).__init__()
202
- self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
203
- padding=padding, groups=groups)
204
- self.norm = BatchNorm2d(out_features, affine=True)
205
- self.pool = nn.AvgPool2d(kernel_size=(2, 2))
206
-
207
- def forward(self, x):
208
- out = self.conv(x)
209
- out = self.norm(out)
210
- out = F.relu(out)
211
- out = self.pool(out)
212
- return out
213
-
214
-
215
- class DownBlock3d(nn.Module):
216
- """
217
- Downsampling block for use in encoder.
218
- """
219
-
220
- def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
221
- super(DownBlock3d, self).__init__()
222
- '''
223
- self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
224
- padding=padding, groups=groups, stride=(1, 2, 2))
225
- '''
226
- self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
227
- padding=padding, groups=groups)
228
- self.norm = BatchNorm3d(out_features, affine=True)
229
- self.pool = nn.AvgPool3d(kernel_size=(1, 2, 2))
230
-
231
- def forward(self, x):
232
- out = self.conv(x)
233
- out = self.norm(out)
234
- out = F.relu(out)
235
- out = self.pool(out)
236
- return out
237
-
238
-
239
- class SameBlock2d(nn.Module):
240
- """
241
- Simple block, preserve spatial resolution.
242
- """
243
-
244
- def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1, lrelu=False):
245
- super(SameBlock2d, self).__init__()
246
- self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,
247
- kernel_size=kernel_size, padding=padding, groups=groups)
248
- self.norm = BatchNorm2d(out_features, affine=True)
249
- if lrelu:
250
- self.ac = nn.LeakyReLU()
251
- else:
252
- self.ac = nn.ReLU()
253
-
254
- def forward(self, x):
255
- out = self.conv(x)
256
- out = self.norm(out)
257
- out = self.ac(out)
258
- return out
259
-
260
-
261
- class Encoder(nn.Module):
262
- """
263
- Hourglass Encoder
264
- """
265
-
266
- def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
267
- super(Encoder, self).__init__()
268
-
269
- down_blocks = []
270
- for i in range(num_blocks):
271
- down_blocks.append(DownBlock3d(in_features if i == 0 else min(max_features, block_expansion * (2 ** i)),
272
- min(max_features, block_expansion * (2 ** (i + 1))),
273
- kernel_size=3, padding=1))
274
- self.down_blocks = nn.ModuleList(down_blocks)
275
-
276
- def forward(self, x):
277
- outs = [x]
278
- for down_block in self.down_blocks:
279
- outs.append(down_block(outs[-1]))
280
- return outs
281
-
282
-
283
- class Decoder(nn.Module):
284
- """
285
- Hourglass Decoder
286
- """
287
-
288
- def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
289
- super(Decoder, self).__init__()
290
-
291
- up_blocks = []
292
-
293
- for i in range(num_blocks)[::-1]:
294
- in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1)))
295
- out_filters = min(max_features, block_expansion * (2 ** i))
296
- up_blocks.append(UpBlock3d(in_filters, out_filters, kernel_size=3, padding=1))
297
-
298
- self.up_blocks = nn.ModuleList(up_blocks)
299
- # self.out_filters = block_expansion
300
- self.out_filters = block_expansion + in_features
301
-
302
- self.conv = nn.Conv3d(in_channels=self.out_filters, out_channels=self.out_filters, kernel_size=3, padding=1)
303
- self.norm = BatchNorm3d(self.out_filters, affine=True)
304
-
305
- def forward(self, x):
306
- out = x.pop()
307
- # for up_block in self.up_blocks[:-1]:
308
- for up_block in self.up_blocks:
309
- out = up_block(out)
310
- skip = x.pop()
311
- out = torch.cat([out, skip], dim=1)
312
- # out = self.up_blocks[-1](out)
313
- out = self.conv(out)
314
- out = self.norm(out)
315
- out = F.relu(out)
316
- return out
317
-
318
-
319
- class Hourglass(nn.Module):
320
- """
321
- Hourglass architecture.
322
- """
323
-
324
- def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
325
- super(Hourglass, self).__init__()
326
- self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)
327
- self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)
328
- self.out_filters = self.decoder.out_filters
329
-
330
- def forward(self, x):
331
- return self.decoder(self.encoder(x))
332
-
333
-
334
- class KPHourglass(nn.Module):
335
- """
336
- Hourglass architecture.
337
- """
338
-
339
- def __init__(self, block_expansion, in_features, reshape_features, reshape_depth, num_blocks=3, max_features=256):
340
- super(KPHourglass, self).__init__()
341
-
342
- self.down_blocks = nn.Sequential()
343
- for i in range(num_blocks):
344
- self.down_blocks.add_module('down'+ str(i), DownBlock2d(in_features if i == 0 else min(max_features, block_expansion * (2 ** i)),
345
- min(max_features, block_expansion * (2 ** (i + 1))),
346
- kernel_size=3, padding=1))
347
-
348
- in_filters = min(max_features, block_expansion * (2 ** num_blocks))
349
- self.conv = nn.Conv2d(in_channels=in_filters, out_channels=reshape_features, kernel_size=1)
350
-
351
- self.up_blocks = nn.Sequential()
352
- for i in range(num_blocks):
353
- in_filters = min(max_features, block_expansion * (2 ** (num_blocks - i)))
354
- out_filters = min(max_features, block_expansion * (2 ** (num_blocks - i - 1)))
355
- self.up_blocks.add_module('up'+ str(i), UpBlock3d(in_filters, out_filters, kernel_size=3, padding=1))
356
-
357
- self.reshape_depth = reshape_depth
358
- self.out_filters = out_filters
359
-
360
- def forward(self, x):
361
- out = self.down_blocks(x)
362
- out = self.conv(out)
363
- bs, c, h, w = out.shape
364
- out = out.view(bs, c//self.reshape_depth, self.reshape_depth, h, w)
365
- out = self.up_blocks(out)
366
-
367
- return out
368
-
369
-
370
-
371
- class AntiAliasInterpolation2d(nn.Module):
372
- """
373
- Band-limited downsampling, for better preservation of the input signal.
374
- """
375
- def __init__(self, channels, scale):
376
- super(AntiAliasInterpolation2d, self).__init__()
377
- sigma = (1 / scale - 1) / 2
378
- kernel_size = 2 * round(sigma * 4) + 1
379
- self.ka = kernel_size // 2
380
- self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka
381
-
382
- kernel_size = [kernel_size, kernel_size]
383
- sigma = [sigma, sigma]
384
- # The gaussian kernel is the product of the
385
- # gaussian function of each dimension.
386
- kernel = 1
387
- meshgrids = torch.meshgrid(
388
- [
389
- torch.arange(size, dtype=torch.float32)
390
- for size in kernel_size
391
- ]
392
- )
393
- for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
394
- mean = (size - 1) / 2
395
- kernel *= torch.exp(-(mgrid - mean) ** 2 / (2 * std ** 2))
396
-
397
- # Make sure sum of values in gaussian kernel equals 1.
398
- kernel = kernel / torch.sum(kernel)
399
- # Reshape to depthwise convolutional weight
400
- kernel = kernel.view(1, 1, *kernel.size())
401
- kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
402
-
403
- self.register_buffer('weight', kernel)
404
- self.groups = channels
405
- self.scale = scale
406
- inv_scale = 1 / scale
407
- self.int_inv_scale = int(inv_scale)
408
-
409
- def forward(self, input):
410
- if self.scale == 1.0:
411
- return input
412
-
413
- out = F.pad(input, (self.ka, self.kb, self.ka, self.kb))
414
- out = F.conv2d(out, weight=self.weight, groups=self.groups)
415
- out = out[:, :, ::self.int_inv_scale, ::self.int_inv_scale]
416
-
417
- return out
418
-
419
-
420
- class SPADE(nn.Module):
421
- def __init__(self, norm_nc, label_nc):
422
- super().__init__()
423
-
424
- self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
425
- nhidden = 128
426
-
427
- self.mlp_shared = nn.Sequential(
428
- nn.Conv2d(label_nc, nhidden, kernel_size=3, padding=1),
429
- nn.ReLU())
430
- self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=3, padding=1)
431
- self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=3, padding=1)
432
-
433
- def forward(self, x, segmap):
434
- normalized = self.param_free_norm(x)
435
- segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
436
- actv = self.mlp_shared(segmap)
437
- gamma = self.mlp_gamma(actv)
438
- beta = self.mlp_beta(actv)
439
- out = normalized * (1 + gamma) + beta
440
- return out
441
-
442
-
443
- class SPADEResnetBlock(nn.Module):
444
- def __init__(self, fin, fout, norm_G, label_nc, use_se=False, dilation=1):
445
- super().__init__()
446
- # Attributes
447
- self.learned_shortcut = (fin != fout)
448
- fmiddle = min(fin, fout)
449
- self.use_se = use_se
450
- # create conv layers
451
- self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=dilation, dilation=dilation)
452
- self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=dilation, dilation=dilation)
453
- if self.learned_shortcut:
454
- self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
455
- # apply spectral norm if specified
456
- if 'spectral' in norm_G:
457
- self.conv_0 = spectral_norm(self.conv_0)
458
- self.conv_1 = spectral_norm(self.conv_1)
459
- if self.learned_shortcut:
460
- self.conv_s = spectral_norm(self.conv_s)
461
- # define normalization layers
462
- self.norm_0 = SPADE(fin, label_nc)
463
- self.norm_1 = SPADE(fmiddle, label_nc)
464
- if self.learned_shortcut:
465
- self.norm_s = SPADE(fin, label_nc)
466
-
467
- def forward(self, x, seg1):
468
- x_s = self.shortcut(x, seg1)
469
- dx = self.conv_0(self.actvn(self.norm_0(x, seg1)))
470
- dx = self.conv_1(self.actvn(self.norm_1(dx, seg1)))
471
- out = x_s + dx
472
- return out
473
-
474
- def shortcut(self, x, seg1):
475
- if self.learned_shortcut:
476
- x_s = self.conv_s(self.norm_s(x, seg1))
477
- else:
478
- x_s = x
479
- return x_s
480
-
481
- def actvn(self, x):
482
- return F.leaky_relu(x, 2e-1)
483
-
484
- class audio2image(nn.Module):
485
- def __init__(self, generator, kp_extractor, he_estimator_video, he_estimator_audio, train_params):
486
- super().__init__()
487
- # Attributes
488
- self.generator = generator
489
- self.kp_extractor = kp_extractor
490
- self.he_estimator_video = he_estimator_video
491
- self.he_estimator_audio = he_estimator_audio
492
- self.train_params = train_params
493
-
494
- def headpose_pred_to_degree(self, pred):
495
- device = pred.device
496
- idx_tensor = [idx for idx in range(66)]
497
- idx_tensor = torch.FloatTensor(idx_tensor).to(device)
498
- pred = F.softmax(pred)
499
- degree = torch.sum(pred*idx_tensor, 1) * 3 - 99
500
-
501
- return degree
502
-
503
- def get_rotation_matrix(self, yaw, pitch, roll):
504
- yaw = yaw / 180 * 3.14
505
- pitch = pitch / 180 * 3.14
506
- roll = roll / 180 * 3.14
507
-
508
- roll = roll.unsqueeze(1)
509
- pitch = pitch.unsqueeze(1)
510
- yaw = yaw.unsqueeze(1)
511
-
512
- roll_mat = torch.cat([torch.ones_like(roll), torch.zeros_like(roll), torch.zeros_like(roll),
513
- torch.zeros_like(roll), torch.cos(roll), -torch.sin(roll),
514
- torch.zeros_like(roll), torch.sin(roll), torch.cos(roll)], dim=1)
515
- roll_mat = roll_mat.view(roll_mat.shape[0], 3, 3)
516
-
517
- pitch_mat = torch.cat([torch.cos(pitch), torch.zeros_like(pitch), torch.sin(pitch),
518
- torch.zeros_like(pitch), torch.ones_like(pitch), torch.zeros_like(pitch),
519
- -torch.sin(pitch), torch.zeros_like(pitch), torch.cos(pitch)], dim=1)
520
- pitch_mat = pitch_mat.view(pitch_mat.shape[0], 3, 3)
521
-
522
- yaw_mat = torch.cat([torch.cos(yaw), -torch.sin(yaw), torch.zeros_like(yaw),
523
- torch.sin(yaw), torch.cos(yaw), torch.zeros_like(yaw),
524
- torch.zeros_like(yaw), torch.zeros_like(yaw), torch.ones_like(yaw)], dim=1)
525
- yaw_mat = yaw_mat.view(yaw_mat.shape[0], 3, 3)
526
-
527
- rot_mat = torch.einsum('bij,bjk,bkm->bim', roll_mat, pitch_mat, yaw_mat)
528
-
529
- return rot_mat
530
-
531
- def keypoint_transformation(self, kp_canonical, he):
532
- kp = kp_canonical['value'] # (bs, k, 3)
533
- yaw, pitch, roll = he['yaw'], he['pitch'], he['roll']
534
- t, exp = he['t'], he['exp']
535
-
536
- yaw = self.headpose_pred_to_degree(yaw)
537
- pitch = self.headpose_pred_to_degree(pitch)
538
- roll = self.headpose_pred_to_degree(roll)
539
-
540
- rot_mat = self.get_rotation_matrix(yaw, pitch, roll) # (bs, 3, 3)
541
-
542
- # keypoint rotation
543
- kp_rotated = torch.einsum('bmp,bkp->bkm', rot_mat, kp)
544
-
545
-
546
-
547
- # keypoint translation
548
- t = t.unsqueeze_(1).repeat(1, kp.shape[1], 1)
549
- kp_t = kp_rotated + t
550
-
551
- # add expression deviation
552
- exp = exp.view(exp.shape[0], -1, 3)
553
- kp_transformed = kp_t + exp
554
-
555
- return {'value': kp_transformed}
556
-
557
- def forward(self, source_image, target_audio):
558
- pose_source = self.he_estimator_video(source_image)
559
- pose_generated = self.he_estimator_audio(target_audio)
560
- kp_canonical = self.kp_extractor(source_image)
561
- kp_source = self.keypoint_transformation(kp_canonical, pose_source)
562
- kp_transformed_generated = self.keypoint_transformation(kp_canonical, pose_generated)
563
- generated = self.generator(source_image, kp_source=kp_source, kp_driving=kp_transformed_generated)
564
- return generated

spaces/AIConsultant/MusicGen/app_v2.py DELETED
@@ -1,1839 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- # Updated to account for UI changes from https://github.com/rkfg/audiocraft/blob/long/app.py
8
- # also released under the MIT license.
9
-
10
- import argparse
11
- from concurrent.futures import ProcessPoolExecutor
12
- import os
13
- from pathlib import Path
14
- import subprocess as sp
15
- from tempfile import NamedTemporaryFile
16
- import time
17
- import warnings
18
- import glob
19
- import re
20
- from PIL import Image
21
- from pydub import AudioSegment
22
- from datetime import datetime
23
-
24
- import json
25
- import shutil
26
- import taglib
27
- import torch
28
- import torchaudio
29
- import gradio as gr
30
- import numpy as np
31
- import typing as tp
32
-
33
- from audiocraft.data.audio_utils import convert_audio
34
- from audiocraft.data.audio import audio_write
35
- from audiocraft.models import AudioGen, MusicGen, MultiBandDiffusion
36
- from audiocraft.utils import ui
37
- import random, string
38
-
39
- version = "2.0.0a"
40
-
41
- theme = gr.themes.Base(
42
- primary_hue="lime",
43
- secondary_hue="lime",
44
- neutral_hue="neutral",
45
- ).set(
46
- button_primary_background_fill_hover='*primary_500',
47
- button_primary_background_fill_hover_dark='*primary_500',
48
- button_secondary_background_fill_hover='*primary_500',
49
- button_secondary_background_fill_hover_dark='*primary_500'
50
- )
51
-
52
- MODEL = None # Last used model
53
- MODELS = None
54
- UNLOAD_MODEL = False
55
- MOVE_TO_CPU = False
56
- IS_BATCHED = "facebook/MusicGen" in os.environ.get('SPACE_ID', '')
57
- print(IS_BATCHED)
58
- MAX_BATCH_SIZE = 12
59
- BATCHED_DURATION = 15
60
- INTERRUPTING = False
61
- MBD = None
62
- # We have to wrap subprocess call to clean a bit the log when using gr.make_waveform
63
- _old_call = sp.call
64
-
65
-
66
- def generate_random_string(length):
67
- characters = string.ascii_letters + string.digits
68
- return ''.join(random.choice(characters) for _ in range(length))
69
-
70
-
71
- def resize_video(input_path, output_path, target_width, target_height):
72
- ffmpeg_cmd = [
73
- 'ffmpeg',
74
- '-y',
75
- '-i', input_path,
76
- '-vf', f'scale={target_width}:{target_height}',
77
- '-c:a', 'copy',
78
- output_path
79
- ]
80
- sp.run(ffmpeg_cmd)
81
-
82
-
83
- def _call_nostderr(*args, **kwargs):
84
- # Avoid ffmpeg vomiting on the logs.
85
- kwargs['stderr'] = sp.DEVNULL
86
- kwargs['stdout'] = sp.DEVNULL
87
- _old_call(*args, **kwargs)
88
-
89
-
90
- sp.call = _call_nostderr
91
- # Preallocating the pool of processes.
92
- pool = ProcessPoolExecutor(4)
93
- pool.__enter__()
94
-
95
-
96
- def interrupt():
97
- global INTERRUPTING
98
- INTERRUPTING = True
99
-
100
-
101
- class FileCleaner:
102
- def __init__(self, file_lifetime: float = 3600):
103
- self.file_lifetime = file_lifetime
104
- self.files = []
105
-
106
- def add(self, path: tp.Union[str, Path]):
107
- self._cleanup()
108
- self.files.append((time.time(), Path(path)))
109
-
110
- def _cleanup(self):
111
- now = time.time()
112
- for time_added, path in list(self.files):
113
- if now - time_added > self.file_lifetime:
114
- if path.exists():
115
- path.unlink()
116
- self.files.pop(0)
117
- else:
118
- break
119
-
120
-
121
- file_cleaner = FileCleaner()
122
-
123
-
124
- def make_waveform(*args, **kwargs):
125
- # Further remove some warnings.
126
- be = time.time()
127
- with warnings.catch_warnings():
128
- warnings.simplefilter('ignore')
129
- height = kwargs.pop('height')
130
- width = kwargs.pop('width')
131
- if height < 256:
132
- height = 256
133
- if width < 256:
134
- width = 256
135
- waveform_video = gr.make_waveform(*args, **kwargs)
136
- out = f"{generate_random_string(12)}.mp4"
137
- image = kwargs.get('bg_image', None)
138
- if image is None:
139
- resize_video(waveform_video, out, 900, 300)
140
- else:
141
- resize_video(waveform_video, out, width, height)
142
- print("Make a video took", time.time() - be)
143
- return out
144
-
145
-
146
- def load_model(version='GrandaddyShmax/musicgen-melody', custom_model=None, base_model='GrandaddyShmax/musicgen-medium', gen_type="music"):
147
- global MODEL, MODELS
148
- print("Loading model", version)
149
- if MODELS is None:
150
- if version == 'GrandaddyShmax/musicgen-custom':
151
- MODEL = MusicGen.get_pretrained(base_model)
152
- file_path = os.path.abspath("models/" + str(custom_model) + ".pt")
153
- MODEL.lm.load_state_dict(torch.load(file_path))
154
- else:
155
- if gen_type == "music":
156
- MODEL = MusicGen.get_pretrained(version)
157
- elif gen_type == "audio":
158
- MODEL = AudioGen.get_pretrained(version)
159
-
160
- return
161
-
162
- else:
163
- t1 = time.monotonic()
164
- if MODEL is not None:
165
- MODEL.to('cpu') # move to cache
166
- print("Previous model moved to CPU in %.2fs" % (time.monotonic() - t1))
167
- t1 = time.monotonic()
168
- if version != 'GrandaddyShmax/musicgen-custom' and MODELS.get(version) is None:
169
- print("Loading model %s from disk" % version)
170
- if gen_type == "music":
171
- result = MusicGen.get_pretrained(version)
172
- elif gen_type == "audio":
173
- result = AudioGen.get_pretrained(version)
174
- MODELS[version] = result
175
- print("Model loaded in %.2fs" % (time.monotonic() - t1))
176
- MODEL = result
177
- return
178
- result = MODELS[version].to('cuda')
179
- print("Cached model loaded in %.2fs" % (time.monotonic() - t1))
180
- MODEL = result
181
-
182
- def get_audio_info(audio_path):
183
- if audio_path is not None:
184
- if audio_path.name.endswith(".wav") or audio_path.name.endswith(".mp4") or audio_path.name.endswith(".json"):
185
- if not audio_path.name.endswith(".json"):
186
- with taglib.File(audio_path.name, save_on_exit=False) as song:
187
- if 'COMMENT' not in song.tags:
188
- return "No tags found. Either the file is not generated by MusicGen+ V1.2.7 and higher or the tags are corrupted. (Discord removes metadata from mp4 and wav files, so you can't use them)"
189
- json_string = song.tags['COMMENT'][0]
190
- data = json.loads(json_string)
191
- global_prompt = str("\nGlobal Prompt: " + (data['global_prompt'] if data['global_prompt'] != "" else "none")) if 'global_prompt' in data else ""
192
- bpm = str("\nBPM: " + data['bpm']) if 'bpm' in data else ""
193
- key = str("\nKey: " + data['key']) if 'key' in data else ""
194
- scale = str("\nScale: " + data['scale']) if 'scale' in data else ""
195
- prompts = str("\nPrompts: " + (data['texts'] if data['texts'] != "['']" else "none")) if 'texts' in data else ""
196
- duration = str("\nDuration: " + data['duration']) if 'duration' in data else ""
197
- overlap = str("\nOverlap: " + data['overlap']) if 'overlap' in data else ""
198
- seed = str("\nSeed: " + data['seed']) if 'seed' in data else ""
199
- audio_mode = str("\nAudio Mode: " + data['audio_mode']) if 'audio_mode' in data else ""
200
- input_length = str("\nInput Length: " + data['input_length']) if 'input_length' in data else ""
201
- channel = str("\nChannel: " + data['channel']) if 'channel' in data else ""
202
- sr_select = str("\nSample Rate: " + data['sr_select']) if 'sr_select' in data else ""
203
- gen_type = str(data['generator'] + "gen-") if 'generator' in data else ""
204
- model = str("\nModel: " + gen_type + data['model']) if 'model' in data else ""
205
- custom_model = str("\nCustom Model: " + data['custom_model']) if 'custom_model' in data else ""
206
- base_model = str("\nBase Model: " + data['base_model']) if 'base_model' in data else ""
207
- decoder = str("\nDecoder: " + data['decoder']) if 'decoder' in data else ""
208
- topk = str("\nTopk: " + data['topk']) if 'topk' in data else ""
209
- topp = str("\nTopp: " + data['topp']) if 'topp' in data else ""
210
- temperature = str("\nTemperature: " + data['temperature']) if 'temperature' in data else ""
211
- cfg_coef = str("\nClassifier Free Guidance: " + data['cfg_coef']) if 'cfg_coef' in data else ""
212
- version = str("Version: " + data['version']) if 'version' in data else "Version: Unknown"
213
- info = str(version + global_prompt + bpm + key + scale + prompts + duration + overlap + seed + audio_mode + input_length + channel + sr_select + model + custom_model + base_model + decoder + topk + topp + temperature + cfg_coef)
214
- if info == "":
215
- return "No tags found. Either the file is not generated by MusicGen+ V1.2.7 and higher or the tags are corrupted. (Discord removes metadata from mp4 and wav files, so you can't use them)"
216
- return info
217
- else:
218
- with open(audio_path.name) as json_file:
219
- data = json.load(json_file)
220
- #if 'global_prompt' not in data:
221
- #return "No tags found. Either the file is not generated by MusicGen+ V1.2.8a and higher or the tags are corrupted."
222
- global_prompt = str("\nGlobal Prompt: " + (data['global_prompt'] if data['global_prompt'] != "" else "none")) if 'global_prompt' in data else ""
223
- bpm = str("\nBPM: " + data['bpm']) if 'bpm' in data else ""
224
- key = str("\nKey: " + data['key']) if 'key' in data else ""
225
- scale = str("\nScale: " + data['scale']) if 'scale' in data else ""
226
- prompts = str("\nPrompts: " + (data['texts'] if data['texts'] != "['']" else "none")) if 'texts' in data else ""
227
- duration = str("\nDuration: " + data['duration']) if 'duration' in data else ""
228
- overlap = str("\nOverlap: " + data['overlap']) if 'overlap' in data else ""
229
- seed = str("\nSeed: " + data['seed']) if 'seed' in data else ""
230
- audio_mode = str("\nAudio Mode: " + data['audio_mode']) if 'audio_mode' in data else ""
231
- input_length = str("\nInput Length: " + data['input_length']) if 'input_length' in data else ""
232
- channel = str("\nChannel: " + data['channel']) if 'channel' in data else ""
233
- sr_select = str("\nSample Rate: " + data['sr_select']) if 'sr_select' in data else ""
234
- gen_type = str(data['generator'] + "gen-") if 'generator' in data else ""
235
- model = str("\nModel: " + gen_type + data['model']) if 'model' in data else ""
236
- custom_model = str("\nCustom Model: " + data['custom_model']) if 'custom_model' in data else ""
237
- base_model = str("\nBase Model: " + data['base_model']) if 'base_model' in data else ""
238
- decoder = str("\nDecoder: " + data['decoder']) if 'decoder' in data else ""
239
- topk = str("\nTopk: " + data['topk']) if 'topk' in data else ""
240
- topp = str("\nTopp: " + data['topp']) if 'topp' in data else ""
241
- temperature = str("\nTemperature: " + data['temperature']) if 'temperature' in data else ""
242
- cfg_coef = str("\nClassifier Free Guidance: " + data['cfg_coef']) if 'cfg_coef' in data else ""
243
- version = str("Version: " + data['version']) if 'version' in data else "Version: Unknown"
244
- info = str(version + global_prompt + bpm + key + scale + prompts + duration + overlap + seed + audio_mode + input_length + channel + sr_select + model + custom_model + base_model + decoder + topk + topp + temperature + cfg_coef)
245
- if info == "":
246
- return "No tags found. Either the file is not generated by MusicGen+ V1.2.7 and higher or the tags are corrupted."
247
- return info
248
- else:
249
- return "Only .wav ,.mp4 and .json files are supported"
250
- else:
251
- return None
252
-
253
-
254
- def info_to_params(audio_path):
255
- if audio_path is not None:
256
- if audio_path.name.endswith(".wav") or audio_path.name.endswith(".mp4") or audio_path.name.endswith(".json"):
257
- if not audio_path.name.endswith(".json"):
258
- with taglib.File(audio_path.name, save_on_exit=False) as song:
259
- if 'COMMENT' not in song.tags:
260
- return "Default", False, "", 120, "C", "Major", "large", None, "medium", 1, "", "", "", "", "", "", "", "", "", "", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, "sample", 10, 250, 0, 1.0, 5.0, -1, 12, "stereo", "48000"
261
- json_string = song.tags['COMMENT'][0]
262
- data = json.loads(json_string)
263
- struc_prompt = (False if data['bpm'] == "none" else True) if 'bpm' in data else False
264
- global_prompt = data['global_prompt'] if 'global_prompt' in data else ""
265
- bpm = (120 if data['bpm'] == "none" else int(data['bpm'])) if 'bpm' in data else 120
266
- key = ("C" if data['key'] == "none" else data['key']) if 'key' in data else "C"
267
- scale = ("Major" if data['scale'] == "none" else data['scale']) if 'scale' in data else "Major"
268
- model = data['model'] if 'model' in data else "large"
269
- custom_model = (data['custom_model'] if data['custom_model'] in get_available_models() else None) if 'custom_model' in data else None
270
- base_model = data['base_model'] if 'base_model' in data else "medium"
271
- decoder = data['decoder'] if 'decoder' in data else "Default"
272
- if 'texts' not in data:
273
- unique_prompts = 1
274
- text = ["", "", "", "", "", "", "", "", "", ""]
275
- repeat = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
276
- else:
277
- s = data['texts']
278
- s = re.findall(r"'(.*?)'", s)
279
- text = []
280
- repeat = []
281
- i = 0
282
- for elem in s:
283
- if elem.strip():
284
- if i == 0 or elem != s[i-1]:
285
- text.append(elem)
286
- repeat.append(1)
287
- else:
288
- repeat[-1] += 1
289
- i += 1
290
- text.extend([""] * (10 - len(text)))
291
- repeat.extend([1] * (10 - len(repeat)))
292
- unique_prompts = len([t for t in text if t])
293
- audio_mode = ("sample" if data['audio_mode'] == "none" else data['audio_mode']) if 'audio_mode' in data else "sample"
294
- duration = int(data['duration']) if 'duration' in data else 10
295
- topk = float(data['topk']) if 'topk' in data else 250
296
- topp = float(data['topp']) if 'topp' in data else 0
297
- temperature = float(data['temperature']) if 'temperature' in data else 1.0
298
- cfg_coef = float(data['cfg_coef']) if 'cfg_coef' in data else 5.0
299
- seed = int(data['seed']) if 'seed' in data else -1
300
- overlap = int(data['overlap']) if 'overlap' in data else 12
301
- channel = data['channel'] if 'channel' in data else "stereo"
302
- sr_select = data['sr_select'] if 'sr_select' in data else "48000"
303
- return decoder, struc_prompt, global_prompt, bpm, key, scale, model, custom_model, base_model, unique_prompts, text[0], text[1], text[2], text[3], text[4], text[5], text[6], text[7], text[8], text[9], repeat[0], repeat[1], repeat[2], repeat[3], repeat[4], repeat[5], repeat[6], repeat[7], repeat[8], repeat[9], audio_mode, duration, topk, topp, temperature, cfg_coef, seed, overlap, channel, sr_select
304
- else:
305
- with open(audio_path.name) as json_file:
306
- data = json.load(json_file)
307
- struc_prompt = (False if data['bpm'] == "none" else True) if 'bpm' in data else False
308
- global_prompt = data['global_prompt'] if 'global_prompt' in data else ""
309
- bpm = (120 if data['bpm'] == "none" else int(data['bpm'])) if 'bpm' in data else 120
310
- key = ("C" if data['key'] == "none" else data['key']) if 'key' in data else "C"
311
- scale = ("Major" if data['scale'] == "none" else data['scale']) if 'scale' in data else "Major"
312
- model = data['model'] if 'model' in data else "large"
313
- custom_model = (data['custom_model'] if data['custom_model'] in get_available_models() else None) if 'custom_model' in data else None
314
- base_model = data['base_model'] if 'base_model' in data else "medium"
315
- decoder = data['decoder'] if 'decoder' in data else "Default"
316
- if 'texts' not in data:
317
- unique_prompts = 1
318
- text = ["", "", "", "", "", "", "", "", "", ""]
319
- repeat = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
320
- else:
321
- s = data['texts']
322
- s = re.findall(r"'(.*?)'", s)
323
- text = []
324
- repeat = []
325
- i = 0
326
- for elem in s:
327
- if elem.strip():
328
- if i == 0 or elem != s[i-1]:
329
- text.append(elem)
330
- repeat.append(1)
331
- else:
332
- repeat[-1] += 1
333
- i += 1
334
- text.extend([""] * (10 - len(text)))
335
- repeat.extend([1] * (10 - len(repeat)))
336
- unique_prompts = len([t for t in text if t])
337
- audio_mode = ("sample" if data['audio_mode'] == "none" else data['audio_mode']) if 'audio_mode' in data else "sample"
338
- duration = int(data['duration']) if 'duration' in data else 10
339
- topk = float(data['topk']) if 'topk' in data else 250
340
- topp = float(data['topp']) if 'topp' in data else 0
341
- temperature = float(data['temperature']) if 'temperature' in data else 1.0
342
- cfg_coef = float(data['cfg_coef']) if 'cfg_coef' in data else 5.0
343
- seed = int(data['seed']) if 'seed' in data else -1
344
- overlap = int(data['overlap']) if 'overlap' in data else 12
345
- channel = data['channel'] if 'channel' in data else "stereo"
346
- sr_select = data['sr_select'] if 'sr_select' in data else "48000"
347
- return decoder, struc_prompt, global_prompt, bpm, key, scale, model, custom_model, base_model, unique_prompts, text[0], text[1], text[2], text[3], text[4], text[5], text[6], text[7], text[8], text[9], repeat[0], repeat[1], repeat[2], repeat[3], repeat[4], repeat[5], repeat[6], repeat[7], repeat[8], repeat[9], audio_mode, duration, topk, topp, temperature, cfg_coef, seed, overlap, channel, sr_select
348
- else:
349
- return "Default", False, "", 120, "C", "Major", "large", None, "medium", 1, "", "", "", "", "", "", "", "", "", "", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, "sample", 10, 250, 0, 1.0, 5.0, -1, 12, "stereo", "48000"
350
- else:
351
- return "Default", False, "", 120, "C", "Major", "large", None, "medium", 1, "", "", "", "", "", "", "", "", "", "", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, "sample", 10, 250, 0, 1.0, 5.0, -1, 12, "stereo", "48000"
352
-
353
-
354
- def info_to_params_a(audio_path):
355
- if audio_path is not None:
356
- if audio_path.name.endswith(".wav") or audio_path.name.endswith(".mp4") or audio_path.name.endswith(".json"):
357
- if not audio_path.name.endswith(".json"):
358
- with taglib.File(audio_path.name, save_on_exit=False) as song:
359
- if 'COMMENT' not in song.tags:
360
- return "Default", False, "", 1, "", "", "", "", "", "", "", "", "", "", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10, 250, 0, 1.0, 5.0, -1, 12, "stereo", "48000"
361
- json_string = song.tags['COMMENT'][0]
362
- data = json.loads(json_string)
363
- struc_prompt = (False if data['global_prompt'] == "" else True) if 'global_prompt' in data else False
364
- global_prompt = data['global_prompt'] if 'global_prompt' in data else ""
365
- decoder = data['decoder'] if 'decoder' in data else "Default"
366
- if 'texts' not in data:
367
- unique_prompts = 1
368
- text = ["", "", "", "", "", "", "", "", "", ""]
369
- repeat = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
370
- else:
371
- s = data['texts']
372
- s = re.findall(r"'(.*?)'", s)
373
- text = []
374
- repeat = []
375
- i = 0
376
- for elem in s:
377
- if elem.strip():
378
- if i == 0 or elem != s[i-1]:
379
- text.append(elem)
380
- repeat.append(1)
381
- else:
382
- repeat[-1] += 1
383
- i += 1
384
- text.extend([""] * (10 - len(text)))
385
- repeat.extend([1] * (10 - len(repeat)))
386
- unique_prompts = len([t for t in text if t])
387
- duration = int(data['duration']) if 'duration' in data else 10
388
- topk = float(data['topk']) if 'topk' in data else 250
389
- topp = float(data['topp']) if 'topp' in data else 0
390
- temperature = float(data['temperature']) if 'temperature' in data else 1.0
391
- cfg_coef = float(data['cfg_coef']) if 'cfg_coef' in data else 5.0
392
- seed = int(data['seed']) if 'seed' in data else -1
393
- overlap = int(data['overlap']) if 'overlap' in data else 12
394
- channel = data['channel'] if 'channel' in data else "stereo"
395
- sr_select = data['sr_select'] if 'sr_select' in data else "48000"
396
- return decoder, struc_prompt, global_prompt, unique_prompts, text[0], text[1], text[2], text[3], text[4], text[5], text[6], text[7], text[8], text[9], repeat[0], repeat[1], repeat[2], repeat[3], repeat[4], repeat[5], repeat[6], repeat[7], repeat[8], repeat[9], duration, topk, topp, temperature, cfg_coef, seed, overlap, channel, sr_select
397
- else:
398
- with open(audio_path.name) as json_file:
399
- data = json.load(json_file)
400
- struc_prompt = (False if data['global_prompt'] == "" else True) if 'global_prompt' in data else False
401
- global_prompt = data['global_prompt'] if 'global_prompt' in data else ""
402
- decoder = data['decoder'] if 'decoder' in data else "Default"
403
- if 'texts' not in data:
404
- unique_prompts = 1
405
- text = ["", "", "", "", "", "", "", "", "", ""]
406
- repeat = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
407
- else:
408
- s = data['texts']
409
- s = re.findall(r"'(.*?)'", s)
410
- text = []
411
- repeat = []
412
- i = 0
413
- for elem in s:
414
- if elem.strip():
415
- if i == 0 or elem != s[i-1]:
416
- text.append(elem)
417
- repeat.append(1)
418
- else:
419
- repeat[-1] += 1
420
- i += 1
421
- text.extend([""] * (10 - len(text)))
422
- repeat.extend([1] * (10 - len(repeat)))
423
- unique_prompts = len([t for t in text if t])
424
- duration = int(data['duration']) if 'duration' in data else 10
425
- topk = float(data['topk']) if 'topk' in data else 250
426
- topp = float(data['topp']) if 'topp' in data else 0
427
- temperature = float(data['temperature']) if 'temperature' in data else 1.0
428
- cfg_coef = float(data['cfg_coef']) if 'cfg_coef' in data else 5.0
429
- seed = int(data['seed']) if 'seed' in data else -1
430
- overlap = int(data['overlap']) if 'overlap' in data else 12
431
- channel = data['channel'] if 'channel' in data else "stereo"
432
- sr_select = data['sr_select'] if 'sr_select' in data else "48000"
433
- return decoder, struc_prompt, global_prompt, unique_prompts, text[0], text[1], text[2], text[3], text[4], text[5], text[6], text[7], text[8], text[9], repeat[0], repeat[1], repeat[2], repeat[3], repeat[4], repeat[5], repeat[6], repeat[7], repeat[8], repeat[9], duration, topk, topp, temperature, cfg_coef, seed, overlap, channel, sr_select
434
-
435
- else:
436
- return "Default", False, "", 1, "", "", "", "", "", "", "", "", "", "", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10, 250, 0, 1.0, 5.0, -1, 12, "stereo", "48000"
437
- else:
438
- return "Default", False, "", 1, "", "", "", "", "", "", "", "", "", "", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10, 250, 0, 1.0, 5.0, -1, 12, "stereo", "48000"
439
-
440
-
441
- def make_pseudo_stereo (filename, sr_select, pan, delay):
442
- if pan:
443
- temp = AudioSegment.from_wav(filename)
444
- if sr_select != "32000":
445
- temp = temp.set_frame_rate(int(sr_select))
446
- left = temp.pan(-0.5) - 5
447
- right = temp.pan(0.6) - 5
448
- temp = left.overlay(right, position=5)
449
- temp.export(filename, format="wav")
450
- if delay:
451
- waveform, sample_rate = torchaudio.load(filename) # load mono WAV file
452
- delay_seconds = 0.01 # set delay 10ms
453
- delay_samples = int(delay_seconds * sample_rate) # Calculating delay value in number of samples
454
- stereo_waveform = torch.stack([waveform[0], torch.cat((torch.zeros(delay_samples), waveform[0][:-delay_samples]))]) # Generate a stereo file with original mono audio and delayed version
455
- torchaudio.save(filename, stereo_waveform, sample_rate)
456
- return
457
-
458
-
459
- def normalize_audio(audio_data):
460
- audio_data = audio_data.astype(np.float32)
461
- max_value = np.max(np.abs(audio_data))
462
- audio_data /= max_value
463
- return audio_data
464
-
465
-
466
- def load_diffusion():
467
- global MBD
468
- if MBD is None:
469
- print("loading MBD")
470
- MBD = MultiBandDiffusion.get_mbd_musicgen()
471
-
472
-
473
- def unload_diffusion():
474
- global MBD
475
- if MBD is not None:
476
- print("unloading MBD")
477
- MBD = None
478
-
479
-
480
- def _do_predictions(gen_type, texts, melodies, sample, trim_start, trim_end, duration, image, height, width, background, bar1, bar2, channel, sr_select, progress=False, **gen_kwargs):
481
- if gen_type == "music":
482
- maximum_size = 29.5
483
- elif gen_type == "audio":
484
- maximum_size = 9.5
485
- cut_size = 0
486
- input_length = 0
487
- sampleP = None
488
- if sample is not None:
489
- globalSR, sampleM = sample[0], sample[1]
490
- sampleM = normalize_audio(sampleM)
491
- sampleM = torch.from_numpy(sampleM).t()
492
- if sampleM.dim() == 1:
493
- sampleM = sampleM.unsqueeze(0)
494
- sample_length = sampleM.shape[sampleM.dim() - 1] / globalSR
495
- if trim_start >= sample_length:
496
- trim_start = sample_length - 0.5
497
- if trim_end >= sample_length:
498
- trim_end = sample_length - 0.5
499
- if trim_start + trim_end >= sample_length:
500
- tmp = sample_length - 0.5
501
- trim_start = tmp / 2
502
- trim_end = tmp / 2
503
- sampleM = sampleM[..., int(globalSR * trim_start):int(globalSR * (sample_length - trim_end))]
504
- sample_length = sample_length - (trim_start + trim_end)
505
- if sample_length > maximum_size:
506
- cut_size = sample_length - maximum_size
507
- sampleP = sampleM[..., :int(globalSR * cut_size)]
508
- sampleM = sampleM[..., int(globalSR * cut_size):]
509
- if sample_length >= duration:
510
- duration = sample_length + 0.5
511
- input_length = sample_length
512
- global MODEL
513
- MODEL.set_generation_params(duration=(duration - cut_size), **gen_kwargs)
514
- print("new batch", len(texts), texts, [None if m is None else (m[0], m[1].shape) for m in melodies], [None if sample is None else (sample[0], sample[1].shape)])
515
- be = time.time()
516
- processed_melodies = []
517
- if gen_type == "music":
518
- target_sr = 32000
519
- elif gen_type == "audio":
520
- target_sr = 16000
521
- target_ac = 1
522
-
523
- for melody in melodies:
524
- if melody is None:
525
- processed_melodies.append(None)
526
- else:
527
- sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t()
528
- if melody.dim() == 1:
529
- melody = melody[None]
530
- melody = melody[..., :int(sr * duration)]
531
- melody = convert_audio(melody, sr, target_sr, target_ac)
532
- processed_melodies.append(melody)
533
-
534
- if sample is not None:
535
- if sampleP is None:
536
- if gen_type == "music":
537
- outputs = MODEL.generate_continuation(
538
- prompt=sampleM,
539
- prompt_sample_rate=globalSR,
540
- descriptions=texts,
541
- progress=progress,
542
- return_tokens=USE_DIFFUSION
543
- )
544
- elif gen_type == "audio":
545
- outputs = MODEL.generate_continuation(
546
- prompt=sampleM,
547
- prompt_sample_rate=globalSR,
548
- descriptions=texts,
549
- progress=progress
550
- )
551
- else:
552
- if sampleP.dim() > 1:
553
- sampleP = convert_audio(sampleP, globalSR, target_sr, target_ac)
554
- sampleP = sampleP.to(MODEL.device).float().unsqueeze(0)
555
- if gen_type == "music":
556
- outputs = MODEL.generate_continuation(
557
- prompt=sampleM,
558
- prompt_sample_rate=globalSR,
559
- descriptions=texts,
560
- progress=progress,
561
- return_tokens=USE_DIFFUSION
562
- )
563
- elif gen_type == "audio":
564
- outputs = MODEL.generate_continuation(
565
- prompt=sampleM,
566
- prompt_sample_rate=globalSR,
567
- descriptions=texts,
568
- progress=progress
569
- )
570
- outputs = torch.cat([sampleP, outputs], 2)
571
-
572
- elif any(m is not None for m in processed_melodies):
573
- if gen_type == "music":
574
- outputs = MODEL.generate_with_chroma(
575
- descriptions=texts,
576
- melody_wavs=processed_melodies,
577
- melody_sample_rate=target_sr,
578
- progress=progress,
579
- return_tokens=USE_DIFFUSION
580
- )
581
- elif gen_type == "audio":
582
- outputs = MODEL.generate_with_chroma(
583
- descriptions=texts,
584
- melody_wavs=processed_melodies,
585
- melody_sample_rate=target_sr,
586
- progress=progress
587
- )
588
- else:
589
- if gen_type == "music":
590
- outputs = MODEL.generate(texts, progress=progress, return_tokens=USE_DIFFUSION)
591
- elif gen_type == "audio":
592
- outputs = MODEL.generate(texts, progress=progress)
593
-
594
- if USE_DIFFUSION:
595
- print("outputs: " + str(outputs))
596
- outputs_diffusion = MBD.tokens_to_wav(outputs[1])
597
- outputs = torch.cat([outputs[0], outputs_diffusion], dim=0)
598
- outputs = outputs.detach().cpu().float()
599
- backups = outputs
600
- if channel == "stereo":
601
- outputs = convert_audio(outputs, target_sr, int(sr_select), 2)
602
- elif channel == "mono" and sr_select != "32000":
603
- outputs = convert_audio(outputs, target_sr, int(sr_select), 1)
604
- out_files = []
605
- out_audios = []
606
- out_backup = []
607
- for output in outputs:
608
- with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
609
- audio_write(
610
- file.name, output, (MODEL.sample_rate if channel == "stereo effect" else int(sr_select)), strategy="loudness",
611
- loudness_headroom_db=16, loudness_compressor=True, add_suffix=False)
612
-
613
- if channel == "stereo effect":
614
- make_pseudo_stereo(file.name, sr_select, pan=True, delay=True);
615
-
616
- out_files.append(pool.submit(make_waveform, file.name, bg_image=image, bg_color=background, bars_color=(bar1, bar2), fg_alpha=1.0, bar_count=75, height=height, width=width))
617
- out_audios.append(file.name)
618
- file_cleaner.add(file.name)
619
- print(f'wav: {file.name}')
620
- for backup in backups:
621
- with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
622
- audio_write(
623
- file.name, backup, MODEL.sample_rate, strategy="loudness",
624
- loudness_headroom_db=16, loudness_compressor=True, add_suffix=False)
625
- out_backup.append(file.name)
626
- file_cleaner.add(file.name)
627
- res = [out_file.result() for out_file in out_files]
628
- res_audio = out_audios
629
- res_backup = out_backup
630
- for file in res:
631
- file_cleaner.add(file)
632
- print(f'video: {file}')
633
- print("batch finished", len(texts), time.time() - be)
634
- print("Tempfiles currently stored: ", len(file_cleaner.files))
635
- if MOVE_TO_CPU:
636
- MODEL.to('cpu')
637
- if UNLOAD_MODEL:
638
- MODEL = None
639
- torch.cuda.empty_cache()
640
- torch.cuda.ipc_collect()
641
- return res, res_audio, res_backup, input_length
642
-
643
-
644
- def predict_batched(texts, melodies):
645
- max_text_length = 512
646
- texts = [text[:max_text_length] for text in texts]
647
- load_model('melody')
648
- res = _do_predictions(texts, melodies, BATCHED_DURATION)
649
- return res
650
-
651
-
652
- def add_tags(filename, tags):
653
- json_string = None
654
-
655
- data = {
656
- "global_prompt": tags[0],
657
- "bpm": tags[1],
658
- "key": tags[2],
659
- "scale": tags[3],
660
- "texts": tags[4],
661
- "duration": tags[5],
662
- "overlap": tags[6],
663
- "seed": tags[7],
664
- "audio_mode": tags[8],
665
- "input_length": tags[9],
666
- "channel": tags[10],
667
- "sr_select": tags[11],
668
- "model": tags[12],
669
- "custom_model": tags[13],
670
- "base_model": tags[14],
671
- "decoder": tags[15],
672
- "topk": tags[16],
673
- "topp": tags[17],
674
- "temperature": tags[18],
675
- "cfg_coef": tags[19],
676
- "generator": tags[20],
677
- "version": version
678
- }
679
-
680
- json_string = json.dumps(data)
681
-
682
- if os.path.exists(filename):
683
- with taglib.File(filename, save_on_exit=True) as song:
684
- song.tags = {'COMMENT': json_string }
685
-
686
- json_file = open(tags[7] + '.json', 'w')
687
- json_file.write(json_string)
688
- json_file.close()
689
-
690
- return json_file.name;
691
-
692
-
693
- def save_outputs(mp4, wav_tmp, tags, gen_type):
694
- # mp4: .mp4 file name in root running folder of app.py
695
- # wav_tmp: temporary wav file located in %TEMP% folder
696
- # seed - used seed
697
- # exanple BgnJtr4Pn1AJ.mp4, C:\Users\Alex\AppData\Local\Temp\tmp4ermrebs.wav, 195123182343465
698
- # procedure read generated .mp4 and wav files, rename it by using seed as name,
699
- # and will store it to ./output/today_date/wav and ./output/today_date/mp4 folders.
700
- # if file with same seed number already exist its make postfix in name like seed(n)
701
- # where is n - consiqunce number 1-2-3-4 and so on
702
- # then we store generated mp4 and wav into destination folders.
703
-
704
- current_date = datetime.now().strftime("%Y%m%d")
705
- wav_directory = os.path.join(os.getcwd(), 'output', current_date, gen_type,'wav')
706
- mp4_directory = os.path.join(os.getcwd(), 'output', current_date, gen_type,'mp4')
707
- json_directory = os.path.join(os.getcwd(), 'output', current_date, gen_type,'json')
708
- os.makedirs(wav_directory, exist_ok=True)
709
- os.makedirs(mp4_directory, exist_ok=True)
710
- os.makedirs(json_directory, exist_ok=True)
711
-
712
- filename = str(tags[7]) + '.wav'
713
- target = os.path.join(wav_directory, filename)
714
- counter = 1
715
- while os.path.exists(target):
716
- filename = str(tags[7]) + f'({counter})' + '.wav'
717
- target = os.path.join(wav_directory, filename)
718
- counter += 1
719
-
720
- shutil.copyfile(wav_tmp, target); # make copy of original file
721
- json_file = add_tags(target, tags);
722
-
723
- wav_target=target;
724
- target=target.replace('wav', 'mp4');
725
- mp4_target=target;
726
-
727
- mp4=r'./' +mp4;
728
- shutil.copyfile(mp4, target); # make copy of original file
729
- _ = add_tags(target, tags);
730
-
731
- target=target.replace('mp4', 'json'); # change the extension to json
732
- json_target=target; # store the json target
733
-
734
- with open(target, 'w') as f: # open a writable file object
735
- shutil.copyfile(json_file, target); # make copy of original file
736
-
737
- os.remove(json_file)
738
-
739
- return wav_target, mp4_target, json_target;
740
-
741
-
742
- def clear_cash():
743
- # delete all temporary files generated by the system
744
- current_date = datetime.now().date()
745
- current_directory = os.getcwd()
746
- files = glob.glob(os.path.join(current_directory, '*.mp4'))
747
- for file in files:
748
- creation_date = datetime.fromtimestamp(os.path.getctime(file)).date()
749
- if creation_date == current_date:
750
- os.remove(file)
751
-
752
- temp_directory = os.environ.get('TEMP')
753
- files = glob.glob(os.path.join(temp_directory, 'tmp*.mp4'))
754
- for file in files:
755
- creation_date = datetime.fromtimestamp(os.path.getctime(file)).date()
756
- if creation_date == current_date:
757
- os.remove(file)
758
-
759
- files = glob.glob(os.path.join(temp_directory, 'tmp*.wav'))
760
- for file in files:
761
- creation_date = datetime.fromtimestamp(os.path.getctime(file)).date()
762
- if creation_date == current_date:
763
- os.remove(file)
764
-
765
- files = glob.glob(os.path.join(temp_directory, 'tmp*.png'))
766
- for file in files:
767
- creation_date = datetime.fromtimestamp(os.path.getctime(file)).date()
768
- if creation_date == current_date:
769
- os.remove(file)
770
- return
771
-
772
-
773
- def s2t(seconds, seconds2):
774
- # convert seconds to time format
775
- # seconds - time in seconds
776
- # return time in format 00:00
777
- m, s = divmod(seconds, 60)
778
- m2, s2 = divmod(seconds2, 60)
779
- if seconds != 0 and seconds < seconds2:
780
- s = s + 1
781
- return ("%02d:%02d - %02d:%02d" % (m, s, m2, s2))
782
-
783
-
784
- def calc_time(gen_type, s, duration, overlap, d0, d1, d2, d3, d4, d5, d6, d7, d8, d9):
785
- # calculate the time of generation
786
- # overlap - overlap in seconds
787
- # d0-d9 - drag
788
- # return time in seconds
789
- d_amount = [int(d0), int(d1), int(d2), int(d3), int(d4), int(d5), int(d6), int(d7), int(d8), int(d9)]
790
- calc = []
791
- tracks = []
792
- time = 0
793
- s = s - 1
794
- max_time = duration
795
- max_limit = 0
796
- if gen_type == "music":
797
- max_limit = 30
798
- elif gen_type == "audio":
799
- max_limit = 10
800
- track_add = max_limit - overlap
801
- tracks.append(max_limit + ((d_amount[0] - 1) * track_add))
802
- for i in range(1, 10):
803
- tracks.append(d_amount[i] * track_add)
804
-
805
- if tracks[0] >= max_time or s == 0:
806
- calc.append(s2t(time, max_time))
807
- time = max_time
808
- else:
809
- calc.append(s2t(time, tracks[0]))
810
- time = tracks[0]
811
-
812
- for i in range(1, 10):
813
- if time + tracks[i] >= max_time or i == s:
814
- calc.append(s2t(time, max_time))
815
- time = max_time
816
- else:
817
- calc.append(s2t(time, time + tracks[i]))
818
- time = time + tracks[i]
819
-
820
- return calc[0], calc[1], calc[2], calc[3], calc[4], calc[5], calc[6], calc[7], calc[8], calc[9]
821
-
822
-
823
- def predict_full(gen_type, model, decoder, custom_model, base_model, prompt_amount, struc_prompt, bpm, key, scale, global_prompt, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, audio, mode, trim_start, trim_end, duration, topk, topp, temperature, cfg_coef, seed, overlap, image, height, width, background, bar1, bar2, channel, sr_select, progress=gr.Progress()):
824
- global INTERRUPTING
825
- global USE_DIFFUSION
826
- INTERRUPTING = False
827
-
828
- if gen_type == "audio":
829
- custom_model = None
830
- base_model = "medium"
831
-
832
- if temperature < 0:
833
- raise gr.Error("Temperature must be >= 0.")
834
- if topk < 0:
835
- raise gr.Error("Topk must be non-negative.")
836
- if topp < 0:
837
- raise gr.Error("Topp must be non-negative.")
838
-
839
- if trim_start < 0:
840
- trim_start = 0
841
- if trim_end < 0:
842
- trim_end = 0
843
-
844
- topk = int(topk)
845
-
846
- if decoder == "MultiBand_Diffusion":
847
- USE_DIFFUSION = True
848
- load_diffusion()
849
- else:
850
- USE_DIFFUSION = False
851
- unload_diffusion()
852
-
853
- if gen_type == "music":
854
- model_shrt = model
855
- model = "GrandaddyShmax/musicgen-" + model
856
- elif gen_type == "audio":
857
- model_shrt = model
858
- model = "GrandaddyShmax/audiogen-" + model
859
- base_model_shrt = base_model
860
- base_model = "GrandaddyShmax/musicgen-" + base_model
861
-
862
- if MODEL is None or MODEL.name != (model):
863
- load_model(model, custom_model, base_model, gen_type)
864
- else:
865
- if MOVE_TO_CPU:
866
- MODEL.to('cuda')
867
-
868
- if seed < 0:
869
- seed = random.randint(0, 0xffff_ffff_ffff)
870
- torch.manual_seed(seed)
871
-
872
- def _progress(generated, to_generate):
873
- progress((min(generated, to_generate), to_generate))
874
- if INTERRUPTING:
875
- raise gr.Error("Interrupted.")
876
- MODEL.set_custom_progress_callback(_progress)
877
-
878
- audio_mode = "none"
879
- melody = None
880
- sample = None
881
- if audio:
882
- audio_mode = mode
883
- if mode == "sample":
884
- sample = audio
885
- elif mode == "melody":
886
- melody = audio
887
-
888
- base_model = "none" if model != "custom" else base_model
889
- custom_model = "none" if model != "custom" else custom_model
890
-
891
- text_cat = [p0, p1, p2, p3, p4, p5, p6, p7, p8, p9]
892
- drag_cat = [d0, d1, d2, d3, d4, d5, d6, d7, d8, d9]
893
- texts = []
894
- raw_texts = []
895
- ind = 0
896
- ind2 = 0
897
- while ind < prompt_amount:
898
- for ind2 in range(int(drag_cat[ind])):
899
- if not struc_prompt:
900
- texts.append(text_cat[ind])
901
- global_prompt = "none"
902
- bpm = "none"
903
- key = "none"
904
- scale = "none"
905
- raw_texts.append(text_cat[ind])
906
- else:
907
- if gen_type == "music":
908
- bpm_str = str(bpm) + " bpm"
909
- key_str = ", " + str(key) + " " + str(scale)
910
- global_str = (", " + str(global_prompt)) if str(global_prompt) != "" else ""
911
- elif gen_type == "audio":
912
- bpm_str = ""
913
- key_str = ""
914
- global_str = (str(global_prompt)) if str(global_prompt) != "" else ""
915
- texts_str = (", " + str(text_cat[ind])) if str(text_cat[ind]) != "" else ""
916
- texts.append(bpm_str + key_str + global_str + texts_str)
917
- raw_texts.append(text_cat[ind])
918
- ind2 = 0
919
- ind = ind + 1
920
-
921
- outs, outs_audio, outs_backup, input_length = _do_predictions(
922
- gen_type, [texts], [melody], sample, trim_start, trim_end, duration, image, height, width, background, bar1, bar2, channel, sr_select, progress=True,
923
- top_k=topk, top_p=topp, temperature=temperature, cfg_coef=cfg_coef, extend_stride=MODEL.max_duration-overlap)
924
- tags = [str(global_prompt), str(bpm), str(key), str(scale), str(raw_texts), str(duration), str(overlap), str(seed), str(audio_mode), str(input_length), str(channel), str(sr_select), str(model_shrt), str(custom_model), str(base_model_shrt), str(decoder), str(topk), str(topp), str(temperature), str(cfg_coef), str(gen_type)]
925
- wav_target, mp4_target, json_target = save_outputs(outs[0], outs_audio[0], tags, gen_type);
926
- # Removes the temporary files.
927
- for out in outs:
928
- os.remove(out)
929
- for out in outs_audio:
930
- os.remove(out)
931
-
932
- return mp4_target, wav_target, outs_backup[0], [mp4_target, wav_target, json_target], seed
933
-
934
-
935
- max_textboxes = 10
936
-
937
-
938
- def get_available_models():
939
- return sorted([re.sub('.pt$', '', item.name) for item in list(Path('models/').glob('*')) if item.name.endswith('.pt')])
940
-
941
-
942
- def toggle_audio_src(choice):
943
- if choice == "mic":
944
- return gr.update(source="microphone", value=None, label="Microphone")
945
- else:
946
- return gr.update(source="upload", value=None, label="File")
947
-
948
-
949
- def ui_full(launch_kwargs):
950
- with gr.Blocks(title='AudioCraft Plus', theme=theme) as interface:
951
- gr.Markdown(
952
- """
953
- # AudioCraft Plus - v2.0.0a
954
-
955
- ### An All-in-One AudioCraft WebUI
956
-
957
- #### **Disclaimer:** This will not run on CPU only. It's best to clone this App and run it on a GPU instance!
958
- **Alternatively**, you can run this for free on a google colab:
959
- https://colab.research.google.com/github/camenduru/MusicGen-colab/blob/main/MusicGen_ClownOfMadness_plus_colab.ipynb
960
-
961
- **Or**, run this locally on your PC:
962
- https://github.com/GrandaddyShmax/audiocraft_plus/tree/main
963
-
964
- Thanks to: facebookresearch, Camenduru, rkfg, oobabooga, AlexHK and GrandaddyShmax
965
- """
966
- )
967
- with gr.Tab("MusicGen"):
968
- gr.Markdown(
969
- """
970
- ### MusicGen
971
- """
972
- )
973
- with gr.Row():
974
- with gr.Column():
975
- with gr.Tab("Generation"):
976
- with gr.Accordion("Structure Prompts", open=False):
977
- with gr.Column():
978
- with gr.Row():
979
- struc_prompts = gr.Checkbox(label="Enable", value=False, interactive=True, container=False)
980
- bpm = gr.Number(label="BPM", value=120, interactive=True, scale=1, precision=0)
981
- key = gr.Dropdown(["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "Bb", "B"], label="Key", value="C", interactive=True)
982
- scale = gr.Dropdown(["Major", "Minor"], label="Scale", value="Major", interactive=True)
983
- with gr.Row():
984
- global_prompt = gr.Text(label="Global Prompt", interactive=True, scale=3)
985
- with gr.Row():
986
- s = gr.Slider(1, max_textboxes, value=1, step=1, label="Prompts:", interactive=True, scale=2)
987
- #s_mode = gr.Radio(["segmentation", "batch"], value="segmentation", interactive=True, scale=1, label="Generation Mode")
988
- with gr.Column():
989
- textboxes = []
990
- prompts = []
991
- repeats = []
992
- calcs = []
993
- with gr.Row():
994
- text0 = gr.Text(label="Input Text", interactive=True, scale=4)
995
- prompts.append(text0)
996
- drag0 = gr.Number(label="Repeat", value=1, interactive=True, scale=1)
997
- repeats.append(drag0)
998
- calc0 = gr.Text(interactive=False, value="00:00 - 00:00", scale=1, label="Time")
999
- calcs.append(calc0)
1000
- for i in range(max_textboxes):
1001
- with gr.Row(visible=False) as t:
1002
- text = gr.Text(label="Input Text", interactive=True, scale=3)
1003
- repeat = gr.Number(label="Repeat", minimum=1, value=1, interactive=True, scale=1)
1004
- calc = gr.Text(interactive=False, value="00:00 - 00:00", scale=1, label="Time")
1005
- textboxes.append(t)
1006
- prompts.append(text)
1007
- repeats.append(repeat)
1008
- calcs.append(calc)
1009
- to_calc = gr.Button("Calculate Timings", variant="secondary")
1010
- with gr.Row():
1011
- duration = gr.Slider(minimum=1, maximum=300, value=10, step=1, label="Duration", interactive=True)
1012
- with gr.Row():
1013
- overlap = gr.Slider(minimum=1, maximum=29, value=12, step=1, label="Overlap", interactive=True)
1014
- with gr.Row():
1015
- seed = gr.Number(label="Seed", value=-1, scale=4, precision=0, interactive=True)
1016
- gr.Button('\U0001f3b2\ufe0f', scale=1).click(fn=lambda: -1, outputs=[seed], queue=False)
1017
- reuse_seed = gr.Button('\u267b\ufe0f', scale=1)
1018
-
1019
- with gr.Tab("Audio"):
1020
- with gr.Row():
1021
- with gr.Column():
1022
- input_type = gr.Radio(["file", "mic"], value="file", label="Input Type (optional)", interactive=True)
1023
- mode = gr.Radio(["melody", "sample"], label="Input Audio Mode (optional)", value="sample", interactive=True)
1024
- with gr.Row():
1025
- trim_start = gr.Number(label="Trim Start", value=0, interactive=True)
1026
- trim_end = gr.Number(label="Trim End", value=0, interactive=True)
1027
- audio = gr.Audio(source="upload", type="numpy", label="Input Audio (optional)", interactive=True)
1028
-
1029
- with gr.Tab("Customization"):
1030
- with gr.Row():
1031
- with gr.Column():
1032
- background = gr.ColorPicker(value="#0f0f0f", label="background color", interactive=True, scale=0)
1033
- bar1 = gr.ColorPicker(value="#84cc16", label="bar color start", interactive=True, scale=0)
1034
- bar2 = gr.ColorPicker(value="#10b981", label="bar color end", interactive=True, scale=0)
1035
- with gr.Column():
1036
- image = gr.Image(label="Background Image", type="filepath", interactive=True, scale=4)
1037
- with gr.Row():
1038
- height = gr.Number(label="Height", value=512, interactive=True)
1039
- width = gr.Number(label="Width", value=768, interactive=True)
1040
-
1041
- with gr.Tab("Settings"):
1042
- with gr.Row():
1043
- channel = gr.Radio(["mono", "stereo", "stereo effect"], label="Output Audio Channels", value="stereo", interactive=True, scale=1)
1044
- sr_select = gr.Dropdown(["11025", "16000", "22050", "24000", "32000", "44100", "48000"], label="Output Audio Sample Rate", value="48000", interactive=True)
1045
- with gr.Row():
1046
- model = gr.Radio(["melody", "small", "medium", "large", "custom"], label="Model", value="large", interactive=True, scale=1)
1047
- with gr.Column():
1048
- dropdown = gr.Dropdown(choices=get_available_models(), value=("No models found" if len(get_available_models()) < 1 else get_available_models()[0]), label='Custom Model (models folder)', elem_classes='slim-dropdown', interactive=True)
1049
- ui.create_refresh_button(dropdown, lambda: None, lambda: {'choices': get_available_models()}, 'refresh-button')
1050
- basemodel = gr.Radio(["small", "medium", "melody", "large"], label="Base Model", value="medium", interactive=True, scale=1)
1051
- with gr.Row():
1052
- decoder = gr.Radio(["Default", "MultiBand_Diffusion"], label="Decoder", value="Default", interactive=True)
1053
- with gr.Row():
1054
- topk = gr.Number(label="Top-k", value=250, interactive=True)
1055
- topp = gr.Number(label="Top-p", value=0, interactive=True)
1056
- temperature = gr.Number(label="Temperature", value=1.0, interactive=True)
1057
- cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True)
1058
- with gr.Row():
1059
- submit = gr.Button("Generate", variant="primary")
1060
- # Adapted from https://github.com/rkfg/audiocraft/blob/long/app.py, MIT license.
1061
- _ = gr.Button("Interrupt").click(fn=interrupt, queue=False)
1062
- with gr.Column() as c:
1063
- with gr.Tab("Output"):
1064
- output = gr.Video(label="Generated Music", scale=0)
1065
- with gr.Row():
1066
- audio_only = gr.Audio(type="numpy", label="Audio Only", interactive=False)
1067
- backup_only = gr.Audio(type="numpy", label="Backup Audio", interactive=False, visible=False)
1068
- send_audio = gr.Button("Send to Input Audio")
1069
- seed_used = gr.Number(label='Seed used', value=-1, interactive=False)
1070
- download = gr.File(label="Generated Files", interactive=False)
1071
- with gr.Tab("Wiki"):
1072
- gr.Markdown(
1073
- """
1074
- - **[Generate (button)]:**
1075
- Generates the music with the given settings and prompts.
1076
-
1077
- - **[Interrupt (button)]:**
1078
- Stops the music generation as soon as it can, providing an incomplete output.
1079
-
1080
- ---
1081
-
1082
- ### Generation Tab:
1083
-
1084
- #### Structure Prompts:
1085
-
1086
- This feature helps reduce repetitive prompts by allowing you to set global prompts
1087
- that will be used for all prompt segments.
1088
-
1089
- - **[Structure Prompts (checkbox)]:**
1090
- Enable/Disable the structure prompts feature.
1091
-
1092
- - **[BPM (number)]:**
1093
- Beats per minute of the generated music.
1094
-
1095
- - **[Key (dropdown)]:**
1096
- The key of the generated music.
1097
-
1098
- - **[Scale (dropdown)]:**
1099
- The scale of the generated music.
1100
-
1101
- - **[Global Prompt (text)]:**
1102
- Here write the prompt that you wish to be used for all prompt segments.
1103
-
1104
- #### Multi-Prompt:
1105
-
1106
- This feature allows you to control the music, adding variation to different time segments.
1107
- You have up to 10 prompt segments. The first prompt will always be 30s long;
1108
- the other prompts will be [30s - overlap].
1109
- For example, if the overlap is 10s, each prompt segment will be 20s.
1110
-
1111
- - **[Prompt Segments (number)]:**
1112
- Amount of unique prompts to generate throughout the music generation.
1113
-
1114
- - **[Prompt/Input Text (prompt)]:**
1115
- Here describe the music you wish the model to generate.
1116
-
1117
- - **[Repeat (number)]:**
1118
- Write how many times this prompt will repeat (instead of wasting another prompt segment on the same prompt).
1119
-
1120
- - **[Time (text)]:**
1121
- The time of the prompt segment.
1122
-
1123
- - **[Calculate Timings (button)]:**
1124
- Calculates the timings of the prompt segments.
1125
-
1126
- - **[Duration (number)]:**
1127
- How long you want the generated music to be (in seconds).
1128
-
1129
- - **[Overlap (number)]:**
1130
- How much each new segment will reference the previous segment (in seconds).
1131
- For example, if you choose 20s: Each new segment after the first one will reference the previous segment 20s
1132
- and will generate only 10s of new music. The model can only process 30s of music.
1133
-
1134
- - **[Seed (number)]:**
1135
- Your generated music id. If you wish to generate the exact same music,
1136
- place the exact seed with the exact prompts
1137
-             (This way you can also extend a specific song that was generated too short).
1138
-
1139
- - **[Random Seed (button)]:**
1140
- Gives "-1" as a seed, which counts as a random seed.
1141
-
1142
- - **[Copy Previous Seed (button)]:**
1143
-             Copies the seed from the output seed (if you don't feel like doing it manually).
1144
-
1145
- ---
1146
-
1147
- ### Audio Tab:
1148
-
1149
- - **[Input Type (selection)]:**
1150
- `File` mode allows you to upload an audio file to use as input
1151
- `Mic` mode allows you to use your microphone as input
1152
-
1153
- - **[Input Audio Mode (selection)]:**
1154
- `Melody` mode only works with the melody model: it conditions the music generation to reference the melody
1155
- `Sample` mode works with any model: it gives a music sample to the model to generate its continuation.
1156
-
1157
- - **[Trim Start and Trim End (numbers)]:**
1158
-             `Trim Start` sets how much you'd like to trim the input audio from the start
1159
- `Trim End` same as the above but from the end
1160
-
1161
- - **[Input Audio (audio file)]:**
1162
- Input here the audio you wish to use with "melody" or "sample" mode.
1163
-
1164
- ---
1165
-
1166
- ### Customization Tab:
1167
-
1168
- - **[Background Color (color)]:**
1169
-             Works only if you don't upload an image. Color of the waveform's background.
1170
-
1171
- - **[Bar Color Start (color)]:**
1172
- First color of the waveform bars.
1173
-
1174
- - **[Bar Color End (color)]:**
1175
- Second color of the waveform bars.
1176
-
1177
- - **[Background Image (image)]:**
1178
- Background image that you wish to be attached to the generated video along with the waveform.
1179
-
1180
- - **[Height and Width (numbers)]:**
1181
-             Output video resolution; only works when an image is uploaded.
1182
- (minimum height and width is 256).
1183
-
1184
- ---
1185
-
1186
- ### Settings Tab:
1187
-
1188
- - **[Output Audio Channels (selection)]:**
1189
-             With this you can select the number of channels you wish for your output audio.
1190
-             `mono` is straightforward single-channel audio
1191
-             `stereo` is dual-channel audio, but it will sound more or less like mono
1192
-             `stereo effect` is also dual-channel, but uses tricks to simulate stereo audio.
1193
-
1194
- - **[Output Audio Sample Rate (dropdown)]:**
1195
- The output audio sample rate, the model default is 32000.
1196
-
1197
- - **[Model (selection)]:**
1198
- Here you can choose which model you wish to use:
1199
- `melody` model is based on the medium model with a unique feature that lets you use melody conditioning
1200
-             `small` model has 300M parameters
1201
-             `medium` model has 1.5B parameters
1202
-             `large` model has 3.3B parameters
1203
- `custom` model runs the custom model that you provided.
1204
-
1205
- - **[Custom Model (selection)]:**
1206
- This dropdown will show you models that are placed in the `models` folder
1207
- you must select `custom` in the model options in order to use it.
1208
-
1209
- - **[Refresh (button)]:**
1210
- Refreshes the dropdown list for custom model.
1211
-
1212
- - **[Base Model (selection)]:**
1213
- Choose here the model that your custom model is based on.
1214
-
1215
- - **[Decoder (selection)]:**
1216
- Choose here the decoder that you wish to use:
1217
- `Default` is the default decoder
1218
- `MultiBand_Diffusion` is a decoder that uses diffusion to generate the audio.
1219
-
1220
- - **[Top-k (number)]:**
1221
- is a parameter used in text generation models, including music generation models. It determines the number of most likely next tokens to consider at each step of the generation process. The model ranks all possible tokens based on their predicted probabilities, and then selects the top-k tokens from the ranked list. The model then samples from this reduced set of tokens to determine the next token in the generated sequence. A smaller value of k results in a more focused and deterministic output, while a larger value of k allows for more diversity in the generated music.
1222
-
1223
- - **[Top-p (number)]:**
1224
- also known as nucleus sampling or probabilistic sampling, is another method used for token selection during text generation. Instead of specifying a fixed number like top-k, top-p considers the cumulative probability distribution of the ranked tokens. It selects the smallest possible set of tokens whose cumulative probability exceeds a certain threshold (usually denoted as p). The model then samples from this set to choose the next token. This approach ensures that the generated output maintains a balance between diversity and coherence, as it allows for a varying number of tokens to be considered based on their probabilities.
1225
-
1226
- - **[Temperature (number)]:**
1227
- is a parameter that controls the randomness of the generated output. It is applied during the sampling process, where a higher temperature value results in more random and diverse outputs, while a lower temperature value leads to more deterministic and focused outputs. In the context of music generation, a higher temperature can introduce more variability and creativity into the generated music, but it may also lead to less coherent or structured compositions. On the other hand, a lower temperature can produce more repetitive and predictable music.
1228
-
1229
- - **[Classifier Free Guidance (number)]:**
1230
-             refers to classifier-free guidance: the model is trained both with and without the text conditioning, and at generation time the conditional and unconditional predictions are combined, with this coefficient scaling how strongly the output is pushed toward the prompt. Higher values follow the prompt more closely (often at the cost of diversity), while lower values give the model more freedom.
1231
- """
1232
- )
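The Multi-Prompt rule described in the wiki above (a 30s window, with each later segment re-reading `overlap` seconds of the previous one) boils down to a few lines of arithmetic. The sketch below only illustrates that rule with assumed names; the app's real scheduling lives in `calc_time` and `predict_full`.

```python
# Illustrative sketch of the 30s-window / overlap rule from the wiki (not the app's actual code).
def sketch_segment_timings(duration: int, overlap: int, window: int = 30) -> list:
    segments, start = [], 0
    while start < duration:
        end = min(start + window, duration)
        segments.append((start, end))
        if end == duration:
            break
        start = end - overlap  # the next window re-reads `overlap` seconds of the previous one
    return segments

print(sketch_segment_timings(duration=80, overlap=10))
# [(0, 30), (20, 50), (40, 70), (60, 80)] -> 30s first, then 20s of new music per segment
```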
1233
- with gr.Tab("AudioGen"):
1234
- gr.Markdown(
1235
- """
1236
- ### AudioGen
1237
- """
1238
- )
1239
- with gr.Row():
1240
- with gr.Column():
1241
- with gr.Tab("Generation"):
1242
- with gr.Accordion("Structure Prompts", open=False):
1243
- with gr.Row():
1244
- struc_prompts_a = gr.Checkbox(label="Enable", value=False, interactive=True, container=False)
1245
- global_prompt_a = gr.Text(label="Global Prompt", interactive=True, scale=3)
1246
- with gr.Row():
1247
- s_a = gr.Slider(1, max_textboxes, value=1, step=1, label="Prompts:", interactive=True, scale=2)
1248
- with gr.Column():
1249
- textboxes_a = []
1250
- prompts_a = []
1251
- repeats_a = []
1252
- calcs_a = []
1253
- with gr.Row():
1254
- text0_a = gr.Text(label="Input Text", interactive=True, scale=4)
1255
- prompts_a.append(text0_a)
1256
- drag0_a = gr.Number(label="Repeat", value=1, interactive=True, scale=1)
1257
- repeats_a.append(drag0_a)
1258
- calc0_a = gr.Text(interactive=False, value="00:00 - 00:00", scale=1, label="Time")
1259
- calcs_a.append(calc0_a)
1260
- for i in range(max_textboxes):
1261
- with gr.Row(visible=False) as t_a:
1262
- text_a = gr.Text(label="Input Text", interactive=True, scale=3)
1263
- repeat_a = gr.Number(label="Repeat", minimum=1, value=1, interactive=True, scale=1)
1264
- calc_a = gr.Text(interactive=False, value="00:00 - 00:00", scale=1, label="Time")
1265
- textboxes_a.append(t_a)
1266
- prompts_a.append(text_a)
1267
- repeats_a.append(repeat_a)
1268
- calcs_a.append(calc_a)
1269
- to_calc_a = gr.Button("Calculate Timings", variant="secondary")
1270
- with gr.Row():
1271
- duration_a = gr.Slider(minimum=1, maximum=300, value=10, step=1, label="Duration", interactive=True)
1272
- with gr.Row():
1273
- overlap_a = gr.Slider(minimum=1, maximum=9, value=2, step=1, label="Overlap", interactive=True)
1274
- with gr.Row():
1275
- seed_a = gr.Number(label="Seed", value=-1, scale=4, precision=0, interactive=True)
1276
- gr.Button('\U0001f3b2\ufe0f', scale=1).click(fn=lambda: -1, outputs=[seed_a], queue=False)
1277
- reuse_seed_a = gr.Button('\u267b\ufe0f', scale=1)
1278
-
1279
- with gr.Tab("Audio"):
1280
- with gr.Row():
1281
- with gr.Column():
1282
- input_type_a = gr.Radio(["file", "mic"], value="file", label="Input Type (optional)", interactive=True)
1283
- mode_a = gr.Radio(["sample"], label="Input Audio Mode (optional)", value="sample", interactive=False, visible=False)
1284
- with gr.Row():
1285
- trim_start_a = gr.Number(label="Trim Start", value=0, interactive=True)
1286
- trim_end_a = gr.Number(label="Trim End", value=0, interactive=True)
1287
- audio_a = gr.Audio(source="upload", type="numpy", label="Input Audio (optional)", interactive=True)
1288
-
1289
- with gr.Tab("Customization"):
1290
- with gr.Row():
1291
- with gr.Column():
1292
- background_a = gr.ColorPicker(value="#0f0f0f", label="background color", interactive=True, scale=0)
1293
- bar1_a = gr.ColorPicker(value="#84cc16", label="bar color start", interactive=True, scale=0)
1294
- bar2_a = gr.ColorPicker(value="#10b981", label="bar color end", interactive=True, scale=0)
1295
- with gr.Column():
1296
- image_a = gr.Image(label="Background Image", type="filepath", interactive=True, scale=4)
1297
- with gr.Row():
1298
- height_a = gr.Number(label="Height", value=512, interactive=True)
1299
- width_a = gr.Number(label="Width", value=768, interactive=True)
1300
-
1301
- with gr.Tab("Settings"):
1302
- with gr.Row():
1303
- channel_a = gr.Radio(["mono", "stereo", "stereo effect"], label="Output Audio Channels", value="stereo", interactive=True, scale=1)
1304
- sr_select_a = gr.Dropdown(["11025", "16000", "22050", "24000", "32000", "44100", "48000"], label="Output Audio Sample Rate", value="48000", interactive=True)
1305
- with gr.Row():
1306
- model_a = gr.Radio(["medium"], label="Model", value="medium", interactive=False, visible=False)
1307
- decoder_a = gr.Radio(["Default"], label="Decoder", value="Default", interactive=False, visible=False)
1308
- with gr.Row():
1309
- topk_a = gr.Number(label="Top-k", value=250, interactive=True)
1310
- topp_a = gr.Number(label="Top-p", value=0, interactive=True)
1311
- temperature_a = gr.Number(label="Temperature", value=1.0, interactive=True)
1312
- cfg_coef_a = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True)
1313
- with gr.Row():
1314
- submit_a = gr.Button("Generate", variant="primary")
1315
- _ = gr.Button("Interrupt").click(fn=interrupt, queue=False)
1316
- with gr.Column():
1317
- with gr.Tab("Output"):
1318
- output_a = gr.Video(label="Generated Audio", scale=0)
1319
- with gr.Row():
1320
- audio_only_a = gr.Audio(type="numpy", label="Audio Only", interactive=False)
1321
- backup_only_a = gr.Audio(type="numpy", label="Backup Audio", interactive=False, visible=False)
1322
- send_audio_a = gr.Button("Send to Input Audio")
1323
- seed_used_a = gr.Number(label='Seed used', value=-1, interactive=False)
1324
- download_a = gr.File(label="Generated Files", interactive=False)
1325
- with gr.Tab("Wiki"):
1326
- gr.Markdown(
1327
- """
1328
- - **[Generate (button)]:**
1329
- Generates the audio with the given settings and prompts.
1330
-
1331
- - **[Interrupt (button)]:**
1332
- Stops the audio generation as soon as it can, providing an incomplete output.
1333
-
1334
- ---
1335
-
1336
- ### Generation Tab:
1337
-
1338
- #### Structure Prompts:
1339
-
1340
-             This feature helps reduce repetitive prompts by allowing you to set global prompts
1341
- that will be used for all prompt segments.
1342
-
1343
- - **[Structure Prompts (checkbox)]:**
1344
- Enable/Disable the structure prompts feature.
1345
-
1346
- - **[Global Prompt (text)]:**
1347
- Here write the prompt that you wish to be used for all prompt segments.
1348
-
1349
- #### Multi-Prompt:
1350
-
1351
- This feature allows you to control the audio, adding variation to different time segments.
1352
-             You have up to 10 prompt segments. The first prompt will always be 10s long,
1353
- the other prompts will be [10s - overlap].
1354
-             For example, if the overlap is 2s, each later prompt segment will be 8s.
1355
-
1356
- - **[Prompt Segments (number)]:**
1357
-             Number of unique prompts to generate throughout the audio generation.
1358
-
1359
- - **[Prompt/Input Text (prompt)]:**
1360
- Here describe the audio you wish the model to generate.
1361
-
1362
- - **[Repeat (number)]:**
1363
- Write how many times this prompt will repeat (instead of wasting another prompt segment on the same prompt).
1364
-
1365
- - **[Time (text)]:**
1366
- The time of the prompt segment.
1367
-
1368
- - **[Calculate Timings (button)]:**
1369
- Calculates the timings of the prompt segments.
1370
-
1371
- - **[Duration (number)]:**
1372
- How long you want the generated audio to be (in seconds).
1373
-
1374
- - **[Overlap (number)]:**
1375
- How much each new segment will reference the previous segment (in seconds).
1376
- For example, if you choose 2s: Each new segment after the first one will reference the previous segment 2s
1377
- and will generate only 8s of new audio. The model can only process 10s of music.
1378
-
1379
- - **[Seed (number)]:**
1380
- Your generated audio id. If you wish to generate the exact same audio,
1381
- place the exact seed with the exact prompts
1382
-             (This way you can also extend a specific clip that was generated too short).
1383
-
1384
- - **[Random Seed (button)]:**
1385
- Gives "-1" as a seed, which counts as a random seed.
1386
-
1387
- - **[Copy Previous Seed (button)]:**
1388
-             Copies the seed from the output seed (if you don't feel like doing it manually).
1389
-
1390
- ---
1391
-
1392
- ### Audio Tab:
1393
-
1394
- - **[Input Type (selection)]:**
1395
- `File` mode allows you to upload an audio file to use as input
1396
- `Mic` mode allows you to use your microphone as input
1397
-
1398
- - **[Trim Start and Trim End (numbers)]:**
1399
-             `Trim Start` sets how much you'd like to trim the input audio from the start
1400
- `Trim End` same as the above but from the end
1401
-
1402
- - **[Input Audio (audio file)]:**
1403
- Input here the audio you wish to use.
1404
-
1405
- ---
1406
-
1407
- ### Customization Tab:
1408
-
1409
- - **[Background Color (color)]:**
1410
-             Works only if you don't upload an image. Color of the waveform's background.
1411
-
1412
- - **[Bar Color Start (color)]:**
1413
- First color of the waveform bars.
1414
-
1415
- - **[Bar Color End (color)]:**
1416
- Second color of the waveform bars.
1417
-
1418
- - **[Background Image (image)]:**
1419
- Background image that you wish to be attached to the generated video along with the waveform.
1420
-
1421
- - **[Height and Width (numbers)]:**
1422
-             Output video resolution; only works when an image is uploaded.
1423
- (minimum height and width is 256).
1424
-
1425
- ---
1426
-
1427
- ### Settings Tab:
1428
-
1429
- - **[Output Audio Channels (selection)]:**
1430
-             With this you can select the number of channels you wish for your output audio.
1431
-             `mono` is straightforward single-channel audio
1432
-             `stereo` is dual-channel audio, but it will sound more or less like mono
1433
-             `stereo effect` is also dual-channel, but uses tricks to simulate stereo audio.
1434
-
1435
- - **[Output Audio Sample Rate (dropdown)]:**
1436
- The output audio sample rate, the model default is 32000.
1437
-
1438
- - **[Top-k (number)]:**
1439
- is a parameter used in text generation models, including music generation models. It determines the number of most likely next tokens to consider at each step of the generation process. The model ranks all possible tokens based on their predicted probabilities, and then selects the top-k tokens from the ranked list. The model then samples from this reduced set of tokens to determine the next token in the generated sequence. A smaller value of k results in a more focused and deterministic output, while a larger value of k allows for more diversity in the generated music.
1440
-
1441
- - **[Top-p (number)]:**
1442
- also known as nucleus sampling or probabilistic sampling, is another method used for token selection during text generation. Instead of specifying a fixed number like top-k, top-p considers the cumulative probability distribution of the ranked tokens. It selects the smallest possible set of tokens whose cumulative probability exceeds a certain threshold (usually denoted as p). The model then samples from this set to choose the next token. This approach ensures that the generated output maintains a balance between diversity and coherence, as it allows for a varying number of tokens to be considered based on their probabilities.
1443
-
1444
- - **[Temperature (number)]:**
1445
- is a parameter that controls the randomness of the generated output. It is applied during the sampling process, where a higher temperature value results in more random and diverse outputs, while a lower temperature value leads to more deterministic and focused outputs. In the context of music generation, a higher temperature can introduce more variability and creativity into the generated music, but it may also lead to less coherent or structured compositions. On the other hand, a lower temperature can produce more repetitive and predictable music.
1446
-
1447
- - **[Classifier Free Guidance (number)]:**
1448
-             refers to classifier-free guidance: the model is trained both with and without the text conditioning, and at generation time the conditional and unconditional predictions are combined, with this coefficient scaling how strongly the output is pushed toward the prompt. Higher values follow the prompt more closely (often at the cost of diversity), while lower values give the model more freedom.
1449
- """
1450
- )
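The Top-k, Top-p and Temperature entries above describe the standard token-sampling knobs. The snippet below is a generic, hedged sketch of those three knobs; it is not AudioCraft's internal sampler, and the function name and default values are assumptions for illustration.

```python
import torch

def sample_next_token(logits: torch.Tensor, top_k: int = 250, top_p: float = 0.0,
                      temperature: float = 1.0) -> int:
    """Generic top-k / top-p / temperature sampling sketch (illustrative only)."""
    logits = logits / max(temperature, 1e-5)            # <1 sharpens, >1 flattens the distribution
    if top_k > 0:                                        # keep only the k most likely tokens
        kth_value = torch.topk(logits, min(top_k, logits.numel())).values[-1]
        logits = logits.masked_fill(logits < kth_value, float("-inf"))
    probs = torch.softmax(logits, dim=-1)
    if top_p > 0.0:                                      # nucleus: smallest set with mass >= top_p
        sorted_probs, sorted_idx = torch.sort(probs, descending=True)
        keep = torch.cumsum(sorted_probs, dim=-1) <= top_p
        keep[0] = True                                   # always keep the single most likely token
        mask = torch.zeros_like(probs, dtype=torch.bool).scatter_(0, sorted_idx[keep], True)
        probs = torch.where(mask, probs, torch.zeros_like(probs))
        probs = probs / probs.sum()
    return int(torch.multinomial(probs, 1))

# e.g. sample_next_token(torch.randn(2048), top_k=250, top_p=0.0, temperature=1.0)
```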
1451
- with gr.Tab("Audio Info"):
1452
- gr.Markdown(
1453
- """
1454
- ### Audio Info
1455
- """
1456
- )
1457
- with gr.Row():
1458
- with gr.Column():
1459
- in_audio = gr.File(type="file", label="Input Any Audio", interactive=True)
1460
- with gr.Row():
1461
- send_gen = gr.Button("Send to MusicGen", variant="primary")
1462
- send_gen_a = gr.Button("Send to AudioGen", variant="primary")
1463
- with gr.Column():
1464
- info = gr.Textbox(label="Audio Info", lines=10, interactive=False)
1465
- with gr.Tab("Changelog"):
1466
- gr.Markdown(
1467
- """
1468
- ## Changelog:
1469
-
1470
- ### v2.0.0a
1471
-
1472
-                         - Forgot to move all the updates to app.py from temp2.py... oops
1473
-
1474
-
1475
-
1476
- ### v2.0.0
1477
-
1478
- - Changed name from MusicGen+ to AudioCraft Plus
1479
-
1480
- - Complete overhaul of the repo "backend" with the latest changes from the main facebookresearch repo
1481
-
1482
- - Added a new decoder: MultiBand_Diffusion
1483
-
1484
- - Added AudioGen: a new tab for generating audio
1485
-
1486
-
1487
-
1488
- ### v1.2.8c
1489
-
1490
-                         - Implemented backward compatibility for the audio info tab with previous versions
1491
-
1492
-
1493
-
1494
- ### v1.2.8b
1495
-
1496
- - Fixed the error when loading default models
1497
-
1498
-
1499
-
1500
- ### v1.2.8a
1501
-
1502
- - Adapted Audio info tab to work with the new structure prompts feature
1503
-
1504
- - Now custom models actually work, make sure you select the correct base model
1505
-
1506
-
1507
-
1508
- ### v1.2.8
1509
-
1510
-                         - Now you will also receive a JSON file with metadata of the generated audio
1511
-
1512
- - Added error messages in Audio Info tab
1513
-
1514
- - Added structure prompts: you can select bpm, key and global prompt for all prompts
1515
-
1516
- - Added time display next to each prompt, can be calculated with "Calculate Timings" button
1517
-
1518
-
1519
-
1520
- ### v1.2.7
1521
-
1522
- - When sending generated audio to Input Audio, it will send a backup audio with default settings
1523
-                         (best for continuous generation)
1524
-
1525
- - Added Metadata to generated audio (Thanks to AlexHK ♥)
1526
-
1527
- - Added Audio Info tab that will display the metadata of the input audio
1528
-
1529
- - Added "send to Text2Audio" button in Audio Info tab
1530
-
1531
- - Generated audio is now stored in the "output" folder (Thanks to AlexHK ♥)
1532
-
1533
- - Added an output area with generated files and download buttons
1534
-
1535
- - Enhanced Stereo effect (Thanks to AlexHK ♥)
1536
-
1537
-
1538
-
1539
- ### v1.2.6
1540
-
1541
- - Added option to generate in stereo (instead of only mono)
1542
-
1543
- - Added dropdown for selecting output sample rate (model default is 32000)
1544
-
1545
-
1546
-
1547
- ### v1.2.5a
1548
-
1549
- - Added file cleaner (This comes from the main facebookresearch repo)
1550
-
1551
-                         - Reorganized a little, moved audio to a separate tab
1552
-
1553
-
1554
-
1555
- ### v1.2.5
1556
-
1557
- - Gave a unique lime theme to the webui
1558
-
1559
- - Added additional output for audio only
1560
-
1561
- - Added button to send generated audio to Input Audio
1562
-
1563
- - Added option to trim Input Audio
1564
-
1565
-
1566
-
1567
- ### v1.2.4
1568
-
1569
- - Added mic input (This comes from the main facebookresearch repo)
1570
-
1571
-
1572
-
1573
- ### v1.2.3
1574
-
1575
- - Added option to change video size to fit the image you upload
1576
-
1577
-
1578
-
1579
- ### v1.2.2
1580
-
1581
- - Added Wiki, Changelog and About tabs
1582
-
1583
-
1584
-
1585
- ### v1.2.1
1586
-
1587
- - Added tabs and organized the entire interface
1588
-
1589
- - Added option to attach image to the output video
1590
-
1591
- - Added option to load fine-tuned models (Yet to be tested)
1592
-
1593
-
1594
-
1595
- ### v1.2.0
1596
-
1597
- - Added Multi-Prompt
1598
-
1599
-
1600
-
1601
- ### v1.1.3
1602
-
1603
- - Added customization options for generated waveform
1604
-
1605
-
1606
-
1607
- ### v1.1.2
1608
-
1609
-                         - Removed sample length limit: now you can input audio of any length as a music sample
1610
-
1611
-
1612
-
1613
- ### v1.1.1
1614
-
1615
- - Improved music sample audio quality when using music continuation
1616
-
1617
-
1618
-
1619
- ### v1.1.0
1620
-
1621
- - Rebuilt the repo on top of the latest structure of the main MusicGen repo
1622
-
1623
- - Improved Music continuation feature
1624
-
1625
-
1626
-
1627
- ### v1.0.0 - Stable Version
1628
-
1629
- - Added Music continuation
1630
- """
1631
- )
1632
- with gr.Tab("About"):
1633
- gen_type = gr.Text(value="music", interactive=False, visible=False)
1634
- gen_type_a = gr.Text(value="audio", interactive=False, visible=False)
1635
- gr.Markdown(
1636
- """
1637
- This is your private demo for [MusicGen](https://github.com/facebookresearch/audiocraft), a simple and controllable model for music generation
1638
- presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284)
1639
-
1640
- ## MusicGen+ is an extended version of the original MusicGen by facebookresearch.
1641
-
1642
- ### Repo: https://github.com/GrandaddyShmax/audiocraft_plus/tree/plus
1643
-
1644
- ---
1645
-
1646
- ### This project was possible thanks to:
1647
-
1648
- #### GrandaddyShmax - https://github.com/GrandaddyShmax
1649
-
1650
- #### Camenduru - https://github.com/camenduru
1651
-
1652
- #### rkfg - https://github.com/rkfg
1653
-
1654
- #### oobabooga - https://github.com/oobabooga
1655
-
1656
- #### AlexHK - https://github.com/alanhk147
1657
- """
1658
- )
1659
-
1660
- send_gen.click(info_to_params, inputs=[in_audio], outputs=[decoder, struc_prompts, global_prompt, bpm, key, scale, model, dropdown, basemodel, s, prompts[0], prompts[1], prompts[2], prompts[3], prompts[4], prompts[5], prompts[6], prompts[7], prompts[8], prompts[9], repeats[0], repeats[1], repeats[2], repeats[3], repeats[4], repeats[5], repeats[6], repeats[7], repeats[8], repeats[9], mode, duration, topk, topp, temperature, cfg_coef, seed, overlap, channel, sr_select], queue=False)
1661
- reuse_seed.click(fn=lambda x: x, inputs=[seed_used], outputs=[seed], queue=False)
1662
- send_audio.click(fn=lambda x: x, inputs=[backup_only], outputs=[audio], queue=False)
1663
- submit.click(predict_full, inputs=[gen_type, model, decoder, dropdown, basemodel, s, struc_prompts, bpm, key, scale, global_prompt, prompts[0], prompts[1], prompts[2], prompts[3], prompts[4], prompts[5], prompts[6], prompts[7], prompts[8], prompts[9], repeats[0], repeats[1], repeats[2], repeats[3], repeats[4], repeats[5], repeats[6], repeats[7], repeats[8], repeats[9], audio, mode, trim_start, trim_end, duration, topk, topp, temperature, cfg_coef, seed, overlap, image, height, width, background, bar1, bar2, channel, sr_select], outputs=[output, audio_only, backup_only, download, seed_used])
1664
- input_type.change(toggle_audio_src, input_type, [audio], queue=False, show_progress=False)
1665
- to_calc.click(calc_time, inputs=[gen_type, s, duration, overlap, repeats[0], repeats[1], repeats[2], repeats[3], repeats[4], repeats[5], repeats[6], repeats[7], repeats[8], repeats[9]], outputs=[calcs[0], calcs[1], calcs[2], calcs[3], calcs[4], calcs[5], calcs[6], calcs[7], calcs[8], calcs[9]], queue=False)
1666
-
1667
- send_gen_a.click(info_to_params_a, inputs=[in_audio], outputs=[decoder_a, struc_prompts_a, global_prompt_a, s_a, prompts_a[0], prompts_a[1], prompts_a[2], prompts_a[3], prompts_a[4], prompts_a[5], prompts_a[6], prompts_a[7], prompts_a[8], prompts_a[9], repeats_a[0], repeats_a[1], repeats_a[2], repeats_a[3], repeats_a[4], repeats_a[5], repeats_a[6], repeats_a[7], repeats_a[8], repeats_a[9], duration_a, topk_a, topp_a, temperature_a, cfg_coef_a, seed_a, overlap_a, channel_a, sr_select_a], queue=False)
1668
- reuse_seed_a.click(fn=lambda x: x, inputs=[seed_used_a], outputs=[seed_a], queue=False)
1669
- send_audio_a.click(fn=lambda x: x, inputs=[backup_only_a], outputs=[audio_a], queue=False)
1670
- submit_a.click(predict_full, inputs=[gen_type_a, model_a, decoder_a, dropdown, basemodel, s_a, struc_prompts_a, bpm, key, scale, global_prompt_a, prompts_a[0], prompts_a[1], prompts_a[2], prompts_a[3], prompts_a[4], prompts_a[5], prompts_a[6], prompts_a[7], prompts_a[8], prompts_a[9], repeats_a[0], repeats_a[1], repeats_a[2], repeats_a[3], repeats_a[4], repeats_a[5], repeats_a[6], repeats_a[7], repeats_a[8], repeats_a[9], audio_a, mode_a, trim_start_a, trim_end_a, duration_a, topk_a, topp_a, temperature_a, cfg_coef_a, seed_a, overlap_a, image_a, height_a, width_a, background_a, bar1_a, bar2_a, channel_a, sr_select_a], outputs=[output_a, audio_only_a, backup_only_a, download_a, seed_used_a])
1671
- input_type_a.change(toggle_audio_src, input_type_a, [audio_a], queue=False, show_progress=False)
1672
- to_calc_a.click(calc_time, inputs=[gen_type_a, s_a, duration_a, overlap_a, repeats_a[0], repeats_a[1], repeats_a[2], repeats_a[3], repeats_a[4], repeats_a[5], repeats_a[6], repeats_a[7], repeats_a[8], repeats_a[9]], outputs=[calcs_a[0], calcs_a[1], calcs_a[2], calcs_a[3], calcs_a[4], calcs_a[5], calcs_a[6], calcs_a[7], calcs_a[8], calcs_a[9]], queue=False)
1673
-
1674
- in_audio.change(get_audio_info, in_audio, outputs=[info])
1675
-
1676
- def variable_outputs(k):
1677
- k = int(k) - 1
1678
- return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)
1679
- def get_size(image):
1680
- if image is not None:
1681
- img = Image.open(image)
1682
- img_height = img.height
1683
- img_width = img.width
1684
- if (img_height%2) != 0:
1685
- img_height = img_height + 1
1686
- if (img_width%2) != 0:
1687
- img_width = img_width + 1
1688
- return img_height, img_width
1689
- else:
1690
- return 512, 768
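As a note on `get_size` above: it bumps odd image dimensions up by one pixel — presumably because most video encoders (e.g. H.264 with yuv420p) reject odd frame sizes — and falls back to 512x768 when no background image is given. A hypothetical example of the resulting behaviour:

```python
# Hypothetical inputs, illustrating get_size defined above:
# get_size("photo_511x767.png")  -> (512, 768)   # odd height/width rounded up to even
# get_size(None)                 -> (512, 768)   # default video resolution
```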
1691
-
1692
- image.change(get_size, image, outputs=[height, width])
1693
- image_a.change(get_size, image_a, outputs=[height_a, width_a])
1694
- s.change(variable_outputs, s, textboxes)
1695
- s_a.change(variable_outputs, s_a, textboxes_a)
1696
- interface.queue().launch(**launch_kwargs)
1697
-
1698
-
1699
- def ui_batched(launch_kwargs):
1700
- with gr.Blocks() as demo:
1701
- gr.Markdown(
1702
- """
1703
- # MusicGen
1704
-
1705
- This is the demo for [MusicGen](https://github.com/facebookresearch/audiocraft),
1706
- a simple and controllable model for music generation
1707
- presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284).
1708
- <br/>
1709
- <a href="https://huggingface.co/spaces/facebook/MusicGen?duplicate=true"
1710
- style="display: inline-block;margin-top: .5em;margin-right: .25em;" target="_blank">
1711
- <img style="margin-bottom: 0em;display: inline;margin-top: -.25em;"
1712
- src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
1713
- for longer sequences, more control and no queue.</p>
1714
- """
1715
- )
1716
- with gr.Row():
1717
- with gr.Column():
1718
- with gr.Row():
1719
- text = gr.Text(label="Describe your music", lines=2, interactive=True)
1720
- with gr.Column():
1721
- radio = gr.Radio(["file", "mic"], value="file",
1722
- label="Condition on a melody (optional) File or Mic")
1723
- melody = gr.Audio(source="upload", type="numpy", label="File",
1724
- interactive=True, elem_id="melody-input")
1725
- with gr.Row():
1726
- submit = gr.Button("Generate")
1727
- with gr.Column():
1728
- output = gr.Video(label="Generated Music")
1729
- audio_output = gr.Audio(label="Generated Music (wav)", type='filepath')
1730
- submit.click(predict_batched, inputs=[text, melody],
1731
- outputs=[output, audio_output], batch=True, max_batch_size=MAX_BATCH_SIZE)
1732
- radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False)
1733
- gr.Examples(
1734
- fn=predict_batched,
1735
- examples=[
1736
- [
1737
- "An 80s driving pop song with heavy drums and synth pads in the background",
1738
- "./assets/bach.mp3",
1739
- ],
1740
- [
1741
- "A cheerful country song with acoustic guitars",
1742
- "./assets/bolero_ravel.mp3",
1743
- ],
1744
- [
1745
- "90s rock song with electric guitar and heavy drums",
1746
- None,
1747
- ],
1748
- [
1749
- "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130",
1750
- "./assets/bach.mp3",
1751
- ],
1752
- [
1753
- "lofi slow bpm electro chill with organic samples",
1754
- None,
1755
- ],
1756
- ],
1757
- inputs=[text, melody],
1758
- outputs=[output]
1759
- )
1760
- gr.Markdown("""
1761
- ### More details
1762
-
1763
- The model will generate 12 seconds of audio based on the description you provided.
1764
- You can optionally provide a reference audio from which a broad melody will be extracted.
1765
- The model will then try to follow both the description and melody provided.
1766
- All samples are generated with the `melody` model.
1767
-
1768
- You can also use your own GPU or a Google Colab by following the instructions on our repo.
1769
-
1770
- See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft)
1771
- for more details.
1772
- """)
1773
-
1774
- demo.queue(max_size=8 * 4).launch(**launch_kwargs)
1775
-
1776
-
1777
- if __name__ == "__main__":
1778
- parser = argparse.ArgumentParser()
1779
- parser.add_argument(
1780
- '--listen',
1781
- type=str,
1782
- default='0.0.0.0' if 'SPACE_ID' in os.environ else '127.0.0.1',
1783
- help='IP to listen on for connections to Gradio',
1784
- )
1785
- parser.add_argument(
1786
- '--username', type=str, default='', help='Username for authentication'
1787
- )
1788
- parser.add_argument(
1789
- '--password', type=str, default='', help='Password for authentication'
1790
- )
1791
- parser.add_argument(
1792
- '--server_port',
1793
- type=int,
1794
- default=0,
1795
- help='Port to run the server listener on',
1796
- )
1797
- parser.add_argument(
1798
- '--inbrowser', action='store_true', help='Open in browser'
1799
- )
1800
- parser.add_argument(
1801
- '--share', action='store_true', help='Share the gradio UI'
1802
- )
1803
- parser.add_argument(
1804
- '--unload_model', action='store_true', help='Unload the model after every generation to save GPU memory'
1805
- )
1806
-
1807
- parser.add_argument(
1808
- '--unload_to_cpu', action='store_true', help='Move the model to main RAM after every generation to save GPU memory but reload faster than after full unload (see above)'
1809
- )
1810
-
1811
- parser.add_argument(
1812
- '--cache', action='store_true', help='Cache models in RAM to quickly switch between them'
1813
- )
1814
-
1815
- args = parser.parse_args()
1816
- UNLOAD_MODEL = args.unload_model
1817
- MOVE_TO_CPU = args.unload_to_cpu
1818
- if args.cache:
1819
- MODELS = {}
1820
-
1821
- launch_kwargs = {}
1822
- launch_kwargs['server_name'] = args.listen
1823
-
1824
- if args.username and args.password:
1825
- launch_kwargs['auth'] = (args.username, args.password)
1826
- if args.server_port:
1827
- launch_kwargs['server_port'] = args.server_port
1828
- if args.inbrowser:
1829
- launch_kwargs['inbrowser'] = args.inbrowser
1830
- if args.share:
1831
- launch_kwargs['share'] = args.share
1832
-
1833
- # Show the interface
1834
- if IS_BATCHED:
1835
- global USE_DIFFUSION
1836
- USE_DIFFUSION = False
1837
- ui_batched(launch_kwargs)
1838
- else:
1839
- ui_full(launch_kwargs)
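For reference, the argument handling above maps a command line onto Gradio's `launch()` keyword arguments. A hypothetical invocation and the `launch_kwargs` it would produce (assuming the script is saved as `app.py`):

```python
# python app.py --listen 0.0.0.0 --server_port 7860 --share --unload_model
# would, with the parser above, produce:
launch_kwargs = {'server_name': '0.0.0.0', 'server_port': 7860, 'share': True}
# --unload_model only sets the module-level UNLOAD_MODEL flag; it is never passed to Gradio.
```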
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/models/source.py DELETED
@@ -1,538 +0,0 @@
1
- import torch
2
- import numpy as np
3
- import sys
4
- import torch.nn.functional as torch_nn_func
5
-
6
-
7
- class SineGen(torch.nn.Module):
8
- """ Definition of sine generator
9
- SineGen(samp_rate, harmonic_num = 0,
10
- sine_amp = 0.1, noise_std = 0.003,
11
- voiced_threshold = 0,
12
- flag_for_pulse=False)
13
-
14
- samp_rate: sampling rate in Hz
15
- harmonic_num: number of harmonic overtones (default 0)
16
-     sine_amp: amplitude of sine waveform (default 0.1)
17
- noise_std: std of Gaussian noise (default 0.003)
18
-     voiced_threshold: F0 threshold for U/V classification (default 0)
19
- flag_for_pulse: this SinGen is used inside PulseGen (default False)
20
-
21
- Note: when flag_for_pulse is True, the first time step of a voiced
22
- segment is always sin(np.pi) or cos(0)
23
- """
24
-
25
- def __init__(self, samp_rate, harmonic_num=0,
26
- sine_amp=0.1, noise_std=0.003,
27
- voiced_threshold=0,
28
- flag_for_pulse=False):
29
- super(SineGen, self).__init__()
30
- self.sine_amp = sine_amp
31
- self.noise_std = noise_std
32
- self.harmonic_num = harmonic_num
33
- self.dim = self.harmonic_num + 1
34
- self.sampling_rate = samp_rate
35
- self.voiced_threshold = voiced_threshold
36
- self.flag_for_pulse = flag_for_pulse
37
-
38
- def _f02uv(self, f0):
39
- # generate uv signal
40
- uv = torch.ones_like(f0)
41
- uv = uv * (f0 > self.voiced_threshold)
42
- return uv
43
-
44
- def _f02sine(self, f0_values):
45
- """ f0_values: (batchsize, length, dim)
46
- where dim indicates fundamental tone and overtones
47
- """
48
-         # convert to F0 in rad. The integer part n can be ignored
49
- # because 2 * np.pi * n doesn't affect phase
50
- rad_values = (f0_values / self.sampling_rate) % 1
51
-
52
- # initial phase noise (no noise for fundamental component)
53
- rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \
54
- device=f0_values.device)
55
- rand_ini[:, 0] = 0
56
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
57
-
58
-         # instantaneous phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad)
59
- if not self.flag_for_pulse:
60
- # for normal case
61
-
62
- # To prevent torch.cumsum numerical overflow,
63
- # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
64
- # Buffer tmp_over_one_idx indicates the time step to add -1.
65
- # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi
66
- tmp_over_one = torch.cumsum(rad_values, 1) % 1
67
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] -
68
- tmp_over_one[:, :-1, :]) < 0
69
- cumsum_shift = torch.zeros_like(rad_values)
70
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
71
-
72
- sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1)
73
- * 2 * np.pi)
74
- else:
75
- # If necessary, make sure that the first time step of every
76
- # voiced segments is sin(pi) or cos(0)
77
- # This is used for pulse-train generation
78
-
79
- # identify the last time step in unvoiced segments
80
- uv = self._f02uv(f0_values)
81
- uv_1 = torch.roll(uv, shifts=-1, dims=1)
82
- uv_1[:, -1, :] = 1
83
- u_loc = (uv < 1) * (uv_1 > 0)
84
-
85
-             # get the instantaneous phase
86
- tmp_cumsum = torch.cumsum(rad_values, dim=1)
87
- # different batch needs to be processed differently
88
- for idx in range(f0_values.shape[0]):
89
- temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
90
- temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
91
- # stores the accumulation of i.phase within
92
- # each voiced segments
93
- tmp_cumsum[idx, :, :] = 0
94
- tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
95
-
96
- # rad_values - tmp_cumsum: remove the accumulation of i.phase
97
- # within the previous voiced segment.
98
- i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
99
-
100
- # get the sines
101
- sines = torch.cos(i_phase * 2 * np.pi)
102
- return sines
103
-
104
- def forward(self, f0):
105
- """ sine_tensor, uv = forward(f0)
106
- input F0: tensor(batchsize=1, length, dim=1)
107
- f0 for unvoiced steps should be 0
108
- output sine_tensor: tensor(batchsize=1, length, dim)
109
- output uv: tensor(batchsize=1, length, 1)
110
- """
111
- with torch.no_grad():
112
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim,
113
- device=f0.device)
114
- # fundamental component
115
- f0_buf[:, :, 0] = f0[:, :, 0]
116
- for idx in np.arange(self.harmonic_num):
117
- # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
118
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
119
-
120
- # generate sine waveforms
121
- sine_waves = self._f02sine(f0_buf) * self.sine_amp
122
-
123
- # generate uv signal
124
- # uv = torch.ones(f0.shape)
125
- # uv = uv * (f0 > self.voiced_threshold)
126
- uv = self._f02uv(f0)
127
-
128
- # noise: for unvoiced should be similar to sine_amp
129
- # std = self.sine_amp/3 -> max value ~ self.sine_amp
130
- # . for voiced regions is self.noise_std
131
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
132
- noise = noise_amp * torch.randn_like(sine_waves)
133
-
134
- # first: set the unvoiced part to 0 by uv
135
- # then: additive noise
136
- sine_waves = sine_waves * uv + noise
137
- return sine_waves, uv, noise
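A minimal usage sketch for the SineGen module defined above, with assumed values and the tensor shapes its docstring specifies (batchsize, length, 1):

```python
# Assumed example: one second of a steady 220 Hz F0 with 7 overtones.
sine_gen = SineGen(samp_rate=22050, harmonic_num=7)
f0 = torch.full((1, 22050, 1), 220.0)
sine_tensor, uv, noise = sine_gen(f0)
print(sine_tensor.shape, uv.shape)   # torch.Size([1, 22050, 8]) torch.Size([1, 22050, 1])
```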
138
-
139
-
140
- class PulseGen(torch.nn.Module):
141
- """ Definition of Pulse train generator
142
-
143
- There are many ways to implement pulse generator.
144
- Here, PulseGen is based on SinGen. For a perfect
145
- """
146
- def __init__(self, samp_rate, pulse_amp = 0.1,
147
- noise_std = 0.003, voiced_threshold = 0):
148
- super(PulseGen, self).__init__()
149
- self.pulse_amp = pulse_amp
150
- self.sampling_rate = samp_rate
151
- self.voiced_threshold = voiced_threshold
152
- self.noise_std = noise_std
153
- self.l_sinegen = SineGen(self.sampling_rate, harmonic_num=0, \
154
- sine_amp=self.pulse_amp, noise_std=0, \
155
- voiced_threshold=self.voiced_threshold, \
156
- flag_for_pulse=True)
157
-
158
- def forward(self, f0):
159
- """ Pulse train generator
160
- pulse_train, uv = forward(f0)
161
- input F0: tensor(batchsize=1, length, dim=1)
162
- f0 for unvoiced steps should be 0
163
- output pulse_train: tensor(batchsize=1, length, dim)
164
- output uv: tensor(batchsize=1, length, 1)
165
-
166
- Note: self.l_sine doesn't make sure that the initial phase of
167
- a voiced segment is np.pi, the first pulse in a voiced segment
168
- may not be at the first time step within a voiced segment
169
- """
170
- with torch.no_grad():
171
- sine_wav, uv, noise = self.l_sinegen(f0)
172
-
173
- # sine without additive noise
174
- pure_sine = sine_wav - noise
175
-
176
- # step t corresponds to a pulse if
177
- # sine[t] > sine[t+1] & sine[t] > sine[t-1]
178
- # & sine[t-1], sine[t+1], and sine[t] are voiced
179
- # or
180
- # sine[t] is voiced, sine[t-1] is unvoiced
181
- # we use torch.roll to simulate sine[t+1] and sine[t-1]
182
- sine_1 = torch.roll(pure_sine, shifts=1, dims=1)
183
- uv_1 = torch.roll(uv, shifts=1, dims=1)
184
- uv_1[:, 0, :] = 0
185
- sine_2 = torch.roll(pure_sine, shifts=-1, dims=1)
186
- uv_2 = torch.roll(uv, shifts=-1, dims=1)
187
- uv_2[:, -1, :] = 0
188
-
189
- loc = (pure_sine > sine_1) * (pure_sine > sine_2) \
190
- * (uv_1 > 0) * (uv_2 > 0) * (uv > 0) \
191
- + (uv_1 < 1) * (uv > 0)
192
-
193
- # pulse train without noise
194
- pulse_train = pure_sine * loc
195
-
196
- # additive noise to pulse train
197
- # note that noise from sinegen is zero in voiced regions
198
- pulse_noise = torch.randn_like(pure_sine) * self.noise_std
199
-
200
- # with additive noise on pulse, and unvoiced regions
201
- pulse_train += pulse_noise * loc + pulse_noise * (1 - uv)
202
- return pulse_train, sine_wav, uv, pulse_noise
203
-
204
-
205
- class SignalsConv1d(torch.nn.Module):
206
- """ Filtering input signal with time invariant filter
207
- Note: FIRFilter conducted filtering given fixed FIR weight
208
- SignalsConv1d convolves two signals
209
- Note: this is based on torch.nn.functional.conv1d
210
-
211
- """
212
-
213
- def __init__(self):
214
- super(SignalsConv1d, self).__init__()
215
-
216
- def forward(self, signal, system_ir):
217
- """ output = forward(signal, system_ir)
218
-
219
- signal: (batchsize, length1, dim)
220
- system_ir: (length2, dim)
221
-
222
- output: (batchsize, length1, dim)
223
- """
224
- if signal.shape[-1] != system_ir.shape[-1]:
225
- print("Error: SignalsConv1d expects shape:")
226
- print("signal (batchsize, length1, dim)")
227
- print("system_id (batchsize, length2, dim)")
228
- print("But received signal: {:s}".format(str(signal.shape)))
229
- print(" system_ir: {:s}".format(str(system_ir.shape)))
230
- sys.exit(1)
231
- padding_length = system_ir.shape[0] - 1
232
- groups = signal.shape[-1]
233
-
234
- # pad signal on the left
235
- signal_pad = torch_nn_func.pad(signal.permute(0, 2, 1), \
236
- (padding_length, 0))
237
- # prepare system impulse response as (dim, 1, length2)
238
- # also flip the impulse response
239
- ir = torch.flip(system_ir.unsqueeze(1).permute(2, 1, 0), \
240
- dims=[2])
241
- # convolute
242
- output = torch_nn_func.conv1d(signal_pad, ir, groups=groups)
243
- return output.permute(0, 2, 1)
244
-
245
-
246
- class CyclicNoiseGen_v1(torch.nn.Module):
247
- """ CyclicnoiseGen_v1
248
- Cyclic noise with a single parameter of beta.
249
- Pytorch v1 implementation assumes f_t is also fixed
250
- """
251
-
252
- def __init__(self, samp_rate,
253
- noise_std=0.003, voiced_threshold=0):
254
- super(CyclicNoiseGen_v1, self).__init__()
255
- self.samp_rate = samp_rate
256
- self.noise_std = noise_std
257
- self.voiced_threshold = voiced_threshold
258
-
259
- self.l_pulse = PulseGen(samp_rate, pulse_amp=1.0,
260
- noise_std=noise_std,
261
- voiced_threshold=voiced_threshold)
262
- self.l_conv = SignalsConv1d()
263
-
264
- def noise_decay(self, beta, f0mean):
265
- """ decayed_noise = noise_decay(beta, f0mean)
266
- decayed_noise = n[t]exp(-t * f_mean / beta / samp_rate)
267
-
268
- beta: (dim=1) or (batchsize=1, 1, dim=1)
269
- f0mean (batchsize=1, 1, dim=1)
270
-
271
- decayed_noise (batchsize=1, length, dim=1)
272
- """
273
- with torch.no_grad():
274
- # exp(-1.0 n / T) < 0.01 => n > -log(0.01)*T = 4.60*T
275
- # truncate the noise when decayed by -40 dB
276
- length = 4.6 * self.samp_rate / f0mean
277
- length = length.int()
278
- time_idx = torch.arange(0, length, device=beta.device)
279
- time_idx = time_idx.unsqueeze(0).unsqueeze(2)
280
- time_idx = time_idx.repeat(beta.shape[0], 1, beta.shape[2])
281
-
282
- noise = torch.randn(time_idx.shape, device=beta.device)
283
-
284
- # due to Pytorch implementation, use f0_mean as the f0 factor
285
- decay = torch.exp(-time_idx * f0mean / beta / self.samp_rate)
286
- return noise * self.noise_std * decay
287
-
288
- def forward(self, f0s, beta):
289
-         """ Produce cyclic noise
290
- """
291
- # pulse train
292
- pulse_train, sine_wav, uv, noise = self.l_pulse(f0s)
293
- pure_pulse = pulse_train - noise
294
-
295
- # decayed_noise (length, dim=1)
296
- if (uv < 1).all():
297
- # all unvoiced
298
- cyc_noise = torch.zeros_like(sine_wav)
299
- else:
300
- f0mean = f0s[uv > 0].mean()
301
-
302
- decayed_noise = self.noise_decay(beta, f0mean)[0, :, :]
303
- # convolute
304
- cyc_noise = self.l_conv(pure_pulse, decayed_noise)
305
-
306
-         # add noise in unvoiced segments
307
- cyc_noise = cyc_noise + noise * (1.0 - uv)
308
- return cyc_noise, pulse_train, sine_wav, uv, noise
309
-
310
-
311
- class SineGen(torch.nn.Module):
312
- """ Definition of sine generator
313
- SineGen(samp_rate, harmonic_num = 0,
314
- sine_amp = 0.1, noise_std = 0.003,
315
- voiced_threshold = 0,
316
- flag_for_pulse=False)
317
-
318
- samp_rate: sampling rate in Hz
319
- harmonic_num: number of harmonic overtones (default 0)
320
-     sine_amp: amplitude of sine waveform (default 0.1)
321
- noise_std: std of Gaussian noise (default 0.003)
322
-     voiced_threshold: F0 threshold for U/V classification (default 0)
323
- flag_for_pulse: this SinGen is used inside PulseGen (default False)
324
-
325
- Note: when flag_for_pulse is True, the first time step of a voiced
326
- segment is always sin(np.pi) or cos(0)
327
- """
328
-
329
- def __init__(self, samp_rate, harmonic_num=0,
330
- sine_amp=0.1, noise_std=0.003,
331
- voiced_threshold=0,
332
- flag_for_pulse=False):
333
- super(SineGen, self).__init__()
334
- self.sine_amp = sine_amp
335
- self.noise_std = noise_std
336
- self.harmonic_num = harmonic_num
337
- self.dim = self.harmonic_num + 1
338
- self.sampling_rate = samp_rate
339
- self.voiced_threshold = voiced_threshold
340
- self.flag_for_pulse = flag_for_pulse
341
-
342
- def _f02uv(self, f0):
343
- # generate uv signal
344
- uv = torch.ones_like(f0)
345
- uv = uv * (f0 > self.voiced_threshold)
346
- return uv
347
-
348
- def _f02sine(self, f0_values):
349
- """ f0_values: (batchsize, length, dim)
350
- where dim indicates fundamental tone and overtones
351
- """
352
- # convert to F0 in rad. The interger part n can be ignored
353
- # because 2 * np.pi * n doesn't affect phase
354
- rad_values = (f0_values / self.sampling_rate) % 1
355
-
356
- # initial phase noise (no noise for fundamental component)
357
- rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \
358
- device=f0_values.device)
359
- rand_ini[:, 0] = 0
360
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
361
-
362
- # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad)
363
- if not self.flag_for_pulse:
364
- # for normal case
365
-
366
- # To prevent torch.cumsum numerical overflow,
367
- # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
368
- # Buffer tmp_over_one_idx indicates the time step to add -1.
369
- # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi
370
- tmp_over_one = torch.cumsum(rad_values, 1) % 1
371
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] -
372
- tmp_over_one[:, :-1, :]) < 0
373
- cumsum_shift = torch.zeros_like(rad_values)
374
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
375
-
376
- sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1)
377
- * 2 * np.pi)
378
- else:
379
- # If necessary, make sure that the first time step of every
380
- # voiced segments is sin(pi) or cos(0)
381
- # This is used for pulse-train generation
382
-
383
- # identify the last time step in unvoiced segments
384
- uv = self._f02uv(f0_values)
385
- uv_1 = torch.roll(uv, shifts=-1, dims=1)
386
- uv_1[:, -1, :] = 1
387
- u_loc = (uv < 1) * (uv_1 > 0)
388
-
389
- # get the instantanouse phase
390
- tmp_cumsum = torch.cumsum(rad_values, dim=1)
391
- # different batch needs to be processed differently
392
- for idx in range(f0_values.shape[0]):
393
- temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
394
- temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
395
- # stores the accumulation of i.phase within
396
- # each voiced segments
397
- tmp_cumsum[idx, :, :] = 0
398
- tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
399
-
400
- # rad_values - tmp_cumsum: remove the accumulation of i.phase
401
- # within the previous voiced segment.
402
- i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
403
-
404
- # get the sines
405
- sines = torch.cos(i_phase * 2 * np.pi)
406
- return sines
407
-
408
- def forward(self, f0):
409
- """ sine_tensor, uv = forward(f0)
410
- input F0: tensor(batchsize=1, length, dim=1)
411
- f0 for unvoiced steps should be 0
412
- output sine_tensor: tensor(batchsize=1, length, dim)
413
- output uv: tensor(batchsize=1, length, 1)
414
- """
415
- with torch.no_grad():
416
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \
417
- device=f0.device)
418
- # fundamental component
419
- f0_buf[:, :, 0] = f0[:, :, 0]
420
- for idx in np.arange(self.harmonic_num):
421
- # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
422
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
423
-
424
- # generate sine waveforms
425
- sine_waves = self._f02sine(f0_buf) * self.sine_amp
426
-
427
- # generate uv signal
428
- # uv = torch.ones(f0.shape)
429
- # uv = uv * (f0 > self.voiced_threshold)
430
- uv = self._f02uv(f0)
431
-
432
- # noise: for unvoiced should be similar to sine_amp
433
- # std = self.sine_amp/3 -> max value ~ self.sine_amp
434
- # . for voiced regions is self.noise_std
435
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
436
- noise = noise_amp * torch.randn_like(sine_waves)
437
-
438
- # first: set the unvoiced part to 0 by uv
439
- # then: additive noise
440
- sine_waves = sine_waves * uv + noise
441
- return sine_waves, uv, noise
442
-
443
-
444
- class SourceModuleCycNoise_v1(torch.nn.Module):
445
- """ SourceModuleCycNoise_v1
446
- SourceModule(sampling_rate, noise_std=0.003, voiced_threshod=0)
447
- sampling_rate: sampling_rate in Hz
448
-
449
- noise_std: std of Gaussian noise (default: 0.003)
450
- voiced_threshold: threshold to set U/V given F0 (default: 0)
451
-
452
- cyc, noise, uv = SourceModuleCycNoise_v1(F0_upsampled, beta)
453
- F0_upsampled (batchsize, length, 1)
454
- beta (1)
455
- cyc (batchsize, length, 1)
456
- noise (batchsize, length, 1)
457
- uv (batchsize, length, 1)
458
- """
459
-
460
- def __init__(self, sampling_rate, noise_std=0.003, voiced_threshod=0):
461
- super(SourceModuleCycNoise_v1, self).__init__()
462
- self.sampling_rate = sampling_rate
463
- self.noise_std = noise_std
464
- self.l_cyc_gen = CyclicNoiseGen_v1(sampling_rate, noise_std,
465
- voiced_threshod)
466
-
467
- def forward(self, f0_upsamped, beta):
468
- """
469
- cyc, noise, uv = SourceModuleCycNoise_v1(F0, beta)
470
- F0_upsampled (batchsize, length, 1)
471
- beta (1)
472
- cyc (batchsize, length, 1)
473
- noise (batchsize, length, 1)
474
- uv (batchsize, length, 1)
475
- """
476
- # source for harmonic branch
477
- cyc, pulse, sine, uv, add_noi = self.l_cyc_gen(f0_upsamped, beta)
478
-
479
- # source for noise branch, in the same shape as uv
480
- noise = torch.randn_like(uv) * self.noise_std / 3
481
- return cyc, noise, uv
482
-
483
-
484
- class SourceModuleHnNSF(torch.nn.Module):
485
- """ SourceModule for hn-nsf
486
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
487
- add_noise_std=0.003, voiced_threshod=0)
488
- sampling_rate: sampling_rate in Hz
489
- harmonic_num: number of harmonic above F0 (default: 0)
490
- sine_amp: amplitude of sine source signal (default: 0.1)
491
- add_noise_std: std of additive Gaussian noise (default: 0.003)
492
- note that amplitude of noise in unvoiced is decided
493
- by sine_amp
494
-     voiced_threshold: threshold to set U/V given F0 (default: 0)
495
-
496
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
497
- F0_sampled (batchsize, length, 1)
498
- Sine_source (batchsize, length, 1)
499
- noise_source (batchsize, length 1)
500
- uv (batchsize, length, 1)
501
- """
502
-
503
- def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
504
- add_noise_std=0.003, voiced_threshod=0):
505
- super(SourceModuleHnNSF, self).__init__()
506
-
507
- self.sine_amp = sine_amp
508
- self.noise_std = add_noise_std
509
-
510
- # to produce sine waveforms
511
- self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
512
- sine_amp, add_noise_std, voiced_threshod)
513
-
514
- # to merge source harmonics into a single excitation
515
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
516
- self.l_tanh = torch.nn.Tanh()
517
-
518
- def forward(self, x):
519
- """
520
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
521
- F0_sampled (batchsize, length, 1)
522
- Sine_source (batchsize, length, 1)
523
- noise_source (batchsize, length 1)
524
- """
525
- # source for harmonic branch
526
- sine_wavs, uv, _ = self.l_sin_gen(x)
527
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
528
-
529
- # source for noise branch, in the same shape as uv
530
- noise = torch.randn_like(uv) * self.sine_amp / 3
531
- return sine_merge, noise, uv
532
-
533
-
534
- if __name__ == '__main__':
535
- source = SourceModuleCycNoise_v1(24000)
536
- x = torch.randn(16, 25600, 1)
537
-
538
-
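A minimal usage sketch (not from the deleted file; sampling rate, harmonic count and F0 values are illustrative assumptions). The __main__ stub above only builds the cyclic-noise module, so this shows how the harmonic-plus-noise source defined earlier could be driven with an upsampled F0 contour:

    import torch

    # assumes the SineGen / SourceModuleHnNSF definitions above are importable
    source = SourceModuleHnNSF(sampling_rate=24000, harmonic_num=8)
    f0 = 100.0 + 100.0 * torch.rand(2, 24000, 1)   # (batch, length, 1), all frames voiced
    sine_merge, noise, uv = source(f0)
    # each of the three outputs has shape (2, 24000, 1)
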
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/x_transformer.py DELETED
@@ -1,641 +0,0 @@
1
- """shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers"""
2
- import torch
3
- from torch import nn, einsum
4
- import torch.nn.functional as F
5
- from functools import partial
6
- from inspect import isfunction
7
- from collections import namedtuple
8
- from einops import rearrange, repeat, reduce
9
-
10
- # constants
11
-
12
- DEFAULT_DIM_HEAD = 64
13
-
14
- Intermediates = namedtuple('Intermediates', [
15
- 'pre_softmax_attn',
16
- 'post_softmax_attn'
17
- ])
18
-
19
- LayerIntermediates = namedtuple('LayerIntermediates', [
20
- 'hiddens',
21
- 'attn_intermediates'
22
- ])
23
-
24
-
25
- class AbsolutePositionalEmbedding(nn.Module):
26
- def __init__(self, dim, max_seq_len):
27
- super().__init__()
28
- self.emb = nn.Embedding(max_seq_len, dim)
29
- self.init_()
30
-
31
- def init_(self):
32
- nn.init.normal_(self.emb.weight, std=0.02)
33
-
34
- def forward(self, x):
35
- n = torch.arange(x.shape[1], device=x.device)
36
- return self.emb(n)[None, :, :]
37
-
38
-
39
- class FixedPositionalEmbedding(nn.Module):
40
- def __init__(self, dim):
41
- super().__init__()
42
- inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
43
- self.register_buffer('inv_freq', inv_freq)
44
-
45
- def forward(self, x, seq_dim=1, offset=0):
46
- t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset
47
- sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq)
48
- emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
49
- return emb[None, :, :]
50
-
51
-
52
- # helpers
53
-
54
- def exists(val):
55
- return val is not None
56
-
57
-
58
- def default(val, d):
59
- if exists(val):
60
- return val
61
- return d() if isfunction(d) else d
62
-
63
-
64
- def always(val):
65
- def inner(*args, **kwargs):
66
- return val
67
- return inner
68
-
69
-
70
- def not_equals(val):
71
- def inner(x):
72
- return x != val
73
- return inner
74
-
75
-
76
- def equals(val):
77
- def inner(x):
78
- return x == val
79
- return inner
80
-
81
-
82
- def max_neg_value(tensor):
83
- return -torch.finfo(tensor.dtype).max
84
-
85
-
86
- # keyword argument helpers
87
-
88
- def pick_and_pop(keys, d):
89
- values = list(map(lambda key: d.pop(key), keys))
90
- return dict(zip(keys, values))
91
-
92
-
93
- def group_dict_by_key(cond, d):
94
- return_val = [dict(), dict()]
95
- for key in d.keys():
96
- match = bool(cond(key))
97
- ind = int(not match)
98
- return_val[ind][key] = d[key]
99
- return (*return_val,)
100
-
101
-
102
- def string_begins_with(prefix, str):
103
- return str.startswith(prefix)
104
-
105
-
106
- def group_by_key_prefix(prefix, d):
107
- return group_dict_by_key(partial(string_begins_with, prefix), d)
108
-
109
-
110
- def groupby_prefix_and_trim(prefix, d):
111
- kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
112
- kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
113
- return kwargs_without_prefix, kwargs
114
-
115
-
116
- # classes
117
- class Scale(nn.Module):
118
- def __init__(self, value, fn):
119
- super().__init__()
120
- self.value = value
121
- self.fn = fn
122
-
123
- def forward(self, x, **kwargs):
124
- x, *rest = self.fn(x, **kwargs)
125
- return (x * self.value, *rest)
126
-
127
-
128
- class Rezero(nn.Module):
129
- def __init__(self, fn):
130
- super().__init__()
131
- self.fn = fn
132
- self.g = nn.Parameter(torch.zeros(1))
133
-
134
- def forward(self, x, **kwargs):
135
- x, *rest = self.fn(x, **kwargs)
136
- return (x * self.g, *rest)
137
-
138
-
139
- class ScaleNorm(nn.Module):
140
- def __init__(self, dim, eps=1e-5):
141
- super().__init__()
142
- self.scale = dim ** -0.5
143
- self.eps = eps
144
- self.g = nn.Parameter(torch.ones(1))
145
-
146
- def forward(self, x):
147
- norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
148
- return x / norm.clamp(min=self.eps) * self.g
149
-
150
-
151
- class RMSNorm(nn.Module):
152
- def __init__(self, dim, eps=1e-8):
153
- super().__init__()
154
- self.scale = dim ** -0.5
155
- self.eps = eps
156
- self.g = nn.Parameter(torch.ones(dim))
157
-
158
- def forward(self, x):
159
- norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
160
- return x / norm.clamp(min=self.eps) * self.g
161
-
162
-
163
- class Residual(nn.Module):
164
- def forward(self, x, residual):
165
- return x + residual
166
-
167
-
168
- class GRUGating(nn.Module):
169
- def __init__(self, dim):
170
- super().__init__()
171
- self.gru = nn.GRUCell(dim, dim)
172
-
173
- def forward(self, x, residual):
174
- gated_output = self.gru(
175
- rearrange(x, 'b n d -> (b n) d'),
176
- rearrange(residual, 'b n d -> (b n) d')
177
- )
178
-
179
- return gated_output.reshape_as(x)
180
-
181
-
182
- # feedforward
183
-
184
- class GEGLU(nn.Module):
185
- def __init__(self, dim_in, dim_out):
186
- super().__init__()
187
- self.proj = nn.Linear(dim_in, dim_out * 2)
188
-
189
- def forward(self, x):
190
- x, gate = self.proj(x).chunk(2, dim=-1)
191
- return x * F.gelu(gate)
192
-
193
-
194
- class FeedForward(nn.Module):
195
- def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
196
- super().__init__()
197
- inner_dim = int(dim * mult)
198
- dim_out = default(dim_out, dim)
199
- project_in = nn.Sequential(
200
- nn.Linear(dim, inner_dim),
201
- nn.GELU()
202
- ) if not glu else GEGLU(dim, inner_dim)
203
-
204
- self.net = nn.Sequential(
205
- project_in,
206
- nn.Dropout(dropout),
207
- nn.Linear(inner_dim, dim_out)
208
- )
209
-
210
- def forward(self, x):
211
- return self.net(x)
212
-
213
-
214
- # attention.
215
- class Attention(nn.Module):
216
- def __init__(
217
- self,
218
- dim,
219
- dim_head=DEFAULT_DIM_HEAD,
220
- heads=8,
221
- causal=False,
222
- mask=None,
223
- talking_heads=False,
224
- sparse_topk=None,
225
- use_entmax15=False,
226
- num_mem_kv=0,
227
- dropout=0.,
228
- on_attn=False
229
- ):
230
- super().__init__()
231
- if use_entmax15:
232
- raise NotImplementedError("Check out entmax activation instead of softmax activation!")
233
- self.scale = dim_head ** -0.5
234
- self.heads = heads
235
- self.causal = causal
236
- self.mask = mask
237
-
238
- inner_dim = dim_head * heads
239
-
240
- self.to_q = nn.Linear(dim, inner_dim, bias=False)
241
- self.to_k = nn.Linear(dim, inner_dim, bias=False)
242
- self.to_v = nn.Linear(dim, inner_dim, bias=False)
243
- self.dropout = nn.Dropout(dropout)
244
-
245
- # talking heads
246
- self.talking_heads = talking_heads
247
- if talking_heads:
248
- self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads))
249
- self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads))
250
-
251
- # explicit topk sparse attention
252
- self.sparse_topk = sparse_topk
253
-
254
- # entmax
255
- #self.attn_fn = entmax15 if use_entmax15 else F.softmax
256
- self.attn_fn = F.softmax
257
-
258
- # add memory key / values
259
- self.num_mem_kv = num_mem_kv
260
- if num_mem_kv > 0:
261
- self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
262
- self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
263
-
264
- # attention on attention
265
- self.attn_on_attn = on_attn
266
- self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim)
267
-
268
- def forward(
269
- self,
270
- x,
271
- context=None,
272
- mask=None,
273
- context_mask=None,
274
- rel_pos=None,
275
- sinusoidal_emb=None,
276
- prev_attn=None,
277
- mem=None
278
- ):
279
- b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device
280
- kv_input = default(context, x)
281
-
282
- q_input = x
283
- k_input = kv_input
284
- v_input = kv_input
285
-
286
- if exists(mem):
287
- k_input = torch.cat((mem, k_input), dim=-2)
288
- v_input = torch.cat((mem, v_input), dim=-2)
289
-
290
- if exists(sinusoidal_emb):
291
- # in shortformer, the query would start at a position offset depending on the past cached memory
292
- offset = k_input.shape[-2] - q_input.shape[-2]
293
- q_input = q_input + sinusoidal_emb(q_input, offset=offset)
294
- k_input = k_input + sinusoidal_emb(k_input)
295
-
296
- q = self.to_q(q_input)
297
- k = self.to_k(k_input)
298
- v = self.to_v(v_input)
299
-
300
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
301
-
302
- input_mask = None
303
- if any(map(exists, (mask, context_mask))):
304
- q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool())
305
- k_mask = q_mask if not exists(context) else context_mask
306
- k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool())
307
- q_mask = rearrange(q_mask, 'b i -> b () i ()')
308
- k_mask = rearrange(k_mask, 'b j -> b () () j')
309
- input_mask = q_mask * k_mask
310
-
311
- if self.num_mem_kv > 0:
312
- mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v))
313
- k = torch.cat((mem_k, k), dim=-2)
314
- v = torch.cat((mem_v, v), dim=-2)
315
- if exists(input_mask):
316
- input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True)
317
-
318
- dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
319
- mask_value = max_neg_value(dots)
320
-
321
- if exists(prev_attn):
322
- dots = dots + prev_attn
323
-
324
- pre_softmax_attn = dots
325
-
326
- if talking_heads:
327
- dots = einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous()
328
-
329
- if exists(rel_pos):
330
- dots = rel_pos(dots)
331
-
332
- if exists(input_mask):
333
- dots.masked_fill_(~input_mask, mask_value)
334
- del input_mask
335
-
336
- if self.causal:
337
- i, j = dots.shape[-2:]
338
- r = torch.arange(i, device=device)
339
- mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j')
340
- mask = F.pad(mask, (j - i, 0), value=False)
341
- dots.masked_fill_(mask, mask_value)
342
- del mask
343
-
344
- if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]:
345
- top, _ = dots.topk(self.sparse_topk, dim=-1)
346
- vk = top[..., -1].unsqueeze(-1).expand_as(dots)
347
- mask = dots < vk
348
- dots.masked_fill_(mask, mask_value)
349
- del mask
350
-
351
- attn = self.attn_fn(dots, dim=-1)
352
- post_softmax_attn = attn
353
-
354
- attn = self.dropout(attn)
355
-
356
- if talking_heads:
357
- attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous()
358
-
359
- out = einsum('b h i j, b h j d -> b h i d', attn, v)
360
- out = rearrange(out, 'b h n d -> b n (h d)')
361
-
362
- intermediates = Intermediates(
363
- pre_softmax_attn=pre_softmax_attn,
364
- post_softmax_attn=post_softmax_attn
365
- )
366
-
367
- return self.to_out(out), intermediates
368
-
369
-
370
- class AttentionLayers(nn.Module):
371
- def __init__(
372
- self,
373
- dim,
374
- depth,
375
- heads=8,
376
- causal=False,
377
- cross_attend=False,
378
- only_cross=False,
379
- use_scalenorm=False,
380
- use_rmsnorm=False,
381
- use_rezero=False,
382
- rel_pos_num_buckets=32,
383
- rel_pos_max_distance=128,
384
- position_infused_attn=False,
385
- custom_layers=None,
386
- sandwich_coef=None,
387
- par_ratio=None,
388
- residual_attn=False,
389
- cross_residual_attn=False,
390
- macaron=False,
391
- pre_norm=True,
392
- gate_residual=False,
393
- **kwargs
394
- ):
395
- super().__init__()
396
- ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
397
- attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs)
398
-
399
- dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
400
-
401
- self.dim = dim
402
- self.depth = depth
403
- self.layers = nn.ModuleList([])
404
-
405
- self.has_pos_emb = position_infused_attn
406
- self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None
407
- self.rotary_pos_emb = always(None)
408
-
409
- assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
410
- self.rel_pos = None
411
-
412
- self.pre_norm = pre_norm
413
-
414
- self.residual_attn = residual_attn
415
- self.cross_residual_attn = cross_residual_attn
416
-
417
- norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm
418
- norm_class = RMSNorm if use_rmsnorm else norm_class
419
- norm_fn = partial(norm_class, dim)
420
-
421
- norm_fn = nn.Identity if use_rezero else norm_fn
422
- branch_fn = Rezero if use_rezero else None
423
-
424
- if cross_attend and not only_cross:
425
- default_block = ('a', 'c', 'f')
426
- elif cross_attend and only_cross:
427
- default_block = ('c', 'f')
428
- else:
429
- default_block = ('a', 'f')
430
-
431
- if macaron:
432
- default_block = ('f',) + default_block
433
-
434
- if exists(custom_layers):
435
- layer_types = custom_layers
436
- elif exists(par_ratio):
437
- par_depth = depth * len(default_block)
438
- assert 1 < par_ratio <= par_depth, 'par ratio out of range'
439
- default_block = tuple(filter(not_equals('f'), default_block))
440
- par_attn = par_depth // par_ratio
441
- depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
442
- par_width = (depth_cut + depth_cut // par_attn) // par_attn
443
- assert len(default_block) <= par_width, 'default block is too large for par_ratio'
444
- par_block = default_block + ('f',) * (par_width - len(default_block))
445
- par_head = par_block * par_attn
446
- layer_types = par_head + ('f',) * (par_depth - len(par_head))
447
- elif exists(sandwich_coef):
448
- assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth'
449
- layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
450
- else:
451
- layer_types = default_block * depth
452
-
453
- self.layer_types = layer_types
454
- self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
455
-
456
- for layer_type in self.layer_types:
457
- if layer_type == 'a':
458
- layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs)
459
- elif layer_type == 'c':
460
- layer = Attention(dim, heads=heads, **attn_kwargs)
461
- elif layer_type == 'f':
462
- layer = FeedForward(dim, **ff_kwargs)
463
- layer = layer if not macaron else Scale(0.5, layer)
464
- else:
465
- raise Exception(f'invalid layer type {layer_type}')
466
-
467
- if isinstance(layer, Attention) and exists(branch_fn):
468
- layer = branch_fn(layer)
469
-
470
- if gate_residual:
471
- residual_fn = GRUGating(dim)
472
- else:
473
- residual_fn = Residual()
474
-
475
- self.layers.append(nn.ModuleList([
476
- norm_fn(),
477
- layer,
478
- residual_fn
479
- ]))
480
-
481
- def forward(
482
- self,
483
- x,
484
- context=None,
485
- mask=None,
486
- context_mask=None,
487
- mems=None,
488
- return_hiddens=False
489
- ):
490
- hiddens = []
491
- intermediates = []
492
- prev_attn = None
493
- prev_cross_attn = None
494
-
495
- mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
496
-
497
- for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)):
498
- is_last = ind == (len(self.layers) - 1)
499
-
500
- if layer_type == 'a':
501
- hiddens.append(x)
502
- layer_mem = mems.pop(0)
503
-
504
- residual = x
505
-
506
- if self.pre_norm:
507
- x = norm(x)
508
-
509
- if layer_type == 'a':
510
- out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos,
511
- prev_attn=prev_attn, mem=layer_mem)
512
- elif layer_type == 'c':
513
- out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn)
514
- elif layer_type == 'f':
515
- out = block(x)
516
-
517
- x = residual_fn(out, residual)
518
-
519
- if layer_type in ('a', 'c'):
520
- intermediates.append(inter)
521
-
522
- if layer_type == 'a' and self.residual_attn:
523
- prev_attn = inter.pre_softmax_attn
524
- elif layer_type == 'c' and self.cross_residual_attn:
525
- prev_cross_attn = inter.pre_softmax_attn
526
-
527
- if not self.pre_norm and not is_last:
528
- x = norm(x)
529
-
530
- if return_hiddens:
531
- intermediates = LayerIntermediates(
532
- hiddens=hiddens,
533
- attn_intermediates=intermediates
534
- )
535
-
536
- return x, intermediates
537
-
538
- return x
539
-
540
-
541
- class Encoder(AttentionLayers):
542
- def __init__(self, **kwargs):
543
- assert 'causal' not in kwargs, 'cannot set causality on encoder'
544
- super().__init__(causal=False, **kwargs)
545
-
546
-
547
-
548
- class TransformerWrapper(nn.Module):
549
- def __init__(
550
- self,
551
- *,
552
- num_tokens,
553
- max_seq_len,
554
- attn_layers,
555
- emb_dim=None,
556
- max_mem_len=0.,
557
- emb_dropout=0.,
558
- num_memory_tokens=None,
559
- tie_embedding=False,
560
- use_pos_emb=True
561
- ):
562
- super().__init__()
563
- assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
564
-
565
- dim = attn_layers.dim
566
- emb_dim = default(emb_dim, dim)
567
-
568
- self.max_seq_len = max_seq_len
569
- self.max_mem_len = max_mem_len
570
- self.num_tokens = num_tokens
571
-
572
- self.token_emb = nn.Embedding(num_tokens, emb_dim)
573
- self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if (
574
- use_pos_emb and not attn_layers.has_pos_emb) else always(0)
575
- self.emb_dropout = nn.Dropout(emb_dropout)
576
-
577
- self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
578
- self.attn_layers = attn_layers
579
- self.norm = nn.LayerNorm(dim)
580
-
581
- self.init_()
582
-
583
- self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t()
584
-
585
- # memory tokens (like [cls]) from Memory Transformers paper
586
- num_memory_tokens = default(num_memory_tokens, 0)
587
- self.num_memory_tokens = num_memory_tokens
588
- if num_memory_tokens > 0:
589
- self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
590
-
591
- # let funnel encoder know number of memory tokens, if specified
592
- if hasattr(attn_layers, 'num_memory_tokens'):
593
- attn_layers.num_memory_tokens = num_memory_tokens
594
-
595
- def init_(self):
596
- nn.init.normal_(self.token_emb.weight, std=0.02)
597
-
598
- def forward(
599
- self,
600
- x,
601
- return_embeddings=False,
602
- mask=None,
603
- return_mems=False,
604
- return_attn=False,
605
- mems=None,
606
- **kwargs
607
- ):
608
- b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens
609
- x = self.token_emb(x)
610
- x += self.pos_emb(x)
611
- x = self.emb_dropout(x)
612
-
613
- x = self.project_emb(x)
614
-
615
- if num_mem > 0:
616
- mem = repeat(self.memory_tokens, 'n d -> b n d', b=b)
617
- x = torch.cat((mem, x), dim=1)
618
-
619
- # auto-handle masking after appending memory tokens
620
- if exists(mask):
621
- mask = F.pad(mask, (num_mem, 0), value=True)
622
-
623
- x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs)
624
- x = self.norm(x)
625
-
626
- mem, x = x[:, :num_mem], x[:, num_mem:]
627
-
628
- out = self.to_logits(x) if not return_embeddings else x
629
-
630
- if return_mems:
631
- hiddens = intermediates.hiddens
632
- new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens
633
- new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
634
- return out, new_mems
635
-
636
- if return_attn:
637
- attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
638
- return out, attn_maps
639
-
640
- return out
641
-
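A minimal usage sketch (not from the deleted file; the hyper-parameters and vocabulary size are illustrative assumptions). TransformerWrapper at the end of this module is typically combined with the Encoder stack defined above:

    import torch

    model = TransformerWrapper(
        num_tokens=30522,
        max_seq_len=77,
        attn_layers=Encoder(dim=512, depth=6, heads=8),
    )
    tokens = torch.randint(0, 30522, (2, 77))
    hidden = model(tokens, return_embeddings=True)   # (2, 77, 512)
    logits = model(tokens)                           # (2, 77, 30522)
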
spaces/AILab-CVC/SEED-LLaMA/models/seed_qformer/qformer_causual.py DELETED
@@ -1,1169 +0,0 @@
1
- """
2
- * Copyright (c) 2023, salesforce.com, inc.
3
- * All rights reserved.
4
- * SPDX-License-Identifier: BSD-3-Clause
5
- * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
6
- * By Junnan Li
7
- * Based on huggingface code base
8
- * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert
9
- """
10
-
11
- import math
12
- import os
13
- import warnings
14
- from dataclasses import dataclass
15
- from typing import Optional, Tuple, Dict, Any
16
-
17
- import torch
18
- from torch import Tensor, device, dtype, nn
19
- import torch.utils.checkpoint
20
- from torch.nn import CrossEntropyLoss
21
- import torch.nn.functional as F
22
- import numpy as np
23
-
24
- from transformers.activations import ACT2FN
25
- from transformers.file_utils import (
26
- ModelOutput, )
27
- from transformers.modeling_outputs import (
28
- BaseModelOutputWithPastAndCrossAttentions,
29
- BaseModelOutputWithPoolingAndCrossAttentions,
30
- CausalLMOutputWithCrossAttentions,
31
- MaskedLMOutput,
32
- MultipleChoiceModelOutput,
33
- NextSentencePredictorOutput,
34
- QuestionAnsweringModelOutput,
35
- SequenceClassifierOutput,
36
- TokenClassifierOutput,
37
- )
38
- from transformers.modeling_utils import (
39
- PreTrainedModel,
40
- apply_chunking_to_forward,
41
- find_pruneable_heads_and_indices,
42
- prune_linear_layer,
43
- )
44
- from transformers.utils import logging
45
- from transformers.models.bert.configuration_bert import BertConfig
46
-
47
- #torch.set_printoptions(profile="full")
48
- logger = logging.get_logger(__name__)
49
-
50
-
51
- class BertEmbeddings(nn.Module):
52
- """Construct the embeddings from word and position embeddings."""
53
- def __init__(self, config):
54
- super().__init__()
55
- self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
56
- self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
57
-
58
- # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
59
- # any TensorFlow checkpoint file
60
- self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
61
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
62
-
63
- # position_ids (1, len position emb) is contiguous in memory and exported when serialized
64
- self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
65
- self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
66
-
67
- self.config = config
68
-
69
- def forward(
70
- self,
71
- input_ids=None,
72
- position_ids=None,
73
- query_embeds=None,
74
- past_key_values_length=0,
75
- ):
76
- if input_ids is not None:
77
- seq_length = input_ids.size()[1]
78
- else:
79
- seq_length = 0
80
-
81
- if position_ids is None:
82
- position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length].clone()
83
-
84
- if input_ids is not None:
85
- embeddings = self.word_embeddings(input_ids)
86
- if self.position_embedding_type == "absolute":
87
- position_embeddings = self.position_embeddings(position_ids)
88
- embeddings = embeddings + position_embeddings
89
-
90
- if query_embeds is not None:
91
- embeddings = torch.cat((query_embeds, embeddings), dim=1)
92
- #print(query_embeds.shape, embeddings.shape)
93
- else:
94
- embeddings = query_embeds
95
-
96
- embeddings = self.LayerNorm(embeddings)
97
- embeddings = self.dropout(embeddings)
98
- return embeddings
99
-
100
-
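For orientation (a sketch that is not part of the deleted file; the shapes are made up). BertEmbeddings above prepends the learnable Q-Former query embeddings to the text token embeddings, so the encoder sees one sequence of the form [queries | text]:

    import torch

    batch, num_query, text_len, hidden = 2, 32, 16, 768
    query_embeds = torch.randn(batch, num_query, hidden)
    text_embeds = torch.randn(batch, text_len, hidden)
    combined = torch.cat((query_embeds, text_embeds), dim=1)   # (2, 48, 768)
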
101
- class BertSelfAttention(nn.Module):
102
- def __init__(self, config, is_cross_attention):
103
- super().__init__()
104
- self.config = config
105
- if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
106
- raise ValueError("The hidden size (%d) is not a multiple of the number of attention "
107
- "heads (%d)" % (config.hidden_size, config.num_attention_heads))
108
-
109
- self.num_attention_heads = config.num_attention_heads
110
- self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
111
- self.all_head_size = self.num_attention_heads * self.attention_head_size
112
-
113
- self.query = nn.Linear(config.hidden_size, self.all_head_size)
114
- if is_cross_attention:
115
- self.key = nn.Linear(config.encoder_width, self.all_head_size)
116
- self.value = nn.Linear(config.encoder_width, self.all_head_size)
117
- else:
118
- self.key = nn.Linear(config.hidden_size, self.all_head_size)
119
- self.value = nn.Linear(config.hidden_size, self.all_head_size)
120
-
121
- self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
122
- self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
123
- if (self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query"):
124
- self.max_position_embeddings = config.max_position_embeddings
125
- self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
126
- self.save_attention = False
127
-
128
- def save_attn_gradients(self, attn_gradients):
129
- self.attn_gradients = attn_gradients
130
-
131
- def get_attn_gradients(self):
132
- return self.attn_gradients
133
-
134
- def save_attention_map(self, attention_map):
135
- self.attention_map = attention_map
136
-
137
- def get_attention_map(self):
138
- return self.attention_map
139
-
140
- def transpose_for_scores(self, x):
141
- new_x_shape = x.size()[:-1] + (
142
- self.num_attention_heads,
143
- self.attention_head_size,
144
- )
145
- x = x.view(*new_x_shape)
146
- return x.permute(0, 2, 1, 3)
147
-
148
- def forward(
149
- self,
150
- hidden_states,
151
- attention_mask=None,
152
- head_mask=None,
153
- encoder_hidden_states=None,
154
- encoder_attention_mask=None,
155
- past_key_value=None,
156
- output_attentions=False,
157
- ):
158
-
159
- # If this is instantiated as a cross-attention module, the keys
160
- # and values come from an encoder; the attention mask needs to be
161
- # such that the encoder's padding tokens are not attended to.
162
- is_cross_attention = encoder_hidden_states is not None
163
-
164
- if is_cross_attention:
165
- key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
166
- value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
167
- #print(key_layer.shape, value_layer.shape)
168
- attention_mask = encoder_attention_mask
169
- elif past_key_value is not None:
170
- key_layer = self.transpose_for_scores(self.key(hidden_states))
171
- value_layer = self.transpose_for_scores(self.value(hidden_states))
172
- key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
173
- value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
174
- #print(past_key_value[0].shape, key_layer.shape)
175
- else:
176
- key_layer = self.transpose_for_scores(self.key(hidden_states))
177
- value_layer = self.transpose_for_scores(self.value(hidden_states))
178
-
179
- mixed_query_layer = self.query(hidden_states)
180
-
181
- query_layer = self.transpose_for_scores(mixed_query_layer)
182
- # if past_key_value is not None:
183
- # print(query_layer.shape)
184
-
185
- past_key_value = (key_layer, value_layer)
186
- #print(key_layer.shape, value_layer.shape)
187
-
188
- # Take the dot product between "query" and "key" to get the raw attention scores.
189
- attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
190
- #if is_cross_attention:
191
- # if attention_scores.shape[2] == 32:
192
- # attention_scores_save = attention_scores[0].detach().cpu().numpy()
193
- # print(attention_scores_save.shape)
194
- # np.save('attention_scores_causal_text_child.npy', attention_scores_save)
195
-
196
- if (self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query"):
197
- seq_length = hidden_states.size()[1]
198
- position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
199
- position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
200
- distance = position_ids_l - position_ids_r
201
- positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
202
- positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
203
-
204
- if self.position_embedding_type == "relative_key":
205
- relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
206
- attention_scores = attention_scores + relative_position_scores
207
- elif self.position_embedding_type == "relative_key_query":
208
- relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
209
- relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
210
- attention_scores = (attention_scores + relative_position_scores_query + relative_position_scores_key)
211
-
212
- attention_scores = attention_scores / math.sqrt(self.attention_head_size)
213
- if attention_mask is not None:
214
- # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
215
- attention_scores = attention_scores + attention_mask
216
-
217
- # Normalize the attention scores to probabilities.
218
- attention_probs = nn.Softmax(dim=-1)(attention_scores)
219
-
220
- if is_cross_attention and self.save_attention:
221
- self.save_attention_map(attention_probs)
222
- attention_probs.register_hook(self.save_attn_gradients)
223
-
224
- # This is actually dropping out entire tokens to attend to, which might
225
- # seem a bit unusual, but is taken from the original Transformer paper.
226
- attention_probs_dropped = self.dropout(attention_probs)
227
-
228
- # Mask heads if we want to
229
- if head_mask is not None:
230
- attention_probs_dropped = attention_probs_dropped * head_mask
231
-
232
- context_layer = torch.matmul(attention_probs_dropped, value_layer)
233
-
234
- context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
235
- new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size, )
236
- context_layer = context_layer.view(*new_context_layer_shape)
237
-
238
- outputs = ((context_layer, attention_probs) if output_attentions else (context_layer, ))
239
-
240
- outputs = outputs + (past_key_value, )
241
- return outputs
242
-
243
-
244
- class BertSelfOutput(nn.Module):
245
- def __init__(self, config):
246
- super().__init__()
247
- self.dense = nn.Linear(config.hidden_size, config.hidden_size)
248
- self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
249
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
250
-
251
- def forward(self, hidden_states, input_tensor):
252
- hidden_states = self.dense(hidden_states)
253
- hidden_states = self.dropout(hidden_states)
254
- hidden_states = self.LayerNorm(hidden_states + input_tensor)
255
- return hidden_states
256
-
257
-
258
- class BertAttention(nn.Module):
259
- def __init__(self, config, is_cross_attention=False):
260
- super().__init__()
261
- self.self = BertSelfAttention(config, is_cross_attention)
262
- self.output = BertSelfOutput(config)
263
- self.pruned_heads = set()
264
-
265
- def prune_heads(self, heads):
266
- if len(heads) == 0:
267
- return
268
- heads, index = find_pruneable_heads_and_indices(
269
- heads,
270
- self.self.num_attention_heads,
271
- self.self.attention_head_size,
272
- self.pruned_heads,
273
- )
274
-
275
- # Prune linear layers
276
- self.self.query = prune_linear_layer(self.self.query, index)
277
- self.self.key = prune_linear_layer(self.self.key, index)
278
- self.self.value = prune_linear_layer(self.self.value, index)
279
- self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
280
-
281
- # Update hyper params and store pruned heads
282
- self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
283
- self.self.all_head_size = (self.self.attention_head_size * self.self.num_attention_heads)
284
- self.pruned_heads = self.pruned_heads.union(heads)
285
-
286
- def forward(
287
- self,
288
- hidden_states,
289
- attention_mask=None,
290
- head_mask=None,
291
- encoder_hidden_states=None,
292
- encoder_attention_mask=None,
293
- past_key_value=None,
294
- output_attentions=False,
295
- ):
296
- self_outputs = self.self(
297
- hidden_states,
298
- attention_mask,
299
- head_mask,
300
- encoder_hidden_states,
301
- encoder_attention_mask,
302
- past_key_value,
303
- output_attentions,
304
- )
305
- attention_output = self.output(self_outputs[0], hidden_states)
306
-
307
- outputs = (attention_output, ) + self_outputs[1:] # add attentions if we output them
308
- return outputs
309
-
310
-
311
- class BertIntermediate(nn.Module):
312
- def __init__(self, config):
313
- super().__init__()
314
- self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
315
- if isinstance(config.hidden_act, str):
316
- self.intermediate_act_fn = ACT2FN[config.hidden_act]
317
- else:
318
- self.intermediate_act_fn = config.hidden_act
319
-
320
- def forward(self, hidden_states):
321
- hidden_states = self.dense(hidden_states)
322
- hidden_states = self.intermediate_act_fn(hidden_states)
323
- return hidden_states
324
-
325
-
326
- class BertOutput(nn.Module):
327
- def __init__(self, config):
328
- super().__init__()
329
- self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
330
- self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
331
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
332
-
333
- def forward(self, hidden_states, input_tensor):
334
- hidden_states = self.dense(hidden_states)
335
- hidden_states = self.dropout(hidden_states)
336
- hidden_states = self.LayerNorm(hidden_states + input_tensor)
337
- return hidden_states
338
-
339
-
340
- class BertLayer(nn.Module):
341
- def __init__(self, config, layer_num):
342
- super().__init__()
343
- self.config = config
344
- self.chunk_size_feed_forward = config.chunk_size_feed_forward
345
- self.seq_len_dim = 1
346
- self.attention = BertAttention(config)
347
- self.layer_num = layer_num
348
- if (self.config.add_cross_attention and layer_num % self.config.cross_attention_freq == 0):
349
- self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention)
350
- self.has_cross_attention = True
351
- else:
352
- self.has_cross_attention = False
353
- self.intermediate = BertIntermediate(config)
354
- self.output = BertOutput(config)
355
-
356
- self.intermediate_query = BertIntermediate(config)
357
- self.output_query = BertOutput(config)
358
-
359
- def forward(
360
- self,
361
- hidden_states,
362
- attention_mask=None,
363
- head_mask=None,
364
- encoder_hidden_states=None,
365
- encoder_attention_mask=None,
366
- past_key_value=None,
367
- output_attentions=False,
368
- query_length=0,
369
- ):
370
- # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
371
- self_attn_past_key_value = (past_key_value[:2] if past_key_value is not None else None)
372
- # if past_key_value is not None:
373
- # print(hidden_states.shape, attention_mask.shape)
374
- #print(hidden_states.shape, attention_mask.shape)
375
- # causal attention over the query embeds in self-attention
376
- self_attention_outputs = self.attention(
377
- hidden_states,
378
- attention_mask,
379
- head_mask,
380
- output_attentions=output_attentions,
381
- past_key_value=self_attn_past_key_value,
382
- )
383
- #print('attention_mask', attention_mask.shape)
384
- # if attention_mask.shape[-1] == 77:
385
- # print('attention_mask', attention_mask[0])
386
- attention_output = self_attention_outputs[0]
387
- outputs = self_attention_outputs[1:-1]
388
-
389
- present_key_value = self_attention_outputs[-1]
390
- #print(present_key_value[0].shape)
391
-
392
- if query_length > 0:
393
- query_attention_output = attention_output[:, :query_length, :]
394
-
395
- if self.has_cross_attention:
396
- assert (encoder_hidden_states is not None), "encoder_hidden_states must be given for cross-attention layers"
397
- #print(attention_mask.shape)
398
- cross_attention_outputs = self.crossattention(
399
- query_attention_output,
400
- attention_mask,
401
- head_mask,
402
- encoder_hidden_states,
403
- encoder_attention_mask,
404
- output_attentions=output_attentions,
405
- )
406
- query_attention_output = cross_attention_outputs[0]
407
- outputs = (outputs + cross_attention_outputs[1:-1]) # add cross attentions if we output attention weights
408
-
409
- layer_output = apply_chunking_to_forward(
410
- self.feed_forward_chunk_query,
411
- self.chunk_size_feed_forward,
412
- self.seq_len_dim,
413
- query_attention_output,
414
- )
415
- if attention_output.shape[1] > query_length:
416
- layer_output_text = apply_chunking_to_forward(
417
- self.feed_forward_chunk,
418
- self.chunk_size_feed_forward,
419
- self.seq_len_dim,
420
- attention_output[:, query_length:, :],
421
- )
422
- layer_output = torch.cat([layer_output, layer_output_text], dim=1)
423
- else:
424
- layer_output = apply_chunking_to_forward(
425
- self.feed_forward_chunk,
426
- self.chunk_size_feed_forward,
427
- self.seq_len_dim,
428
- attention_output,
429
- )
430
- outputs = (layer_output, ) + outputs
431
-
432
- outputs = outputs + (present_key_value, )
433
-
434
- return outputs
435
-
436
- def feed_forward_chunk(self, attention_output):
437
- intermediate_output = self.intermediate(attention_output)
438
- layer_output = self.output(intermediate_output, attention_output)
439
- return layer_output
440
-
441
- def feed_forward_chunk_query(self, attention_output):
442
- intermediate_output = self.intermediate_query(attention_output)
443
- layer_output = self.output_query(intermediate_output, attention_output)
444
- return layer_output
445
-
446
-
447
- class BertEncoder(nn.Module):
448
- def __init__(self, config):
449
- super().__init__()
450
- self.config = config
451
- self.layer = nn.ModuleList([BertLayer(config, i) for i in range(config.num_hidden_layers)])
452
-
453
- def forward(
454
- self,
455
- hidden_states,
456
- attention_mask=None,
457
- head_mask=None,
458
- encoder_hidden_states=None,
459
- encoder_attention_mask=None,
460
- past_key_values=None,
461
- use_cache=None,
462
- output_attentions=False,
463
- output_hidden_states=False,
464
- return_dict=True,
465
- query_length=0,
466
- ):
467
- all_hidden_states = () if output_hidden_states else None
468
- all_self_attentions = () if output_attentions else None
469
- all_cross_attentions = (() if output_attentions and self.config.add_cross_attention else None)
470
-
471
- next_decoder_cache = () if use_cache else None
472
-
473
- for i in range(self.config.num_hidden_layers):
474
- layer_module = self.layer[i]
475
- if output_hidden_states:
476
- all_hidden_states = all_hidden_states + (hidden_states, )
477
-
478
- layer_head_mask = head_mask[i] if head_mask is not None else None
479
- past_key_value = past_key_values[i] if past_key_values is not None else None
480
- # if past_key_value is not None:
481
- # print(past_key_value[0].shape, past_key_value[1].shape)
482
-
483
- if getattr(self.config, "gradient_checkpointing", False) and self.training:
484
-
485
- if use_cache:
486
- logger.warn("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
487
- use_cache = False
488
-
489
- def create_custom_forward(module):
490
- def custom_forward(*inputs):
491
- return module(*inputs, past_key_value, output_attentions, query_length)
492
-
493
- return custom_forward
494
-
495
- layer_outputs = torch.utils.checkpoint.checkpoint(
496
- create_custom_forward(layer_module),
497
- hidden_states,
498
- attention_mask,
499
- layer_head_mask,
500
- encoder_hidden_states,
501
- encoder_attention_mask,
502
- )
503
- else:
504
- layer_outputs = layer_module(
505
- hidden_states,
506
- attention_mask,
507
- layer_head_mask,
508
- encoder_hidden_states,
509
- encoder_attention_mask,
510
- past_key_value,
511
- output_attentions,
512
- query_length,
513
- )
514
- # if past_key_value is not None:
515
- # print(hidden_states.shape, attention_mask.shape)
516
- # print(len(past_key_value))
517
-
518
- hidden_states = layer_outputs[0]
519
- if use_cache:
520
- next_decoder_cache += (layer_outputs[-1], )
521
- #print(layer_outputs[-1][0].shape)
522
- if output_attentions:
523
- all_self_attentions = all_self_attentions + (layer_outputs[1], )
524
- all_cross_attentions = all_cross_attentions + (layer_outputs[2], )
525
-
526
- if output_hidden_states:
527
- all_hidden_states = all_hidden_states + (hidden_states, )
528
-
529
- if not return_dict:
530
- return tuple(v for v in [
531
- hidden_states,
532
- next_decoder_cache,
533
- all_hidden_states,
534
- all_self_attentions,
535
- all_cross_attentions,
536
- ] if v is not None)
537
- return BaseModelOutputWithPastAndCrossAttentions(
538
- last_hidden_state=hidden_states,
539
- past_key_values=next_decoder_cache,
540
- hidden_states=all_hidden_states,
541
- attentions=all_self_attentions,
542
- cross_attentions=all_cross_attentions,
543
- )
544
-
545
-
546
- class BertPooler(nn.Module):
547
- def __init__(self, config):
548
- super().__init__()
549
- self.dense = nn.Linear(config.hidden_size, config.hidden_size)
550
- self.activation = nn.Tanh()
551
-
552
- def forward(self, hidden_states):
553
- # We "pool" the model by simply taking the hidden state corresponding
554
- # to the first token.
555
- first_token_tensor = hidden_states[:, 0]
556
- pooled_output = self.dense(first_token_tensor)
557
- pooled_output = self.activation(pooled_output)
558
- return pooled_output
559
-
560
-
561
- class BertPredictionHeadTransform(nn.Module):
562
- def __init__(self, config):
563
- super().__init__()
564
- self.dense = nn.Linear(config.hidden_size, config.hidden_size)
565
- if isinstance(config.hidden_act, str):
566
- self.transform_act_fn = ACT2FN[config.hidden_act]
567
- else:
568
- self.transform_act_fn = config.hidden_act
569
- self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
570
-
571
- def forward(self, hidden_states):
572
- hidden_states = self.dense(hidden_states)
573
- hidden_states = self.transform_act_fn(hidden_states)
574
- hidden_states = self.LayerNorm(hidden_states)
575
- return hidden_states
576
-
577
-
578
- class BertLMPredictionHead(nn.Module):
579
- def __init__(self, config):
580
- super().__init__()
581
- self.transform = BertPredictionHeadTransform(config)
582
-
583
- # The output weights are the same as the input embeddings, but there is
584
- # an output-only bias for each token.
585
- self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
586
-
587
- self.bias = nn.Parameter(torch.zeros(config.vocab_size))
588
-
589
- # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
590
- self.decoder.bias = self.bias
591
-
592
- def forward(self, hidden_states):
593
- hidden_states = self.transform(hidden_states)
594
- hidden_states = self.decoder(hidden_states)
595
- return hidden_states
596
-
597
-
598
- class BertOnlyMLMHead(nn.Module):
599
- def __init__(self, config):
600
- super().__init__()
601
- self.predictions = BertLMPredictionHead(config)
602
-
603
- def forward(self, sequence_output):
604
- prediction_scores = self.predictions(sequence_output)
605
- return prediction_scores
606
-
607
-
608
- class BertPreTrainedModel(PreTrainedModel):
609
- """
610
- An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
611
- models.
612
- """
613
-
614
- config_class = BertConfig
615
- base_model_prefix = "bert"
616
- _keys_to_ignore_on_load_missing = [r"position_ids"]
617
-
618
- def _init_weights(self, module):
619
- """Initialize the weights"""
620
- if isinstance(module, (nn.Linear, nn.Embedding)):
621
- # Slightly different from the TF version which uses truncated_normal for initialization
622
- # cf https://github.com/pytorch/pytorch/pull/5617
623
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
624
- elif isinstance(module, nn.LayerNorm):
625
- module.bias.data.zero_()
626
- module.weight.data.fill_(1.0)
627
- if isinstance(module, nn.Linear) and module.bias is not None:
628
- module.bias.data.zero_()
629
-
630
-
631
- class BertModel(BertPreTrainedModel):
632
- """
633
- The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
634
- cross-attention is added between the self-attention layers, following the architecture described in `Attention is
635
- all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
636
- Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
637
- To be used in a Seq2Seq model, the model needs to be initialized with both the :obj:`is_decoder`
- argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
638
- input to the forward pass.
639
- """
640
- def __init__(self, config, add_pooling_layer=False):
641
- super().__init__(config)
642
- self.config = config
643
-
644
- self.embeddings = BertEmbeddings(config)
645
-
646
- self.encoder = BertEncoder(config)
647
-
648
- self.pooler = BertPooler(config) if add_pooling_layer else None
649
-
650
- self.init_weights()
651
-
652
- def get_input_embeddings(self):
653
- return self.embeddings.word_embeddings
654
-
655
- def set_input_embeddings(self, value):
656
- self.embeddings.word_embeddings = value
657
-
658
- def _prune_heads(self, heads_to_prune):
659
- """
660
- Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
661
- class PreTrainedModel
662
- """
663
- for layer, heads in heads_to_prune.items():
664
- self.encoder.layer[layer].attention.prune_heads(heads)
665
-
666
- def get_extended_attention_mask(
667
- self,
668
- attention_mask: Tensor,
669
- input_shape: Tuple[int],
670
- device: device,
671
- is_decoder: bool,
672
- is_casual: bool,
673
- has_query: bool = False,
674
- ) -> Tensor:
675
- """
676
- Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
677
-
678
- Arguments:
679
- attention_mask (:obj:`torch.Tensor`):
680
- Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
681
- input_shape (:obj:`Tuple[int]`):
682
- The shape of the input to the model.
683
- device: (:obj:`torch.device`):
684
- The device of the input to the model.
685
-
686
- Returns:
687
- :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`.
688
- """
689
- # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
690
- # ourselves in which case we just need to make it broadcastable to all heads.
691
- #print(attention_mask.dim())
692
- if attention_mask.dim() == 3:
693
- extended_attention_mask = attention_mask[:, None, :, :]
694
- elif attention_mask.dim() == 2:
695
- # Provided a padding mask of dimensions [batch_size, seq_length]
696
- # - if the model is a decoder, apply a causal mask in addition to the padding mask
697
- # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
698
- if is_decoder or is_casual:
699
- batch_size, seq_length = input_shape
700
- #print(input_shape)
701
- if not is_decoder and seq_length > 32:
702
- query_length = 32
703
- text_length = seq_length - query_length
704
- query_ids = torch.arange(query_length, device=device)
705
- query_causal_mask = (query_ids[None, None, :].repeat(batch_size, query_length, 1) <= query_ids[None, :,
706
- None])
707
- causal_mask = torch.ones((batch_size, seq_length, seq_length), device=device)
708
- causal_mask[:, :query_length, :query_length] = query_causal_mask
709
- # print(query_causal_mask.shape, causal_mask.shape)
710
- #print(causal_mask[0])
711
-
712
- else:
713
- seq_ids = torch.arange(seq_length, device=device)
714
- causal_mask = (seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None])
715
-
716
- # add a prefix ones mask to the causal mask
717
- # causal and attention masks must have same type with pytorch version < 1.3
718
- causal_mask = causal_mask.to(attention_mask.dtype)
719
- # if is_decoder:
720
- # print(causal_mask.shape, attention_mask.shape)
721
- #print(causal_mask.shape, attention_mask.shape)
722
-
723
- if causal_mask.shape[1] < attention_mask.shape[1]:
724
- prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
725
- if has_query: # UniLM style attention mask
726
- causal_mask = torch.cat(
727
- [
728
- torch.zeros(
729
- (batch_size, prefix_seq_len, seq_length),
730
- device=device,
731
- dtype=causal_mask.dtype,
732
- ),
733
- causal_mask,
734
- ],
735
- axis=1,
736
- )
737
- causal_mask = torch.cat(
738
- [
739
- torch.ones(
740
- (batch_size, causal_mask.shape[1], prefix_seq_len),
741
- device=device,
742
- dtype=causal_mask.dtype,
743
- ),
744
- causal_mask,
745
- ],
746
- axis=-1,
747
- )
748
- #print(has_query, causal_mask.shape)
749
- #print(causal_mask[0])
750
- extended_attention_mask = (causal_mask[:, None, :, :] * attention_mask[:, None, None, :])
751
- #print(extended_attention_mask[0])
752
- #print('extended_attention_mask', extended_attention_mask.shape)
753
- else:
754
- extended_attention_mask = attention_mask[:, None, None, :]
755
- #print(attention_mask.shape, extended_attention_mask.shape)
756
- else:
757
- raise ValueError("Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
758
- input_shape, attention_mask.shape))
759
-
760
- # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
761
- # masked positions, this operation will create a tensor which is 0.0 for
762
- # positions we want to attend and -10000.0 for masked positions.
763
- # Since we are adding it to the raw scores before the softmax, this is
764
- # effectively the same as removing these entirely.
765
- extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
766
- extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
767
- return extended_attention_mask
768
-
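For orientation (a sketch that is not part of the deleted file; it reproduces only the index-comparison trick used above, with 4 tokens instead of 32). The causal branch of get_extended_attention_mask lets each query token attend only to itself and to earlier queries:

    import torch

    query_length = 4
    ids = torch.arange(query_length)
    causal = (ids[None, None, :].repeat(1, query_length, 1) <= ids[None, :, None]).int()
    # causal[0] is lower-triangular:
    # [[1, 0, 0, 0],
    #  [1, 1, 0, 0],
    #  [1, 1, 1, 0],
    #  [1, 1, 1, 1]]
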
769
- def forward(
770
- self,
771
- input_ids=None,
772
- attention_mask=None,
773
- position_ids=None,
774
- head_mask=None,
775
- query_embeds=None,
776
- encoder_hidden_states=None,
777
- encoder_attention_mask=None,
778
- past_key_values=None,
779
- use_cache=None,
780
- output_attentions=None,
781
- output_hidden_states=None,
782
- return_dict=None,
783
- is_decoder=False,
784
- ):
785
- r"""
786
- encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
787
- Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
788
- the model is configured as a decoder.
789
- encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
790
- Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
791
- the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
792
- - 1 for tokens that are **not masked**,
793
- - 0 for tokens that are **masked**.
794
- past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
795
- Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
796
- If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
797
- (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
798
- instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
799
- use_cache (:obj:`bool`, `optional`):
800
- If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
801
- decoding (see :obj:`past_key_values`).
802
- """
803
- output_attentions = (output_attentions if output_attentions is not None else self.config.output_attentions)
804
- output_hidden_states = (output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states)
805
- return_dict = (return_dict if return_dict is not None else self.config.use_return_dict)
806
-
807
- # use_cache = use_cache if use_cache is not None else self.config.use_cache
808
-
809
- if input_ids is None:
810
- assert (query_embeds is not None), "You have to specify query_embeds when input_ids is None"
811
-
812
- #if query_embeds is not None:
813
- if query_embeds is not None and query_embeds.shape[1] == 32:
814
- is_casual = True
815
- else:
816
- is_casual = False
817
- past_key_values_length = (past_key_values[0][0].shape[2] -
818
- self.config.query_length if past_key_values is not None else 0)
819
-
820
- query_length = query_embeds.shape[1] if query_embeds is not None else 0
821
-
822
- embedding_output = self.embeddings(
823
- input_ids=input_ids,
824
- position_ids=position_ids,
825
- query_embeds=query_embeds,
826
- past_key_values_length=past_key_values_length,
827
- )
828
-
829
- input_shape = embedding_output.size()[:-1]
830
- batch_size, seq_length = input_shape
831
- device = embedding_output.device
832
-
833
- #print('attention_mask', attention_mask)
834
- if attention_mask is None:
835
- attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
836
- #print(seq_length, past_key_values_length)
837
-
838
- # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
839
- # ourselves in which case we just need to make it broadcastable to all heads.
840
- if is_decoder:
841
- #print(attention_mask.shape, input_ids.shape)
842
- extended_attention_mask = self.get_extended_attention_mask(
843
- attention_mask,
844
- input_ids.shape,
845
- device,
846
- is_decoder,
847
- is_casual,
848
- has_query=(query_embeds is not None),
849
- )
850
- else:
851
- extended_attention_mask = self.get_extended_attention_mask(
852
- attention_mask,
853
- input_shape,
854
- device,
855
- is_decoder,
856
- is_casual,
857
- )
858
- #print(is_decoder, extended_attention_mask.shape)
859
- # if is_decoder:
860
- # print(extended_attention_mask[0,0,:,32:])
861
- # if attention_mask is not None:
862
- # print(input_ids, embedding_output.shape, extended_attention_mask.shape)
863
-
864
- # If a 2D or 3D attention mask is provided for the cross-attention
865
- # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
866
- if encoder_hidden_states is not None:
867
- if type(encoder_hidden_states) == list:
868
- encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
869
- else:
870
- (
871
- encoder_batch_size,
872
- encoder_sequence_length,
873
- _,
874
- ) = encoder_hidden_states.size()
875
- encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
876
-
877
- if type(encoder_attention_mask) == list:
878
- encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
879
- elif encoder_attention_mask is None:
880
- encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
881
- encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
882
- else:
883
- encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
884
- #print(is_casual, extended_attention_mask.shape, encoder_attention_mask.shape, encoder_extended_attention_mask.shape)
885
- else:
886
- encoder_extended_attention_mask = None
887
-
888
- # if input_ids is not None and query_embeds is not None:
889
- # print(extended_attention_mask.shape, encoder_extended_attention_mask.shape)
890
- # Prepare head mask if needed
891
- # 1.0 in head_mask indicate we keep the head
892
- # attention_probs has shape bsz x n_heads x N x N
893
- # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
894
- # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
895
- head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
896
- #print(head_mask)
897
-
898
- encoder_outputs = self.encoder(
899
- embedding_output,
900
- attention_mask=extended_attention_mask,
901
- head_mask=head_mask,
902
- encoder_hidden_states=encoder_hidden_states,
903
- encoder_attention_mask=encoder_extended_attention_mask,
904
- past_key_values=past_key_values,
905
- use_cache=use_cache,
906
- output_attentions=output_attentions,
907
- output_hidden_states=output_hidden_states,
908
- return_dict=return_dict,
909
- query_length=query_length,
910
- )
911
- # if is_decoder:
912
- # print(embedding_output.shape, attention_mask.shape, len(past_key_values))
913
- #print(embedding_output.shape, extended_attention_mask.shape, encoder_hidden_states.shape, encoder_extended_attention_mask.shape)
914
- #print(extended_attention_mask[0], encoder_extended_attention_mask[0])
915
-
916
- #print(query_embeds.shape, encoder_hidden_states.shape)
917
-
918
- sequence_output = encoder_outputs[0]
919
- pooled_output = (self.pooler(sequence_output) if self.pooler is not None else None)
920
-
921
- if not return_dict:
922
- return (sequence_output, pooled_output) + encoder_outputs[1:]
923
-
924
- return BaseModelOutputWithPoolingAndCrossAttentions(
925
- last_hidden_state=sequence_output,
926
- pooler_output=pooled_output,
927
- past_key_values=encoder_outputs.past_key_values,
928
- hidden_states=encoder_outputs.hidden_states,
929
- attentions=encoder_outputs.attentions,
930
- cross_attentions=encoder_outputs.cross_attentions,
931
- )
932
-
933
-
934
- class BertLMHeadModel(BertPreTrainedModel):
935
-
936
- _keys_to_ignore_on_load_unexpected = [r"pooler"]
937
- _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
938
-
939
- def __init__(self, config):
940
- super().__init__(config)
941
-
942
- self.bert = BertModel(config, add_pooling_layer=False)
943
- self.cls = BertOnlyMLMHead(config)
944
-
945
- self.init_weights()
946
-
947
- def get_output_embeddings(self):
948
- return self.cls.predictions.decoder
949
-
950
- def set_output_embeddings(self, new_embeddings):
951
- self.cls.predictions.decoder = new_embeddings
952
-
953
- def forward(
954
- self,
955
- input_ids=None,
956
- attention_mask=None,
957
- position_ids=None,
958
- head_mask=None,
959
- query_embeds=None,
960
- encoder_hidden_states=None,
961
- encoder_attention_mask=None,
962
- labels=None,
963
- past_key_values=None,
964
- use_cache=True,
965
- output_attentions=None,
966
- output_hidden_states=None,
967
- return_dict=None,
968
- return_logits=False,
969
- is_decoder=True,
970
- reduction="mean",
971
- ):
972
- r"""
973
- encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
974
- Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
975
- the model is configured as a decoder.
976
- encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
977
- Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
978
- the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
979
- - 1 for tokens that are **not masked**,
980
- - 0 for tokens that are **masked**.
981
- labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
982
- Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
983
- ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
984
- ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
985
- past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
986
- Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
987
- If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
988
- (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
989
- instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
990
- use_cache (:obj:`bool`, `optional`):
991
- If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
992
- decoding (see :obj:`past_key_values`).
993
- Returns:
994
- Example::
995
- >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
996
- >>> import torch
997
- >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
998
- >>> config = BertConfig.from_pretrained("bert-base-cased")
999
- >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
1000
- >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1001
- >>> outputs = model(**inputs)
1002
- >>> prediction_logits = outputs.logits
1003
- """
1004
- return_dict = (return_dict if return_dict is not None else self.config.use_return_dict)
1005
- if labels is not None:
1006
- use_cache = False
1007
- if past_key_values is not None:
1008
- query_embeds = None
1009
- #print(len(past_key_values))
1010
- #print('attention_mask', attention_mask)
1011
- outputs = self.bert(
1012
- input_ids,
1013
- attention_mask=attention_mask,
1014
- position_ids=position_ids,
1015
- head_mask=head_mask,
1016
- query_embeds=query_embeds,
1017
- encoder_hidden_states=encoder_hidden_states,
1018
- encoder_attention_mask=encoder_attention_mask,
1019
- past_key_values=past_key_values,
1020
- use_cache=use_cache,
1021
- output_attentions=output_attentions,
1022
- output_hidden_states=output_hidden_states,
1023
- return_dict=return_dict,
1024
- is_decoder=is_decoder,
1025
- )
1026
-
1027
- sequence_output = outputs[0]
1028
- if query_embeds is not None:
1029
- sequence_output = outputs[0][:, query_embeds.shape[1]:, :]
1030
-
1031
- prediction_scores = self.cls(sequence_output)
1032
-
1033
- if return_logits:
1034
- return prediction_scores[:, :-1, :].contiguous()
1035
-
1036
- lm_loss = None
1037
- if labels is not None:
1038
- # we are doing next-token prediction; shift prediction scores and input ids by one
1039
- shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
1040
- labels = labels[:, 1:].contiguous()
1041
- loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1)
1042
- lm_loss = loss_fct(
1043
- shifted_prediction_scores.view(-1, self.config.vocab_size),
1044
- labels.view(-1),
1045
- )
1046
- if reduction == "none":
1047
- lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
1048
-
1049
- if not return_dict:
1050
- output = (prediction_scores, ) + outputs[2:]
1051
- return ((lm_loss, ) + output) if lm_loss is not None else output
1052
-
1053
- return CausalLMOutputWithCrossAttentions(
1054
- loss=lm_loss,
1055
- logits=prediction_scores,
1056
- past_key_values=outputs.past_key_values,
1057
- hidden_states=outputs.hidden_states,
1058
- attentions=outputs.attentions,
1059
- cross_attentions=outputs.cross_attentions,
1060
- )
1061
-
1062
- def prepare_inputs_for_generation(self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs):
1063
- # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1064
- if attention_mask is None:
1065
- attention_mask = input_ids.new_ones(input_ids.shape)
1066
- query_mask = input_ids.new_ones(query_embeds.shape[:-1])
1067
- attention_mask = torch.cat([query_mask, attention_mask], dim=-1)
1068
-
1069
- # cut decoder_input_ids if past is used
1070
- if past is not None:
1071
- input_ids = input_ids[:, -1:]
1072
-
1073
- return {
1074
- "input_ids": input_ids,
1075
- "query_embeds": query_embeds,
1076
- "attention_mask": attention_mask,
1077
- "past_key_values": past,
1078
- "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
1079
- "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
1080
- "is_decoder": True,
1081
- }
1082
-
1083
- def _reorder_cache(self, past, beam_idx):
1084
- reordered_past = ()
1085
- for layer_past in past:
1086
- reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past), )
1087
- return reordered_past
1088
-
1089
-
1090
- class BertForMaskedLM(BertPreTrainedModel):
1091
-
1092
- _keys_to_ignore_on_load_unexpected = [r"pooler"]
1093
- _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
1094
-
1095
- def __init__(self, config):
1096
- super().__init__(config)
1097
-
1098
- self.bert = BertModel(config, add_pooling_layer=False)
1099
- self.cls = BertOnlyMLMHead(config)
1100
-
1101
- self.init_weights()
1102
-
1103
- def get_output_embeddings(self):
1104
- return self.cls.predictions.decoder
1105
-
1106
- def set_output_embeddings(self, new_embeddings):
1107
- self.cls.predictions.decoder = new_embeddings
1108
-
1109
- def forward(
1110
- self,
1111
- input_ids=None,
1112
- attention_mask=None,
1113
- position_ids=None,
1114
- head_mask=None,
1115
- query_embeds=None,
1116
- encoder_hidden_states=None,
1117
- encoder_attention_mask=None,
1118
- labels=None,
1119
- output_attentions=None,
1120
- output_hidden_states=None,
1121
- return_dict=None,
1122
- return_logits=False,
1123
- is_decoder=False,
1124
- ):
1125
- r"""
1126
- labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
1127
- Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
1128
- config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
1129
- (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
1130
- """
1131
-
1132
- return_dict = (return_dict if return_dict is not None else self.config.use_return_dict)
1133
-
1134
- outputs = self.bert(
1135
- input_ids,
1136
- attention_mask=attention_mask,
1137
- position_ids=position_ids,
1138
- head_mask=head_mask,
1139
- query_embeds=query_embeds,
1140
- encoder_hidden_states=encoder_hidden_states,
1141
- encoder_attention_mask=encoder_attention_mask,
1142
- output_attentions=output_attentions,
1143
- output_hidden_states=output_hidden_states,
1144
- return_dict=return_dict,
1145
- is_decoder=is_decoder,
1146
- )
1147
-
1148
- sequence_output = outputs[0] if query_embeds is None else outputs[0][:, query_embeds.shape[1]:, :]
- prediction_scores = self.cls(sequence_output)
1151
-
1152
- if return_logits:
1153
- return prediction_scores
1154
-
1155
- masked_lm_loss = None
1156
- if labels is not None:
1157
- loss_fct = CrossEntropyLoss() # -100 index = padding token
1158
- masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1159
-
1160
- if not return_dict:
1161
- output = (prediction_scores, ) + outputs[2:]
1162
- return (((masked_lm_loss, ) + output) if masked_lm_loss is not None else output)
1163
-
1164
- return MaskedLMOutput(
1165
- loss=masked_lm_loss,
1166
- logits=prediction_scores,
1167
- hidden_states=outputs.hidden_states,
1168
- attentions=outputs.attentions,
1169
- )
 
spaces/AIWaves/Software_Company/src/agents/Agent/__init__.py DELETED
@@ -1 +0,0 @@
- from .Agent import Agent
 
 
spaces/AP123/dreamgaussian/main.py DELETED
@@ -1,882 +0,0 @@
1
- import os
2
- import cv2
3
- import time
4
- import tqdm
5
- import numpy as np
6
- import dearpygui.dearpygui as dpg
7
-
8
- import torch
9
- import torch.nn.functional as F
10
-
11
- import rembg
12
-
13
- from cam_utils import orbit_camera, OrbitCamera
14
- from gs_renderer import Renderer, MiniCam
15
-
16
- from grid_put import mipmap_linear_grid_put_2d
17
- from mesh import Mesh, safe_normalize
18
-
19
- class GUI:
20
- def __init__(self, opt):
21
- self.opt = opt # shared with the trainer's opt to support in-place modification of rendering parameters.
22
- self.gui = opt.gui # enable gui
23
- self.W = opt.W
24
- self.H = opt.H
25
- self.cam = OrbitCamera(opt.W, opt.H, r=opt.radius, fovy=opt.fovy)
26
-
27
- self.mode = "image"
28
- self.seed = "random"
29
-
30
- self.buffer_image = np.ones((self.W, self.H, 3), dtype=np.float32)
31
- self.need_update = True # update buffer_image
32
-
33
- # models
34
- self.device = torch.device("cuda")
35
- self.bg_remover = None
36
-
37
- self.guidance_sd = None
38
- self.guidance_zero123 = None
39
-
40
- self.enable_sd = False
41
- self.enable_zero123 = False
42
-
43
- # renderer
44
- self.renderer = Renderer(sh_degree=self.opt.sh_degree)
45
- self.gaussain_scale_factor = 1
46
-
47
- # input image
48
- self.input_img = None
49
- self.input_mask = None
50
- self.input_img_torch = None
51
- self.input_mask_torch = None
52
- self.overlay_input_img = False
53
- self.overlay_input_img_ratio = 0.5
54
-
55
- # input text
56
- self.prompt = ""
57
- self.negative_prompt = ""
58
-
59
- # training stuff
60
- self.training = False
61
- self.optimizer = None
62
- self.step = 0
63
- self.train_steps = 1 # steps per rendering loop
64
-
65
- # load input data from cmdline
66
- if self.opt.input is not None:
67
- self.load_input(self.opt.input)
68
-
69
- # override prompt from cmdline
70
- if self.opt.prompt is not None:
71
- self.prompt = self.opt.prompt
72
-
73
- # override if provide a checkpoint
74
- if self.opt.load is not None:
75
- self.renderer.initialize(self.opt.load)
76
- else:
77
- # initialize gaussians to a blob
78
- self.renderer.initialize(num_pts=self.opt.num_pts)
79
-
80
- if self.gui:
81
- dpg.create_context()
82
- self.register_dpg()
83
- self.test_step()
84
-
85
- def __del__(self):
86
- if self.gui:
87
- dpg.destroy_context()
88
-
89
- def seed_everything(self):
90
- try:
91
- seed = int(self.seed)
92
- except:
93
- seed = np.random.randint(0, 1000000)
94
-
95
- os.environ["PYTHONHASHSEED"] = str(seed)
96
- np.random.seed(seed)
97
- torch.manual_seed(seed)
98
- torch.cuda.manual_seed(seed)
99
- torch.backends.cudnn.deterministic = True
100
- torch.backends.cudnn.benchmark = True
101
-
102
- self.last_seed = seed
103
-
104
- def prepare_train(self):
105
-
106
- self.step = 0
107
-
108
- # setup training
109
- self.renderer.gaussians.training_setup(self.opt)
110
- # do not do progressive sh-level
111
- self.renderer.gaussians.active_sh_degree = self.renderer.gaussians.max_sh_degree
112
- self.optimizer = self.renderer.gaussians.optimizer
113
-
114
- # default camera
115
- pose = orbit_camera(self.opt.elevation, 0, self.opt.radius)
116
- self.fixed_cam = MiniCam(
117
- pose,
118
- self.opt.ref_size,
119
- self.opt.ref_size,
120
- self.cam.fovy,
121
- self.cam.fovx,
122
- self.cam.near,
123
- self.cam.far,
124
- )
125
-
126
- self.enable_sd = self.opt.lambda_sd > 0 and self.prompt != ""
127
- self.enable_zero123 = self.opt.lambda_zero123 > 0 and self.input_img is not None
128
-
129
- # lazy load guidance model
130
- if self.guidance_sd is None and self.enable_sd:
131
- print(f"[INFO] loading SD...")
132
- from guidance.sd_utils import StableDiffusion
133
- self.guidance_sd = StableDiffusion(self.device)
134
- print(f"[INFO] loaded SD!")
135
-
136
- if self.guidance_zero123 is None and self.enable_zero123:
137
- print(f"[INFO] loading zero123...")
138
- from guidance.zero123_utils import Zero123
139
- self.guidance_zero123 = Zero123(self.device)
140
- print(f"[INFO] loaded zero123!")
141
-
142
- # input image
143
- if self.input_img is not None:
144
- self.input_img_torch = torch.from_numpy(self.input_img).permute(2, 0, 1).unsqueeze(0).to(self.device)
145
- self.input_img_torch = F.interpolate(self.input_img_torch, (self.opt.ref_size, self.opt.ref_size), mode="bilinear", align_corners=False)
146
-
147
- self.input_mask_torch = torch.from_numpy(self.input_mask).permute(2, 0, 1).unsqueeze(0).to(self.device)
148
- self.input_mask_torch = F.interpolate(self.input_mask_torch, (self.opt.ref_size, self.opt.ref_size), mode="bilinear", align_corners=False)
149
-
150
- # prepare embeddings
151
- with torch.no_grad():
152
-
153
- if self.enable_sd:
154
- self.guidance_sd.get_text_embeds([self.prompt], [self.negative_prompt])
155
-
156
- if self.enable_zero123:
157
- self.guidance_zero123.get_img_embeds(self.input_img_torch)
158
-
159
- def train_step(self):
160
- starter = torch.cuda.Event(enable_timing=True)
161
- ender = torch.cuda.Event(enable_timing=True)
162
- starter.record()
163
-
164
- for _ in range(self.train_steps):
165
-
166
- self.step += 1
167
- step_ratio = min(1, self.step / self.opt.iters)
168
-
169
- # update lr
170
- self.renderer.gaussians.update_learning_rate(self.step)
171
-
172
- loss = 0
173
-
174
- ### known view
175
- if self.input_img_torch is not None:
176
- cur_cam = self.fixed_cam
177
- out = self.renderer.render(cur_cam)
178
-
179
- # rgb loss
180
- image = out["image"].unsqueeze(0) # [1, 3, H, W] in [0, 1]
181
- loss = loss + 10000 * step_ratio * F.mse_loss(image, self.input_img_torch)
182
-
183
- # mask loss
184
- mask = out["alpha"].unsqueeze(0) # [1, 1, H, W] in [0, 1]
185
- loss = loss + 1000 * step_ratio * F.mse_loss(mask, self.input_mask_torch)
186
-
187
- ### novel view (manual batch)
188
- render_resolution = 128 if step_ratio < 0.3 else (256 if step_ratio < 0.6 else 512)
189
- images = []
190
- vers, hors, radii = [], [], []
191
- # avoid too large elevation (> 80 or < -80), and make sure it always cover [-30, 30]
192
- min_ver = max(min(-30, -30 - self.opt.elevation), -80 - self.opt.elevation)
193
- max_ver = min(max(30, 30 - self.opt.elevation), 80 - self.opt.elevation)
194
- for _ in range(self.opt.batch_size):
195
-
196
- # render random view
197
- ver = np.random.randint(min_ver, max_ver)
198
- hor = np.random.randint(-180, 180)
199
- radius = 0
200
-
201
- vers.append(ver)
202
- hors.append(hor)
203
- radii.append(radius)
204
-
205
- pose = orbit_camera(self.opt.elevation + ver, hor, self.opt.radius + radius)
206
-
207
- cur_cam = MiniCam(
208
- pose,
209
- render_resolution,
210
- render_resolution,
211
- self.cam.fovy,
212
- self.cam.fovx,
213
- self.cam.near,
214
- self.cam.far,
215
- )
216
-
217
- invert_bg_color = np.random.rand() > self.opt.invert_bg_prob
218
- out = self.renderer.render(cur_cam, invert_bg_color=invert_bg_color)
219
-
220
- image = out["image"].unsqueeze(0)# [1, 3, H, W] in [0, 1]
221
- images.append(image)
222
-
223
- images = torch.cat(images, dim=0)
224
-
225
- # import kiui
226
- # kiui.lo(hor, ver)
227
- # kiui.vis.plot_image(image)
228
-
229
- # guidance loss
230
- if self.enable_sd:
231
- loss = loss + self.opt.lambda_sd * self.guidance_sd.train_step(images, step_ratio)
232
-
233
- if self.enable_zero123:
234
- loss = loss + self.opt.lambda_zero123 * self.guidance_zero123.train_step(images, vers, hors, radii, step_ratio)
235
-
236
- # optimize step
237
- loss.backward()
238
- self.optimizer.step()
239
- self.optimizer.zero_grad()
240
-
241
- # densify and prune
242
- if self.step >= self.opt.density_start_iter and self.step <= self.opt.density_end_iter:
243
- viewspace_point_tensor, visibility_filter, radii = out["viewspace_points"], out["visibility_filter"], out["radii"]
244
- self.renderer.gaussians.max_radii2D[visibility_filter] = torch.max(self.renderer.gaussians.max_radii2D[visibility_filter], radii[visibility_filter])
245
- self.renderer.gaussians.add_densification_stats(viewspace_point_tensor, visibility_filter)
246
-
247
- if self.step % self.opt.densification_interval == 0:
248
- # size_threshold = 20 if self.step > self.opt.opacity_reset_interval else None
249
- self.renderer.gaussians.densify_and_prune(self.opt.densify_grad_threshold, min_opacity=0.01, extent=0.5, max_screen_size=1)
250
-
251
- if self.step % self.opt.opacity_reset_interval == 0:
252
- self.renderer.gaussians.reset_opacity()
253
-
254
- ender.record()
255
- torch.cuda.synchronize()
256
- t = starter.elapsed_time(ender)
257
-
258
- self.need_update = True
259
-
260
- if self.gui:
261
- dpg.set_value("_log_train_time", f"{t:.4f}ms")
262
- dpg.set_value(
263
- "_log_train_log",
264
- f"step = {self.step: 5d} (+{self.train_steps: 2d}) loss = {loss.item():.4f}",
265
- )
266
-
267
- # dynamic train steps (no need for now)
268
- # max allowed train time per-frame is 500 ms
269
- # full_t = t / self.train_steps * 16
270
- # train_steps = min(16, max(4, int(16 * 500 / full_t)))
271
- # if train_steps > self.train_steps * 1.2 or train_steps < self.train_steps * 0.8:
272
- # self.train_steps = train_steps
273
-
274
- @torch.no_grad()
275
- def test_step(self):
276
- # ignore if no need to update
277
- if not self.need_update:
278
- return
279
-
280
- starter = torch.cuda.Event(enable_timing=True)
281
- ender = torch.cuda.Event(enable_timing=True)
282
- starter.record()
283
-
284
- # should update image
285
- if self.need_update:
286
- # render image
287
-
288
- cur_cam = MiniCam(
289
- self.cam.pose,
290
- self.W,
291
- self.H,
292
- self.cam.fovy,
293
- self.cam.fovx,
294
- self.cam.near,
295
- self.cam.far,
296
- )
297
-
298
- out = self.renderer.render(cur_cam, self.gaussain_scale_factor)
299
-
300
- buffer_image = out[self.mode] # [3, H, W]
301
-
302
- if self.mode in ['depth', 'alpha']:
303
- buffer_image = buffer_image.repeat(3, 1, 1)
304
- if self.mode == 'depth':
305
- buffer_image = (buffer_image - buffer_image.min()) / (buffer_image.max() - buffer_image.min() + 1e-20)
306
-
307
- buffer_image = F.interpolate(
308
- buffer_image.unsqueeze(0),
309
- size=(self.H, self.W),
310
- mode="bilinear",
311
- align_corners=False,
312
- ).squeeze(0)
313
-
314
- self.buffer_image = (
315
- buffer_image.permute(1, 2, 0)
316
- .contiguous()
317
- .clamp(0, 1)
318
- .contiguous()
319
- .detach()
320
- .cpu()
321
- .numpy()
322
- )
323
-
324
- # display input_image
325
- if self.overlay_input_img and self.input_img is not None:
326
- self.buffer_image = (
327
- self.buffer_image * (1 - self.overlay_input_img_ratio)
328
- + self.input_img * self.overlay_input_img_ratio
329
- )
330
-
331
- self.need_update = False
332
-
333
- ender.record()
334
- torch.cuda.synchronize()
335
- t = starter.elapsed_time(ender)
336
-
337
- if self.gui:
338
- dpg.set_value("_log_infer_time", f"{t:.4f}ms ({int(1000/t)} FPS)")
339
- dpg.set_value(
340
- "_texture", self.buffer_image
341
- ) # buffer must be contiguous, else seg fault!
342
-
343
-
344
- def load_input(self, file):
345
- # load image
346
- print(f'[INFO] load image from {file}...')
347
- img = cv2.imread(file, cv2.IMREAD_UNCHANGED)
348
- if img.shape[-1] == 3:
349
- if self.bg_remover is None:
350
- self.bg_remover = rembg.new_session()
351
- img = rembg.remove(img, session=self.bg_remover)
352
-
353
- img = cv2.resize(img, (self.W, self.H), interpolation=cv2.INTER_AREA)
354
- img = img.astype(np.float32) / 255.0
355
-
356
- self.input_mask = img[..., 3:]
357
- # white bg
358
- self.input_img = img[..., :3] * self.input_mask + (1 - self.input_mask)
359
- # bgr to rgb
360
- self.input_img = self.input_img[..., ::-1].copy()
361
-
362
- # load prompt
363
- file_prompt = file.replace("_rgba.png", "_caption.txt")
364
- if os.path.exists(file_prompt):
365
- print(f'[INFO] load prompt from {file_prompt}...')
366
- with open(file_prompt, "r") as f:
367
- self.prompt = f.read().strip()
368
-
369
- @torch.no_grad()
370
- def save_model(self, mode='geo', texture_size=1024):
371
- os.makedirs(self.opt.outdir, exist_ok=True)
372
- if mode == 'geo':
373
- path = os.path.join(self.opt.outdir, self.opt.save_path + '_mesh.ply')
374
- mesh = self.renderer.gaussians.extract_mesh(path, self.opt.density_thresh)
375
- mesh.write_ply(path)
376
-
377
- elif mode == 'geo+tex':
378
- path = os.path.join(self.opt.outdir, self.opt.save_path + '_mesh.obj')
379
- mesh = self.renderer.gaussians.extract_mesh(path, self.opt.density_thresh)
380
-
381
- # perform texture extraction
382
- print(f"[INFO] unwrap uv...")
383
- h = w = texture_size
384
- mesh.auto_uv()
385
- mesh.auto_normal()
386
-
387
- albedo = torch.zeros((h, w, 3), device=self.device, dtype=torch.float32)
388
- cnt = torch.zeros((h, w, 1), device=self.device, dtype=torch.float32)
389
-
390
- # self.prepare_train() # tmp fix for not loading 0123
391
- # vers = [0]
392
- # hors = [0]
393
- vers = [0] * 8 + [-45] * 8 + [45] * 8 + [-89.9, 89.9]
394
- hors = [0, 45, -45, 90, -90, 135, -135, 180] * 3 + [0, 0]
395
-
396
- render_resolution = 512
397
-
398
- import nvdiffrast.torch as dr
399
-
400
- if not self.opt.gui or os.name == 'nt':
401
- glctx = dr.RasterizeGLContext()
402
- else:
403
- glctx = dr.RasterizeCudaContext()
404
-
405
- for ver, hor in zip(vers, hors):
406
- # render image
407
- pose = orbit_camera(ver, hor, self.cam.radius)
408
-
409
- cur_cam = MiniCam(
410
- pose,
411
- render_resolution,
412
- render_resolution,
413
- self.cam.fovy,
414
- self.cam.fovx,
415
- self.cam.near,
416
- self.cam.far,
417
- )
418
-
419
- cur_out = self.renderer.render(cur_cam)
420
-
421
- rgbs = cur_out["image"].unsqueeze(0) # [1, 3, H, W] in [0, 1]
422
-
423
- # enhance texture quality with zero123 [not working well]
424
- # if self.opt.guidance_model == 'zero123':
425
- # rgbs = self.guidance.refine(rgbs, [ver], [hor], [0])
426
- # import kiui
427
- # kiui.vis.plot_image(rgbs)
428
-
429
- # get coordinate in texture image
430
- pose = torch.from_numpy(pose.astype(np.float32)).to(self.device)
431
- proj = torch.from_numpy(self.cam.perspective.astype(np.float32)).to(self.device)
432
-
433
- v_cam = torch.matmul(F.pad(mesh.v, pad=(0, 1), mode='constant', value=1.0), torch.inverse(pose).T).float().unsqueeze(0)
434
- v_clip = v_cam @ proj.T
435
- rast, rast_db = dr.rasterize(glctx, v_clip, mesh.f, (render_resolution, render_resolution))
436
-
437
- depth, _ = dr.interpolate(-v_cam[..., [2]], rast, mesh.f) # [1, H, W, 1]
438
- depth = depth.squeeze(0) # [H, W, 1]
439
-
440
- alpha = (rast[0, ..., 3:] > 0).float()
441
-
442
- uvs, _ = dr.interpolate(mesh.vt.unsqueeze(0), rast, mesh.ft) # [1, 512, 512, 2] in [0, 1]
443
-
444
- # use normal to produce a back-project mask
445
- normal, _ = dr.interpolate(mesh.vn.unsqueeze(0).contiguous(), rast, mesh.fn)
446
- normal = safe_normalize(normal[0])
447
-
448
- # rotated normal (where [0, 0, 1] always faces camera)
449
- rot_normal = normal @ pose[:3, :3]
450
- viewcos = rot_normal[..., [2]]
451
-
452
- mask = (alpha > 0) & (viewcos > 0.5) # [H, W, 1]
453
- mask = mask.view(-1)
454
-
455
- uvs = uvs.view(-1, 2).clamp(0, 1)[mask]
456
- rgbs = rgbs.view(3, -1).permute(1, 0)[mask].contiguous()
457
-
458
- # update texture image
459
- cur_albedo, cur_cnt = mipmap_linear_grid_put_2d(
460
- h, w,
461
- uvs[..., [1, 0]] * 2 - 1,
462
- rgbs,
463
- min_resolution=256,
464
- return_count=True,
465
- )
466
-
467
- # albedo += cur_albedo
468
- # cnt += cur_cnt
469
- mask = cnt.squeeze(-1) < 0.1
470
- albedo[mask] += cur_albedo[mask]
471
- cnt[mask] += cur_cnt[mask]
472
-
473
- mask = cnt.squeeze(-1) > 0
474
- albedo[mask] = albedo[mask] / cnt[mask].repeat(1, 3)
475
-
476
- mask = mask.view(h, w)
477
-
478
- albedo = albedo.detach().cpu().numpy()
479
- mask = mask.detach().cpu().numpy()
480
-
481
- # dilate texture
482
- from sklearn.neighbors import NearestNeighbors
483
- from scipy.ndimage import binary_dilation, binary_erosion
484
-
485
- inpaint_region = binary_dilation(mask, iterations=32)
486
- inpaint_region[mask] = 0
487
-
488
- search_region = mask.copy()
489
- not_search_region = binary_erosion(search_region, iterations=3)
490
- search_region[not_search_region] = 0
491
-
492
- search_coords = np.stack(np.nonzero(search_region), axis=-1)
493
- inpaint_coords = np.stack(np.nonzero(inpaint_region), axis=-1)
494
-
495
- knn = NearestNeighbors(n_neighbors=1, algorithm="kd_tree").fit(
496
- search_coords
497
- )
498
- _, indices = knn.kneighbors(inpaint_coords)
499
-
500
- albedo[tuple(inpaint_coords.T)] = albedo[tuple(search_coords[indices[:, 0]].T)]
501
-
502
- mesh.albedo = torch.from_numpy(albedo).to(self.device)
503
- mesh.write(path)
504
-
505
- else:
506
- path = os.path.join(self.opt.outdir, self.opt.save_path + '_model.ply')
507
- self.renderer.gaussians.save_ply(path)
508
-
509
- print(f"[INFO] save model to {path}.")
510
-
511
- def register_dpg(self):
512
- ### register texture
513
-
514
- with dpg.texture_registry(show=False):
515
- dpg.add_raw_texture(
516
- self.W,
517
- self.H,
518
- self.buffer_image,
519
- format=dpg.mvFormat_Float_rgb,
520
- tag="_texture",
521
- )
522
-
523
- ### register window
524
-
525
- # the rendered image, as the primary window
526
- with dpg.window(
527
- tag="_primary_window",
528
- width=self.W,
529
- height=self.H,
530
- pos=[0, 0],
531
- no_move=True,
532
- no_title_bar=True,
533
- no_scrollbar=True,
534
- ):
535
- # add the texture
536
- dpg.add_image("_texture")
537
-
538
- # dpg.set_primary_window("_primary_window", True)
539
-
540
- # control window
541
- with dpg.window(
542
- label="Control",
543
- tag="_control_window",
544
- width=600,
545
- height=self.H,
546
- pos=[self.W, 0],
547
- no_move=True,
548
- no_title_bar=True,
549
- ):
550
- # button theme
551
- with dpg.theme() as theme_button:
552
- with dpg.theme_component(dpg.mvButton):
553
- dpg.add_theme_color(dpg.mvThemeCol_Button, (23, 3, 18))
554
- dpg.add_theme_color(dpg.mvThemeCol_ButtonHovered, (51, 3, 47))
555
- dpg.add_theme_color(dpg.mvThemeCol_ButtonActive, (83, 18, 83))
556
- dpg.add_theme_style(dpg.mvStyleVar_FrameRounding, 5)
557
- dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 3, 3)
558
-
559
- # timer stuff
560
- with dpg.group(horizontal=True):
561
- dpg.add_text("Infer time: ")
562
- dpg.add_text("no data", tag="_log_infer_time")
563
-
564
- def callback_setattr(sender, app_data, user_data):
565
- setattr(self, user_data, app_data)
566
-
567
- # init stuff
568
- with dpg.collapsing_header(label="Initialize", default_open=True):
569
-
570
- # seed stuff
571
- def callback_set_seed(sender, app_data):
572
- self.seed = app_data
573
- self.seed_everything()
574
-
575
- dpg.add_input_text(
576
- label="seed",
577
- default_value=self.seed,
578
- on_enter=True,
579
- callback=callback_set_seed,
580
- )
581
-
582
- # input stuff
583
- def callback_select_input(sender, app_data):
584
- # only one item
585
- for k, v in app_data["selections"].items():
586
- dpg.set_value("_log_input", k)
587
- self.load_input(v)
588
-
589
- self.need_update = True
590
-
591
- with dpg.file_dialog(
592
- directory_selector=False,
593
- show=False,
594
- callback=callback_select_input,
595
- file_count=1,
596
- tag="file_dialog_tag",
597
- width=700,
598
- height=400,
599
- ):
600
- dpg.add_file_extension("Images{.jpg,.jpeg,.png}")
601
-
602
- with dpg.group(horizontal=True):
603
- dpg.add_button(
604
- label="input",
605
- callback=lambda: dpg.show_item("file_dialog_tag"),
606
- )
607
- dpg.add_text("", tag="_log_input")
608
-
609
- # overlay stuff
610
- with dpg.group(horizontal=True):
611
-
612
- def callback_toggle_overlay_input_img(sender, app_data):
613
- self.overlay_input_img = not self.overlay_input_img
614
- self.need_update = True
615
-
616
- dpg.add_checkbox(
617
- label="overlay image",
618
- default_value=self.overlay_input_img,
619
- callback=callback_toggle_overlay_input_img,
620
- )
621
-
622
- def callback_set_overlay_input_img_ratio(sender, app_data):
623
- self.overlay_input_img_ratio = app_data
624
- self.need_update = True
625
-
626
- dpg.add_slider_float(
627
- label="ratio",
628
- min_value=0,
629
- max_value=1,
630
- format="%.1f",
631
- default_value=self.overlay_input_img_ratio,
632
- callback=callback_set_overlay_input_img_ratio,
633
- )
634
-
635
- # prompt stuff
636
-
637
- dpg.add_input_text(
638
- label="prompt",
639
- default_value=self.prompt,
640
- callback=callback_setattr,
641
- user_data="prompt",
642
- )
643
-
644
- dpg.add_input_text(
645
- label="negative",
646
- default_value=self.negative_prompt,
647
- callback=callback_setattr,
648
- user_data="negative_prompt",
649
- )
650
-
651
- # save current model
652
- with dpg.group(horizontal=True):
653
- dpg.add_text("Save: ")
654
-
655
- def callback_save(sender, app_data, user_data):
656
- self.save_model(mode=user_data)
657
-
658
- dpg.add_button(
659
- label="model",
660
- tag="_button_save_model",
661
- callback=callback_save,
662
- user_data='model',
663
- )
664
- dpg.bind_item_theme("_button_save_model", theme_button)
665
-
666
- dpg.add_button(
667
- label="geo",
668
- tag="_button_save_mesh",
669
- callback=callback_save,
670
- user_data='geo',
671
- )
672
- dpg.bind_item_theme("_button_save_mesh", theme_button)
673
-
674
- dpg.add_button(
675
- label="geo+tex",
676
- tag="_button_save_mesh_with_tex",
677
- callback=callback_save,
678
- user_data='geo+tex',
679
- )
680
- dpg.bind_item_theme("_button_save_mesh_with_tex", theme_button)
681
-
682
- dpg.add_input_text(
683
- label="",
684
- default_value=self.opt.save_path,
685
- callback=callback_setattr,
686
- user_data="save_path",
687
- )
688
-
689
- # training stuff
690
- with dpg.collapsing_header(label="Train", default_open=True):
691
- # lr and train button
692
- with dpg.group(horizontal=True):
693
- dpg.add_text("Train: ")
694
-
695
- def callback_train(sender, app_data):
696
- if self.training:
697
- self.training = False
698
- dpg.configure_item("_button_train", label="start")
699
- else:
700
- self.prepare_train()
701
- self.training = True
702
- dpg.configure_item("_button_train", label="stop")
703
-
704
- # dpg.add_button(
705
- # label="init", tag="_button_init", callback=self.prepare_train
706
- # )
707
- # dpg.bind_item_theme("_button_init", theme_button)
708
-
709
- dpg.add_button(
710
- label="start", tag="_button_train", callback=callback_train
711
- )
712
- dpg.bind_item_theme("_button_train", theme_button)
713
-
714
- with dpg.group(horizontal=True):
715
- dpg.add_text("", tag="_log_train_time")
716
- dpg.add_text("", tag="_log_train_log")
717
-
718
- # rendering options
719
- with dpg.collapsing_header(label="Rendering", default_open=True):
720
- # mode combo
721
- def callback_change_mode(sender, app_data):
722
- self.mode = app_data
723
- self.need_update = True
724
-
725
- dpg.add_combo(
726
- ("image", "depth", "alpha"),
727
- label="mode",
728
- default_value=self.mode,
729
- callback=callback_change_mode,
730
- )
731
-
732
- # fov slider
733
- def callback_set_fovy(sender, app_data):
734
- self.cam.fovy = np.deg2rad(app_data)
735
- self.need_update = True
736
-
737
- dpg.add_slider_int(
738
- label="FoV (vertical)",
739
- min_value=1,
740
- max_value=120,
741
- format="%d deg",
742
- default_value=np.rad2deg(self.cam.fovy),
743
- callback=callback_set_fovy,
744
- )
745
-
746
- def callback_set_gaussain_scale(sender, app_data):
747
- self.gaussain_scale_factor = app_data
748
- self.need_update = True
749
-
750
- dpg.add_slider_float(
751
- label="gaussain scale",
752
- min_value=0,
753
- max_value=1,
754
- format="%.2f",
755
- default_value=self.gaussain_scale_factor,
756
- callback=callback_set_gaussain_scale,
757
- )
758
-
759
- ### register camera handler
760
-
761
- def callback_camera_drag_rotate_or_draw_mask(sender, app_data):
762
- if not dpg.is_item_focused("_primary_window"):
763
- return
764
-
765
- dx = app_data[1]
766
- dy = app_data[2]
767
-
768
- self.cam.orbit(dx, dy)
769
- self.need_update = True
770
-
771
- def callback_camera_wheel_scale(sender, app_data):
772
- if not dpg.is_item_focused("_primary_window"):
773
- return
774
-
775
- delta = app_data
776
-
777
- self.cam.scale(delta)
778
- self.need_update = True
779
-
780
- def callback_camera_drag_pan(sender, app_data):
781
- if not dpg.is_item_focused("_primary_window"):
782
- return
783
-
784
- dx = app_data[1]
785
- dy = app_data[2]
786
-
787
- self.cam.pan(dx, dy)
788
- self.need_update = True
789
-
790
- def callback_set_mouse_loc(sender, app_data):
791
- if not dpg.is_item_focused("_primary_window"):
792
- return
793
-
794
- # just the pixel coordinate in image
795
- self.mouse_loc = np.array(app_data)
796
-
797
- with dpg.handler_registry():
798
- # for camera moving
799
- dpg.add_mouse_drag_handler(
800
- button=dpg.mvMouseButton_Left,
801
- callback=callback_camera_drag_rotate_or_draw_mask,
802
- )
803
- dpg.add_mouse_wheel_handler(callback=callback_camera_wheel_scale)
804
- dpg.add_mouse_drag_handler(
805
- button=dpg.mvMouseButton_Middle, callback=callback_camera_drag_pan
806
- )
807
-
808
- dpg.create_viewport(
809
- title="Gaussian3D",
810
- width=self.W + 600,
811
- height=self.H + (45 if os.name == "nt" else 0),
812
- resizable=False,
813
- )
814
-
815
- ### global theme
816
- with dpg.theme() as theme_no_padding:
817
- with dpg.theme_component(dpg.mvAll):
818
- # set all padding to 0 to avoid scroll bar
819
- dpg.add_theme_style(
820
- dpg.mvStyleVar_WindowPadding, 0, 0, category=dpg.mvThemeCat_Core
821
- )
822
- dpg.add_theme_style(
823
- dpg.mvStyleVar_FramePadding, 0, 0, category=dpg.mvThemeCat_Core
824
- )
825
- dpg.add_theme_style(
826
- dpg.mvStyleVar_CellPadding, 0, 0, category=dpg.mvThemeCat_Core
827
- )
828
-
829
- dpg.bind_item_theme("_primary_window", theme_no_padding)
830
-
831
- dpg.setup_dearpygui()
832
-
833
- ### register a larger font
834
- # get it from: https://github.com/lxgw/LxgwWenKai/releases/download/v1.300/LXGWWenKai-Regular.ttf
835
- if os.path.exists("LXGWWenKai-Regular.ttf"):
836
- with dpg.font_registry():
837
- with dpg.font("LXGWWenKai-Regular.ttf", 18) as default_font:
838
- dpg.bind_font(default_font)
839
-
840
- # dpg.show_metrics()
841
-
842
- dpg.show_viewport()
843
-
844
- def render(self):
845
- assert self.gui
846
- while dpg.is_dearpygui_running():
847
- # update texture every frame
848
- if self.training:
849
- self.train_step()
850
- self.test_step()
851
- dpg.render_dearpygui_frame()
852
-
853
- # no gui mode
854
- def train(self, iters=500):
855
- if iters > 0:
856
- self.prepare_train()
857
- for i in tqdm.trange(iters):
858
- self.train_step()
859
- # do a last prune
860
- self.renderer.gaussians.prune(min_opacity=0.01, extent=1, max_screen_size=1)
861
- # save
862
- self.save_model(mode='model')
863
- self.save_model(mode='geo+tex')
864
-
865
-
866
- if __name__ == "__main__":
867
- import argparse
868
- from omegaconf import OmegaConf
869
-
870
- parser = argparse.ArgumentParser()
871
- parser.add_argument("--config", required=True, help="path to the yaml config file")
872
- args, extras = parser.parse_known_args()
873
-
874
- # override default config from cli
875
- opt = OmegaConf.merge(OmegaConf.load(args.config), OmegaConf.from_cli(extras))
876
-
877
- gui = GUI(opt)
878
-
879
- if opt.gui:
880
- gui.render()
881
- else:
882
- gui.train(opt.iters)
 
spaces/AgentVerse/agentVerse/agentverse/memory/__init__.py DELETED
@@ -1,9 +0,0 @@
- from agentverse.registry import Registry
- 
- memory_registry = Registry(name="MemoryRegistry")
- 
- from .base import BaseMemory
- from .chat_history import ChatHistoryMemory
- from .summary import SummaryMemory
- from .sde_team import SdeTeamMemory
- from .vectorstore import VectorStoreMemory
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/states/MatchState.js DELETED
@@ -1,160 +0,0 @@
1
- import BaseState from './BaseState.js';
2
- import EliminateChess from '../actions/EliminateChess.js';
3
- import FallingAllChess from '../actions/FallingAllChess.js';
4
-
5
- const GetValue = Phaser.Utils.Objects.GetValue;
6
- const SetStruct = Phaser.Structs.Set;
7
-
8
- class State extends BaseState {
9
- constructor(bejeweled, config) {
10
- super(bejeweled, config);
11
- // this.bejeweled = bejeweled; // Bejeweled
12
- // this.board = bejeweled.board; // Bejeweled.board
13
-
14
- this.totalMatchedLinesCount = 0;
15
- this.eliminatedChessArray;
16
-
17
- // Actions
18
- // Eliminating action
19
- this.eliminatingAction = GetValue(config, 'eliminatingAction', EliminateChess);
20
- // on falling chess
21
- this.fallingAction = GetValue(config, 'fallingAction', FallingAllChess);
22
-
23
- var debug = GetValue(config, 'debug', false);
24
- if (debug) {
25
- this.on('statechange', this.printState, this);
26
- }
27
- }
28
-
29
- shutdown() {
30
- super.shutdown();
31
-
32
- this.eliminatedChessArray = undefined;
33
- // Actions
34
- this.eliminatingAction = undefined;
35
- this.fallingAction = undefined;
36
- return this;
37
- }
38
-
39
- destroy() {
40
- this.shutdown();
41
- return this;
42
- }
43
-
44
- // START
45
- enter_START() {
46
- this.totalMatchedLinesCount = 0;
47
-
48
- this.bejeweled.emit('match-start', this.board.board, this.bejeweled);
49
-
50
- this.next();
51
- }
52
- next_START() {
53
- return 'MATCH3';
54
- }
55
- // START
56
-
57
- // MATCH3
58
- enter_MATCH3() {
59
- var matchedLines = this.board.getAllMatch();
60
-
61
- this.bejeweled.emit('match', matchedLines, this.board.board, this.bejeweled);
62
-
63
- var matchedLinesCount = matchedLines.length;
64
- this.totalMatchedLinesCount += matchedLinesCount;
65
- switch (matchedLinesCount) {
66
- case 0:
67
- this.eliminatedChessArray = [];
68
- break;
69
- case 1:
70
- this.eliminatedChessArray = matchedLines[0].entries;
71
- break;
72
- default:
73
- // Put all chess to a set
74
- var newSet = new SetStruct();
75
- for (var i = 0; i < matchedLinesCount; i++) {
76
- matchedLines[i].entries.forEach(function (value) {
77
- newSet.set(value);
78
- });
79
- }
80
- this.eliminatedChessArray = newSet.entries;
81
- break;
82
- }
83
- this.next();
84
- }
85
- next_MATCH3() {
86
- var nextState;
87
- if (this.eliminatedChessArray.length === 0) {
88
- nextState = 'END'
89
- } else {
90
- nextState = 'ELIMINATING';
91
- }
92
- return nextState;
93
- }
94
- // MATCH3
95
-
96
- // ELIMINATING
97
- enter_ELIMINATING() {
98
- var board = this.board.board,
99
- chessArray = this.eliminatedChessArray;
100
-
101
- this.bejeweled.emit('eliminate', chessArray, board, this.bejeweled);
102
-
103
- this.eliminatingAction(chessArray, board, this.bejeweled);
104
-
105
- // Remove eliminated chess
106
- chessArray.forEach(board.removeChess, board);
107
-
108
- // To next state when all completed
109
- this.next();
110
- }
111
- next_ELIMINATING() {
112
- return 'FALLING';
113
- }
114
- exit_ELIMINATING() {
115
- this.eliminatedChessArray = undefined;
116
- }
117
- // ELIMINATING
118
-
119
- // FALLING
120
- enter_FALLING() {
121
- var board = this.board.board;
122
-
123
- this.bejeweled.emit('fall', board, this.bejeweled);
124
-
125
- this.fallingAction(board, this.bejeweled);
126
-
127
- // To next state when all completed
128
- this.next();
129
- }
130
- next_FALLING() {
131
- return 'FILL';
132
- }
133
- // FALLING
134
-
135
- // FILL
136
- enter_FILL() {
137
- this.board.fill(true); // Fill upper board only
138
-
139
- this.bejeweled.emit('fill', this.board.board, this.bejeweled);
140
-
141
- this.next();
142
- }
143
- next_FILL() {
144
- return 'MATCH3';
145
- }
146
- // FILL
147
-
148
- // END
149
- enter_END() {
150
- this.bejeweled.emit('match-end', this.board.board, this.bejeweled);
151
-
152
- this.emit('complete');
153
- }
154
- // END
155
-
156
- printState() {
157
- console.log('Match state: ' + this.prevState + ' -> ' + this.state);
158
- }
159
- }
160
- export default State;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/cube/Cube.d.ts DELETED
@@ -1,2 +0,0 @@
- import Base from '../base/Base';
- export default class Cube extends Base { }
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/Factory.d.ts DELETED
@@ -1,17 +0,0 @@
- import FixWidthSizer from './FixWidthSizer';
- 
- 
- export default function (
- config?: FixWidthSizer.IConfig
- ): FixWidthSizer;
- 
- export default function (
- x: number, y: number,
- config?: FixWidthSizer.IConfig
- ): FixWidthSizer;
- 
- export default function (
- x: number, y: number,
- width: number, height: number,
- config?: FixWidthSizer.IConfig
- ): FixWidthSizer;
 
spaces/Amon1/ChatGPTForAcadamic/main.py DELETED
@@ -1,145 +0,0 @@
1
- import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
2
- import gradio as gr
3
- from predict import predict
4
- from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf
5
-
6
- # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
7
- proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT = \
8
- get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT')
9
-
10
- # 如果WEB_PORT是-1, 则随机选取WEB端口
11
- PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
12
- if not AUTHENTICATION: AUTHENTICATION = None
13
-
14
- initial_prompt = "Serve me as a writing and programming assistant."
15
- title_html = "<h1 align=\"center\">ChatGPT 学术优化</h1>"
16
- description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
17
-
18
- # 问询记录, python 版本建议3.9+(越新越好)
19
- import logging
20
- os.makedirs("gpt_log", exist_ok=True)
21
- try:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
22
- except:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO)
23
- print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")
24
-
25
- # 一些普通功能模块
26
- from functional import get_functionals
27
- functional = get_functionals()
28
-
29
- # 高级函数插件
30
- from functional_crazy import get_crazy_functionals
31
- crazy_fns = get_crazy_functionals()
32
-
33
- # 处理markdown文本格式的转变
34
- gr.Chatbot.postprocess = format_io
35
-
36
- # 做一些外观色彩上的调整
37
- from theme import adjust_theme, advanced_css
38
- set_theme = adjust_theme()
39
-
40
- cancel_handles = []
41
- with gr.Blocks(theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
42
- gr.HTML(title_html)
43
- with gr.Row().style(equal_height=True):
44
- with gr.Column(scale=2):
45
- chatbot = gr.Chatbot()
46
- chatbot.style(height=CHATBOT_HEIGHT)
47
- history = gr.State([])
48
- with gr.Column(scale=1):
49
- with gr.Row():
50
- txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
51
- with gr.Row():
52
- submitBtn = gr.Button("提交", variant="primary")
53
- with gr.Row():
54
- resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
55
- stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
56
- with gr.Row():
57
- from check_proxy import check_proxy
58
- status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {check_proxy(proxies)}")
59
- with gr.Accordion("基础功能区", open=True) as area_basic_fn:
60
- with gr.Row():
61
- for k in functional:
62
- variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
63
- functional[k]["Button"] = gr.Button(k, variant=variant)
64
- with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
65
- with gr.Row():
66
- gr.Markdown("注意:以下“红颜色”标识的函数插件需从input区读取路径作为参数.")
67
- with gr.Row():
68
- for k in crazy_fns:
69
- if not crazy_fns[k].get("AsButton", True): continue
70
- variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
71
- crazy_fns[k]["Button"] = gr.Button(k, variant=variant)
72
- with gr.Row():
73
- with gr.Accordion("更多函数插件", open=True):
74
- dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
75
- with gr.Column(scale=1):
76
- dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(container=False)
77
- with gr.Column(scale=1):
78
- switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
79
- with gr.Row():
80
- with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
81
- file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
82
- with gr.Accordion("展开SysPrompt & 交互界面布局 & Github地址", open=False):
83
- system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
84
- top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
85
- temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
86
- checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
87
- gr.Markdown(description)
88
- # 功能区显示开关与功能区的互动
89
- def fn_area_visibility(a):
90
- ret = {}
91
- ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
92
- ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
93
- return ret
94
- checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn] )
95
- # 整理反复出现的控件句柄组合
96
- input_combo = [txt, top_p, temperature, chatbot, history, system_prompt]
97
- output_combo = [chatbot, history, status]
98
- predict_args = dict(fn=predict, inputs=input_combo, outputs=output_combo)
99
- empty_txt_args = dict(fn=lambda: "", inputs=[], outputs=[txt]) # 用于在提交后清空输入栏
100
- # 提交按钮、重置按钮
101
- cancel_handles.append(txt.submit(**predict_args)) #; txt.submit(**empty_txt_args) 在提交后清空输入栏
102
- cancel_handles.append(submitBtn.click(**predict_args)) #; submitBtn.click(**empty_txt_args) 在提交后清空输入栏
103
- resetBtn.click(lambda: ([], [], "已重置"), None, output_combo)
104
- # 基础功能区的回调函数注册
105
- for k in functional:
106
- click_handle = functional[k]["Button"].click(predict, [*input_combo, gr.State(True), gr.State(k)], output_combo)
107
- cancel_handles.append(click_handle)
108
- # 文件上传区,接收文件后与chatbot的互动
109
- file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
110
- # 函数插件-固定按钮区
111
- for k in crazy_fns:
112
- if not crazy_fns[k].get("AsButton", True): continue
113
- click_handle = crazy_fns[k]["Button"].click(crazy_fns[k]["Function"], [*input_combo, gr.State(PORT)], output_combo)
114
- click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
115
- cancel_handles.append(click_handle)
116
- # 函数插件-下拉菜单与随变按钮的互动
117
- def on_dropdown_changed(k):
118
- variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
119
- return {switchy_bt: gr.update(value=k, variant=variant)}
120
- dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt] )
121
- # 随变按钮的回调函数注册
122
- def route(k, *args, **kwargs):
123
- if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
124
- yield from crazy_fns[k]["Function"](*args, **kwargs)
125
- click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
126
- click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
127
- # def expand_file_area(file_upload, area_file_up):
128
- # if len(file_upload)>0: return {area_file_up: gr.update(open=True)}
129
- # click_handle.then(expand_file_area, [file_upload, area_file_up], [area_file_up])
130
- cancel_handles.append(click_handle)
131
- # 终止按钮的回调函数注册
132
- stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
133
-
134
- # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
135
- def auto_opentab_delay():
136
- import threading, webbrowser, time
137
- print(f"如果浏览器没有自动打开,请复制并转到以下URL: http://localhost:{PORT}")
138
- def open():
139
- time.sleep(2)
140
- webbrowser.open_new_tab(f"http://localhost:{PORT}")
141
- threading.Thread(target=open, name="open-browser", daemon=True).start()
142
-
143
- auto_opentab_delay()
144
- demo.title = "ChatGPT 学术优化"
145
- demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION)
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/using-diffusers/schedulers.md DELETED
@@ -1,329 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # 스케줄러
14
-
15
- diffusion 파이프라인은 diffusion 모델, 스케줄러 등의 컴포넌트들로 구성됩니다. 그리고 파이프라인 안의 일부 컴포넌트를 다른 컴포넌트로 교체하는 식의 커스터마이징 역시 가능합니다. 이와 같은 컴포넌트 커스터마이징의 가장 대표적인 예시가 바로 [스케줄러](../api/schedulers/overview.md)를 교체하는 것입니다.
16
-
17
-
18
-
19
- 스케줄러는 다음과 같이 diffusion 시스템의 전반적인 디노이징 프로세스를 정의합니다.
20
-
21
- - 디노이징 스텝을 얼마나 가져가야 할까?
22
- - 확률적으로(stochastic) 혹은 확정적으로(deterministic)?
23
- - 디노이징 된 샘플을 찾아내기 위해 어떤 알고리즘을 사용해야 할까?
24
-
25
- 이러한 프로세스는 다소 난해하고, 디노이징 속도와 디노이징 퀄리티 사이의 트레이드 오프를 정의해야 하는 문제가 될 수 있습니다. 주어진 파이프라인에 어떤 스케줄러가 가장 적합한지를 정량적으로 판단하는 것은 매우 어려운 일입니다. 이로 인해 일단 해당 스케줄러를 직접 사용하여, 생성되는 이미지를 직접 눈으로 보며, 정성적으로 성능을 판단해보는 것이 추천되곤 합니다.
26
-
27
-
28
-
29
-
30
-
31
- ## 파이프라인 불러오기
32
-
33
- 먼저 스테이블 diffusion 파이프라인을 불러오도록 해보겠습니다. 물론 스테이블 diffusion을 사용하기 위해서는, 허깅페이스 허브에 등록된 사용자여야 하며, 관련 [라이센스](https://huggingface.co/runwayml/stable-diffusion-v1-5)에 동의해야 한다는 점을 잊지 말아주세요.
34
-
35
- *역자 주: 다만, 현재 신규로 생성한 허깅페이스 계정에 대해서는 라이센스 동의를 요구하지 않는 것으로 보입니다!*
36
-
37
- ```python
38
- from huggingface_hub import login
39
- from diffusers import DiffusionPipeline
40
- import torch
41
-
42
- # first we need to login with our access token
43
- login()
44
-
45
- # Now we can download the pipeline
46
- pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
47
- ```
48
-
49
- 다음으로, GPU로 이동합니다.
50
-
51
- ```python
52
- pipeline.to("cuda")
53
- ```
54
-
55
-
56
-
57
-
58
-
59
- ## 스케줄러 액세스
60
-
61
- 스케줄러는 언제나 파이프라인의 컴포넌트로서 존재하며, 일반적으로 파이프라인 인스턴스 내에 `scheduler`라는 이름의 속성(property)으로 정의되어 있습니다.
62
-
63
- ```python
64
- pipeline.scheduler
65
- ```
66
-
67
- **Output**:
68
-
69
- ```
70
- PNDMScheduler {
71
- "_class_name": "PNDMScheduler",
72
- "_diffusers_version": "0.8.0.dev0",
73
- "beta_end": 0.012,
74
- "beta_schedule": "scaled_linear",
75
- "beta_start": 0.00085,
76
- "clip_sample": false,
77
- "num_train_timesteps": 1000,
78
- "set_alpha_to_one": false,
79
- "skip_prk_steps": true,
80
- "steps_offset": 1,
81
- "trained_betas": null
82
- }
83
- ```
84
-
85
- 출력 결과를 통해, 우리는 해당 스케줄러가 [`PNDMScheduler`]의 인스턴스라는 것을 알 수 있습니다. 이제 [`PNDMScheduler`]와 다른 스케줄러들의 성능을 비교해보도록 하겠습니다. 먼저 테스트에 사용할 프롬프트를 다음과 같이 정의해보도록 하겠습니다.
86
-
87
- ```python
88
- prompt = "A photograph of an astronaut riding a horse on Mars, high resolution, high definition."
89
- ```
90
-
91
- 다음으로 유사한 이미지 생성을 보장하기 위해서, 다음과 같이 랜덤시드를 고정해주도록 하겠습니다.
92
-
93
- ```python
94
- generator = torch.Generator(device="cuda").manual_seed(8)
95
- image = pipeline(prompt, generator=generator).images[0]
96
- image
97
- ```
98
-
99
- <p align="center">
100
- <br>
101
- <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_pndm.png" width="400"/>
102
- <br>
103
- </p>
104
-
105
-
106
-
107
-
108
- ## 스케줄러 교체하기
109
-
110
- 다음으로 파이프라인의 스케줄러를 다른 스케줄러로 교체하는 방법에 대해 알아보겠습니다. 모든 스케줄러는 [`SchedulerMixin.compatibles`]라는 속성(property)을 갖고 있습니다. 해당 속성은 **호환 가능한** 스케줄러들에 대한 정보를 담고 있습니다.
111
-
112
- ```python
113
- pipeline.scheduler.compatibles
114
- ```
115
-
116
- **Output**:
117
-
118
- ```
119
- [diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler,
120
- diffusers.schedulers.scheduling_ddim.DDIMScheduler,
121
- diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler,
122
- diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler,
123
- diffusers.schedulers.scheduling_pndm.PNDMScheduler,
124
- diffusers.schedulers.scheduling_ddpm.DDPMScheduler,
125
- diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler]
126
- ```
127
-
128
- 호환되는 스케줄러들을 살펴보면 아래와 같습니다.
129
-
130
- - [`LMSDiscreteScheduler`],
131
- - [`DDIMScheduler`],
132
- - [`DPMSolverMultistepScheduler`],
133
- - [`EulerDiscreteScheduler`],
134
- - [`PNDMScheduler`],
135
- - [`DDPMScheduler`],
136
- - [`EulerAncestralDiscreteScheduler`].
137
-
138
- 앞서 정의했던 프롬프트를 사용해서 각각의 스케줄러들을 비교해보도록 하겠습니다.
139
-
140
- 먼저 파이프라인 안의 스케줄러를 바꾸기 위해 [`ConfigMixin.config`] 속성과 [`ConfigMixin.from_config`] 메서드를 활용해보려고 합니다.
141
-
142
-
143
-
144
- ```python
145
- pipeline.scheduler.config
146
- ```
147
-
148
- **Output**:
149
-
150
- ```
151
- FrozenDict([('num_train_timesteps', 1000),
152
- ('beta_start', 0.00085),
153
- ('beta_end', 0.012),
154
- ('beta_schedule', 'scaled_linear'),
155
- ('trained_betas', None),
156
- ('skip_prk_steps', True),
157
- ('set_alpha_to_one', False),
158
- ('steps_offset', 1),
159
- ('_class_name', 'PNDMScheduler'),
160
- ('_diffusers_version', '0.8.0.dev0'),
161
- ('clip_sample', False)])
162
- ```
163
-
164
- 기존 스케줄러의 config를 호환 가능한 다른 스케줄러에 이식하는 것 역시 가능합니다.
165
-
166
- 다음 예시는 기존 스케줄러(`pipeline.scheduler`)를 다른 종류의 스케줄러(`DDIMScheduler`)로 바꾸는 코드입니다. 기존 스케줄러가 갖고 있던 config를 `.from_config` 메서드의 인자로 전달하는 것을 확인할 수 있습니다.
167
-
168
- ```python
169
- from diffusers import DDIMScheduler
170
-
171
- pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
172
- ```
173
-
174
-
175
-
176
- 이제 파이프라인을 실행해서 두 스케줄러 사이의 생성된 이미지의 퀄리티를 비교해봅시다.
177
-
178
- ```python
179
- generator = torch.Generator(device="cuda").manual_seed(8)
180
- image = pipeline(prompt, generator=generator).images[0]
181
- image
182
- ```
183
-
184
- <p align="center">
185
- <br>
186
- <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_ddim.png" width="400"/>
187
- <br>
188
- </p>
189
-
190
-
191
-
192
-
193
- ## 스케줄러들 비교해보기
194
-
195
- 지금까지는 [`PNDMScheduler`]와 [`DDIMScheduler`] 스케줄러를 실행해보았습니다. 아직 비교해볼 스케줄러들이 더 많이 남아있으니 계속 비교해보도록 하겠습니다.
196
-
197
-
198
-
199
- [`LMSDiscreteScheduler`]는 일반적으로 더 좋은 결과를 보여줍니다.
200
-
201
- ```python
202
- from diffusers import LMSDiscreteScheduler
203
-
204
- pipeline.scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config)
205
-
206
- generator = torch.Generator(device="cuda").manual_seed(8)
207
- image = pipeline(prompt, generator=generator).images[0]
208
- image
209
- ```
210
-
211
- <p align="center">
212
- <br>
213
- <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_lms.png" width="400"/>
214
- <br>
215
- </p>
216
-
217
-
218
- [`EulerDiscreteScheduler`]와 [`EulerAncestralDiscreteScheduler`]는 고작 30번의 inference step만으로도 높은 퀄리티의 이미지를 생성하는 것을 알 수 있습니다.
219
-
220
- ```python
221
- from diffusers import EulerDiscreteScheduler
222
-
223
- pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
224
-
225
- generator = torch.Generator(device="cuda").manual_seed(8)
226
- image = pipeline(prompt, generator=generator, num_inference_steps=30).images[0]
227
- image
228
- ```
229
-
230
- <p align="center">
231
- <br>
232
- <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_euler_discrete.png" width="400"/>
233
- <br>
234
- </p>
235
-
236
-
237
- ```python
238
- from diffusers import EulerAncestralDiscreteScheduler
239
-
240
- pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(pipeline.scheduler.config)
241
-
242
- generator = torch.Generator(device="cuda").manual_seed(8)
243
- image = pipeline(prompt, generator=generator, num_inference_steps=30).images[0]
244
- image
245
- ```
246
-
247
- <p align="center">
248
- <br>
249
- <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_euler_ancestral.png" width="400"/>
250
- <br>
251
- </p>
252
-
253
-
254
- 지금 이 문서를 작성하는 현시점 기준에선, [`DPMSolverMultistepScheduler`]가 시간 대비 가장 좋은 품질의 이미지를 생성하는 것 같습니다. 20번 정도의 스텝만으로도 실행될 수 있습니다.
255
-
256
-
257
-
258
- ```python
259
- from diffusers import DPMSolverMultistepScheduler
260
-
261
- pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
262
-
263
- generator = torch.Generator(device="cuda").manual_seed(8)
264
- image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0]
265
- image
266
- ```
267
-
268
- <p align="center">
269
- <br>
270
- <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_dpm.png" width="400"/>
271
- <br>
272
- </p>
273
-
274
-
275
- 보시다시피 생성된 이미지들은 매우 비슷하고, 비슷한 퀄리티를 보이는 것 같습니다. 실제로 어떤 스케줄러를 선택할 것인가는 종종 특정 이용 사례에 기반해서 결정되곤 합니다. 결국 여러 종류의 스케줄러를 직접 실행시켜보고 눈으로 직접 비교해서 판단하는 게 좋은 선택일 것 같습니다.
276
-
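- 스케줄러 교체는 `from_config` 한 줄이면 충분하므로, 호환 가능한 스케줄러 몇 개를 반복문으로 돌려가며 같은 시드로 결과를 모아 비교할 수도 있습니다. 아래는 위에서 만든 `pipeline`과 `prompt`가 준비되어 있다고 가정한 간단한 스케치이며, 스텝 수(25)는 임의로 정한 값입니다.
-
- ```python
- from diffusers import (
-     DDIMScheduler,
-     DPMSolverMultistepScheduler,
-     EulerDiscreteScheduler,
-     LMSDiscreteScheduler,
- )
-
- # 같은 시드로 각 스케줄러의 결과를 모아서 비교합니다.
- results = {}
- for scheduler_cls in [DDIMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler]:
-     pipeline.scheduler = scheduler_cls.from_config(pipeline.scheduler.config)
-     generator = torch.Generator(device="cuda").manual_seed(8)
-     results[scheduler_cls.__name__] = pipeline(prompt, generator=generator, num_inference_steps=25).images[0]
- ```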
277
-
278
-
279
- ## Flax에서 스케줄러 교체하기
280
-
281
- JAX/Flax 사용자인 경우 기본 파이프라인 스케줄러를 변경할 수도 있습니다. 다음은 Flax Stable Diffusion 파이프라인과 초고속 [DPM-Solver++ 스케줄러를](../api/schedulers/multistep_dpm_solver) 사용하여 추론을 실행하는 방법에 대한 예시입니다.
282
-
283
- ```Python
284
- import jax
285
- import numpy as np
286
- from flax.jax_utils import replicate
287
- from flax.training.common_utils import shard
288
-
289
- from diffusers import FlaxStableDiffusionPipeline, FlaxDPMSolverMultistepScheduler
290
-
291
- model_id = "runwayml/stable-diffusion-v1-5"
292
- scheduler, scheduler_state = FlaxDPMSolverMultistepScheduler.from_pretrained(
293
- model_id,
294
- subfolder="scheduler"
295
- )
296
- pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
297
- model_id,
298
- scheduler=scheduler,
299
- revision="bf16",
300
- dtype=jax.numpy.bfloat16,
301
- )
302
- params["scheduler"] = scheduler_state
303
-
304
- # Generate 1 image per parallel device (8 on TPUv2-8 or TPUv3-8)
305
- prompt = "a photo of an astronaut riding a horse on mars"
306
- num_samples = jax.device_count()
307
- prompt_ids = pipeline.prepare_inputs([prompt] * num_samples)
308
-
309
- prng_seed = jax.random.PRNGKey(0)
310
- num_inference_steps = 25
311
-
312
- # shard inputs and rng
313
- params = replicate(params)
314
- prng_seed = jax.random.split(prng_seed, jax.device_count())
315
- prompt_ids = shard(prompt_ids)
316
-
317
- images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
318
- images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
319
- ```
320
-
321
- <Tip warning={true}>
322
-
323
- 다음 Flax 스케줄러는 *아직* Flax Stable Diffusion 파이프라인과 호환되지 않습니다.
324
-
325
- - `FlaxLMSDiscreteScheduler`
326
- - `FlaxDDPMScheduler`
327
-
328
- </Tip>
329
-
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_kakao_brain_unclip_to_diffusers.py DELETED
@@ -1,1159 +0,0 @@
1
- import argparse
2
- import tempfile
3
-
4
- import torch
5
- from accelerate import load_checkpoint_and_dispatch
6
- from transformers import CLIPTextModelWithProjection, CLIPTokenizer
7
-
8
- from diffusers import UnCLIPPipeline, UNet2DConditionModel, UNet2DModel
9
- from diffusers.models.prior_transformer import PriorTransformer
10
- from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
11
- from diffusers.schedulers.scheduling_unclip import UnCLIPScheduler
12
-
13
-
14
- """
15
- Example - From the diffusers root directory:
16
-
17
- Download weights:
18
- ```sh
19
- $ wget https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/efdf6206d8ed593961593dc029a8affa/decoder-ckpt-step%3D01000000-of-01000000.ckpt
20
- $ wget https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/4226b831ae0279020d134281f3c31590/improved-sr-ckpt-step%3D1.2M.ckpt
21
- $ wget https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/85626483eaca9f581e2a78d31ff905ca/prior-ckpt-step%3D01000000-of-01000000.ckpt
22
- $ wget https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/0b62380a75e56f073e2844ab5199153d/ViT-L-14_stats.th
23
- ```
24
-
25
- Convert the model:
26
- ```sh
27
- $ python scripts/convert_kakao_brain_unclip_to_diffusers.py \
28
- --decoder_checkpoint_path ./decoder-ckpt-step\=01000000-of-01000000.ckpt \
29
- --super_res_unet_checkpoint_path ./improved-sr-ckpt-step\=1.2M.ckpt \
30
- --prior_checkpoint_path ./prior-ckpt-step\=01000000-of-01000000.ckpt \
31
- --clip_stat_path ./ViT-L-14_stats.th \
32
- --dump_path <path where to save model>
33
- ```
34
- """
35
-
36
-
37
- # prior
38
-
39
- PRIOR_ORIGINAL_PREFIX = "model"
40
-
41
- # Uses default arguments
42
- PRIOR_CONFIG = {}
43
-
44
-
45
- def prior_model_from_original_config():
46
- model = PriorTransformer(**PRIOR_CONFIG)
47
-
48
- return model
49
-
50
-
51
- def prior_original_checkpoint_to_diffusers_checkpoint(model, checkpoint, clip_stats_checkpoint):
52
- diffusers_checkpoint = {}
53
-
54
- # <original>.time_embed.0 -> <diffusers>.time_embedding.linear_1
55
- diffusers_checkpoint.update(
56
- {
57
- "time_embedding.linear_1.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.0.weight"],
58
- "time_embedding.linear_1.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.0.bias"],
59
- }
60
- )
61
-
62
- # <original>.clip_img_proj -> <diffusers>.proj_in
63
- diffusers_checkpoint.update(
64
- {
65
- "proj_in.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.clip_img_proj.weight"],
66
- "proj_in.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.clip_img_proj.bias"],
67
- }
68
- )
69
-
70
- # <original>.text_emb_proj -> <diffusers>.embedding_proj
71
- diffusers_checkpoint.update(
72
- {
73
- "embedding_proj.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_emb_proj.weight"],
74
- "embedding_proj.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_emb_proj.bias"],
75
- }
76
- )
77
-
78
- # <original>.text_enc_proj -> <diffusers>.encoder_hidden_states_proj
79
- diffusers_checkpoint.update(
80
- {
81
- "encoder_hidden_states_proj.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_enc_proj.weight"],
82
- "encoder_hidden_states_proj.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_enc_proj.bias"],
83
- }
84
- )
85
-
86
- # <original>.positional_embedding -> <diffusers>.positional_embedding
87
- diffusers_checkpoint.update({"positional_embedding": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.positional_embedding"]})
88
-
89
- # <original>.prd_emb -> <diffusers>.prd_embedding
90
- diffusers_checkpoint.update({"prd_embedding": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.prd_emb"]})
91
-
92
- # <original>.time_embed.2 -> <diffusers>.time_embedding.linear_2
93
- diffusers_checkpoint.update(
94
- {
95
- "time_embedding.linear_2.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.2.weight"],
96
- "time_embedding.linear_2.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.2.bias"],
97
- }
98
- )
99
-
100
- # <original>.resblocks.<x> -> <diffusers>.transformer_blocks.<x>
101
- for idx in range(len(model.transformer_blocks)):
102
- diffusers_transformer_prefix = f"transformer_blocks.{idx}"
103
- original_transformer_prefix = f"{PRIOR_ORIGINAL_PREFIX}.transformer.resblocks.{idx}"
104
-
105
- # <original>.attn -> <diffusers>.attn1
106
- diffusers_attention_prefix = f"{diffusers_transformer_prefix}.attn1"
107
- original_attention_prefix = f"{original_transformer_prefix}.attn"
108
- diffusers_checkpoint.update(
109
- prior_attention_to_diffusers(
110
- checkpoint,
111
- diffusers_attention_prefix=diffusers_attention_prefix,
112
- original_attention_prefix=original_attention_prefix,
113
- attention_head_dim=model.attention_head_dim,
114
- )
115
- )
116
-
117
- # <original>.mlp -> <diffusers>.ff
118
- diffusers_ff_prefix = f"{diffusers_transformer_prefix}.ff"
119
- original_ff_prefix = f"{original_transformer_prefix}.mlp"
120
- diffusers_checkpoint.update(
121
- prior_ff_to_diffusers(
122
- checkpoint, diffusers_ff_prefix=diffusers_ff_prefix, original_ff_prefix=original_ff_prefix
123
- )
124
- )
125
-
126
- # <original>.ln_1 -> <diffusers>.norm1
127
- diffusers_checkpoint.update(
128
- {
129
- f"{diffusers_transformer_prefix}.norm1.weight": checkpoint[
130
- f"{original_transformer_prefix}.ln_1.weight"
131
- ],
132
- f"{diffusers_transformer_prefix}.norm1.bias": checkpoint[f"{original_transformer_prefix}.ln_1.bias"],
133
- }
134
- )
135
-
136
- # <original>.ln_2 -> <diffusers>.norm3
137
- diffusers_checkpoint.update(
138
- {
139
- f"{diffusers_transformer_prefix}.norm3.weight": checkpoint[
140
- f"{original_transformer_prefix}.ln_2.weight"
141
- ],
142
- f"{diffusers_transformer_prefix}.norm3.bias": checkpoint[f"{original_transformer_prefix}.ln_2.bias"],
143
- }
144
- )
145
-
146
- # <original>.final_ln -> <diffusers>.norm_out
147
- diffusers_checkpoint.update(
148
- {
149
- "norm_out.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.final_ln.weight"],
150
- "norm_out.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.final_ln.bias"],
151
- }
152
- )
153
-
154
- # <original>.out_proj -> <diffusers>.proj_to_clip_embeddings
155
- diffusers_checkpoint.update(
156
- {
157
- "proj_to_clip_embeddings.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.out_proj.weight"],
158
- "proj_to_clip_embeddings.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.out_proj.bias"],
159
- }
160
- )
161
-
162
- # clip stats
163
- clip_mean, clip_std = clip_stats_checkpoint
164
- clip_mean = clip_mean[None, :]
165
- clip_std = clip_std[None, :]
166
-
167
- diffusers_checkpoint.update({"clip_mean": clip_mean, "clip_std": clip_std})
168
-
169
- return diffusers_checkpoint
170
-
171
-
172
- def prior_attention_to_diffusers(
173
- checkpoint, *, diffusers_attention_prefix, original_attention_prefix, attention_head_dim
174
- ):
175
- diffusers_checkpoint = {}
176
-
177
- # <original>.c_qkv -> <diffusers>.{to_q, to_k, to_v}
178
- [q_weight, k_weight, v_weight], [q_bias, k_bias, v_bias] = split_attentions(
179
- weight=checkpoint[f"{original_attention_prefix}.c_qkv.weight"],
180
- bias=checkpoint[f"{original_attention_prefix}.c_qkv.bias"],
181
- split=3,
182
- chunk_size=attention_head_dim,
183
- )
184
-
185
- diffusers_checkpoint.update(
186
- {
187
- f"{diffusers_attention_prefix}.to_q.weight": q_weight,
188
- f"{diffusers_attention_prefix}.to_q.bias": q_bias,
189
- f"{diffusers_attention_prefix}.to_k.weight": k_weight,
190
- f"{diffusers_attention_prefix}.to_k.bias": k_bias,
191
- f"{diffusers_attention_prefix}.to_v.weight": v_weight,
192
- f"{diffusers_attention_prefix}.to_v.bias": v_bias,
193
- }
194
- )
195
-
196
- # <original>.c_proj -> <diffusers>.to_out.0
197
- diffusers_checkpoint.update(
198
- {
199
- f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{original_attention_prefix}.c_proj.weight"],
200
- f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{original_attention_prefix}.c_proj.bias"],
201
- }
202
- )
203
-
204
- return diffusers_checkpoint
205
-
206
-
207
- def prior_ff_to_diffusers(checkpoint, *, diffusers_ff_prefix, original_ff_prefix):
208
- diffusers_checkpoint = {
209
- # <original>.c_fc -> <diffusers>.net.0.proj
210
- f"{diffusers_ff_prefix}.net.{0}.proj.weight": checkpoint[f"{original_ff_prefix}.c_fc.weight"],
211
- f"{diffusers_ff_prefix}.net.{0}.proj.bias": checkpoint[f"{original_ff_prefix}.c_fc.bias"],
212
- # <original>.c_proj -> <diffusers>.net.2
213
- f"{diffusers_ff_prefix}.net.{2}.weight": checkpoint[f"{original_ff_prefix}.c_proj.weight"],
214
- f"{diffusers_ff_prefix}.net.{2}.bias": checkpoint[f"{original_ff_prefix}.c_proj.bias"],
215
- }
216
-
217
- return diffusers_checkpoint
218
-
219
-
220
- # done prior
221
-
222
-
223
- # decoder
224
-
225
- DECODER_ORIGINAL_PREFIX = "model"
226
-
227
- # We are hardcoding the model configuration for now. If we need to generalize to more model configurations, we can
228
- # update then.
229
- DECODER_CONFIG = {
230
- "sample_size": 64,
231
- "layers_per_block": 3,
232
- "down_block_types": (
233
- "ResnetDownsampleBlock2D",
234
- "SimpleCrossAttnDownBlock2D",
235
- "SimpleCrossAttnDownBlock2D",
236
- "SimpleCrossAttnDownBlock2D",
237
- ),
238
- "up_block_types": (
239
- "SimpleCrossAttnUpBlock2D",
240
- "SimpleCrossAttnUpBlock2D",
241
- "SimpleCrossAttnUpBlock2D",
242
- "ResnetUpsampleBlock2D",
243
- ),
244
- "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
245
- "block_out_channels": (320, 640, 960, 1280),
246
- "in_channels": 3,
247
- "out_channels": 6,
248
- "cross_attention_dim": 1536,
249
- "class_embed_type": "identity",
250
- "attention_head_dim": 64,
251
- "resnet_time_scale_shift": "scale_shift",
252
- }
253
-
254
-
255
- def decoder_model_from_original_config():
256
- model = UNet2DConditionModel(**DECODER_CONFIG)
257
-
258
- return model
259
-
260
-
261
- def decoder_original_checkpoint_to_diffusers_checkpoint(model, checkpoint):
262
- diffusers_checkpoint = {}
263
-
264
- original_unet_prefix = DECODER_ORIGINAL_PREFIX
265
- num_head_channels = DECODER_CONFIG["attention_head_dim"]
266
-
267
- diffusers_checkpoint.update(unet_time_embeddings(checkpoint, original_unet_prefix))
268
- diffusers_checkpoint.update(unet_conv_in(checkpoint, original_unet_prefix))
269
-
270
- # <original>.input_blocks -> <diffusers>.down_blocks
271
-
272
- original_down_block_idx = 1
273
-
274
- for diffusers_down_block_idx in range(len(model.down_blocks)):
275
- checkpoint_update, num_original_down_blocks = unet_downblock_to_diffusers_checkpoint(
276
- model,
277
- checkpoint,
278
- diffusers_down_block_idx=diffusers_down_block_idx,
279
- original_down_block_idx=original_down_block_idx,
280
- original_unet_prefix=original_unet_prefix,
281
- num_head_channels=num_head_channels,
282
- )
283
-
284
- original_down_block_idx += num_original_down_blocks
285
-
286
- diffusers_checkpoint.update(checkpoint_update)
287
-
288
- # done <original>.input_blocks -> <diffusers>.down_blocks
289
-
290
- diffusers_checkpoint.update(
291
- unet_midblock_to_diffusers_checkpoint(
292
- model,
293
- checkpoint,
294
- original_unet_prefix=original_unet_prefix,
295
- num_head_channels=num_head_channels,
296
- )
297
- )
298
-
299
- # <original>.output_blocks -> <diffusers>.up_blocks
300
-
301
- original_up_block_idx = 0
302
-
303
- for diffusers_up_block_idx in range(len(model.up_blocks)):
304
- checkpoint_update, num_original_up_blocks = unet_upblock_to_diffusers_checkpoint(
305
- model,
306
- checkpoint,
307
- diffusers_up_block_idx=diffusers_up_block_idx,
308
- original_up_block_idx=original_up_block_idx,
309
- original_unet_prefix=original_unet_prefix,
310
- num_head_channels=num_head_channels,
311
- )
312
-
313
- original_up_block_idx += num_original_up_blocks
314
-
315
- diffusers_checkpoint.update(checkpoint_update)
316
-
317
- # done <original>.output_blocks -> <diffusers>.up_blocks
318
-
319
- diffusers_checkpoint.update(unet_conv_norm_out(checkpoint, original_unet_prefix))
320
- diffusers_checkpoint.update(unet_conv_out(checkpoint, original_unet_prefix))
321
-
322
- return diffusers_checkpoint
323
-
324
-
325
- # done decoder
326
-
327
- # text proj
328
-
329
-
330
- def text_proj_from_original_config():
331
- # From the conditional unet constructor where the dimension of the projected time embeddings is
332
- # constructed
333
- time_embed_dim = DECODER_CONFIG["block_out_channels"][0] * 4
334
-
335
- cross_attention_dim = DECODER_CONFIG["cross_attention_dim"]
336
-
337
- model = UnCLIPTextProjModel(time_embed_dim=time_embed_dim, cross_attention_dim=cross_attention_dim)
338
-
339
- return model
340
-
341
-
342
- # Note that the input checkpoint is the original decoder checkpoint
343
- def text_proj_original_checkpoint_to_diffusers_checkpoint(checkpoint):
344
- diffusers_checkpoint = {
345
- # <original>.text_seq_proj.0 -> <diffusers>.encoder_hidden_states_proj
346
- "encoder_hidden_states_proj.weight": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.text_seq_proj.0.weight"],
347
- "encoder_hidden_states_proj.bias": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.text_seq_proj.0.bias"],
348
- # <original>.text_seq_proj.1 -> <diffusers>.text_encoder_hidden_states_norm
349
- "text_encoder_hidden_states_norm.weight": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.text_seq_proj.1.weight"],
350
- "text_encoder_hidden_states_norm.bias": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.text_seq_proj.1.bias"],
351
- # <original>.clip_tok_proj -> <diffusers>.clip_extra_context_tokens_proj
352
- "clip_extra_context_tokens_proj.weight": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.clip_tok_proj.weight"],
353
- "clip_extra_context_tokens_proj.bias": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.clip_tok_proj.bias"],
354
- # <original>.text_feat_proj -> <diffusers>.embedding_proj
355
- "embedding_proj.weight": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.text_feat_proj.weight"],
356
- "embedding_proj.bias": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.text_feat_proj.bias"],
357
- # <original>.cf_param -> <diffusers>.learned_classifier_free_guidance_embeddings
358
- "learned_classifier_free_guidance_embeddings": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.cf_param"],
359
- # <original>.clip_emb -> <diffusers>.clip_image_embeddings_project_to_time_embeddings
360
- "clip_image_embeddings_project_to_time_embeddings.weight": checkpoint[
361
- f"{DECODER_ORIGINAL_PREFIX}.clip_emb.weight"
362
- ],
363
- "clip_image_embeddings_project_to_time_embeddings.bias": checkpoint[
364
- f"{DECODER_ORIGINAL_PREFIX}.clip_emb.bias"
365
- ],
366
- }
367
-
368
- return diffusers_checkpoint
369
-
370
-
371
- # done text proj
372
-
373
- # super res unet first steps
374
-
375
- SUPER_RES_UNET_FIRST_STEPS_PREFIX = "model_first_steps"
376
-
377
- SUPER_RES_UNET_FIRST_STEPS_CONFIG = {
378
- "sample_size": 256,
379
- "layers_per_block": 3,
380
- "down_block_types": (
381
- "ResnetDownsampleBlock2D",
382
- "ResnetDownsampleBlock2D",
383
- "ResnetDownsampleBlock2D",
384
- "ResnetDownsampleBlock2D",
385
- ),
386
- "up_block_types": (
387
- "ResnetUpsampleBlock2D",
388
- "ResnetUpsampleBlock2D",
389
- "ResnetUpsampleBlock2D",
390
- "ResnetUpsampleBlock2D",
391
- ),
392
- "block_out_channels": (320, 640, 960, 1280),
393
- "in_channels": 6,
394
- "out_channels": 3,
395
- "add_attention": False,
396
- }
397
-
398
-
399
- def super_res_unet_first_steps_model_from_original_config():
400
- model = UNet2DModel(**SUPER_RES_UNET_FIRST_STEPS_CONFIG)
401
-
402
- return model
403
-
404
-
405
- def super_res_unet_first_steps_original_checkpoint_to_diffusers_checkpoint(model, checkpoint):
406
- diffusers_checkpoint = {}
407
-
408
- original_unet_prefix = SUPER_RES_UNET_FIRST_STEPS_PREFIX
409
-
410
- diffusers_checkpoint.update(unet_time_embeddings(checkpoint, original_unet_prefix))
411
- diffusers_checkpoint.update(unet_conv_in(checkpoint, original_unet_prefix))
412
-
413
- # <original>.input_blocks -> <diffusers>.down_blocks
414
-
415
- original_down_block_idx = 1
416
-
417
- for diffusers_down_block_idx in range(len(model.down_blocks)):
418
- checkpoint_update, num_original_down_blocks = unet_downblock_to_diffusers_checkpoint(
419
- model,
420
- checkpoint,
421
- diffusers_down_block_idx=diffusers_down_block_idx,
422
- original_down_block_idx=original_down_block_idx,
423
- original_unet_prefix=original_unet_prefix,
424
- num_head_channels=None,
425
- )
426
-
427
- original_down_block_idx += num_original_down_blocks
428
-
429
- diffusers_checkpoint.update(checkpoint_update)
430
-
431
- diffusers_checkpoint.update(
432
- unet_midblock_to_diffusers_checkpoint(
433
- model,
434
- checkpoint,
435
- original_unet_prefix=original_unet_prefix,
436
- num_head_channels=None,
437
- )
438
- )
439
-
440
- # <original>.output_blocks -> <diffusers>.up_blocks
441
-
442
- original_up_block_idx = 0
443
-
444
- for diffusers_up_block_idx in range(len(model.up_blocks)):
445
- checkpoint_update, num_original_up_blocks = unet_upblock_to_diffusers_checkpoint(
446
- model,
447
- checkpoint,
448
- diffusers_up_block_idx=diffusers_up_block_idx,
449
- original_up_block_idx=original_up_block_idx,
450
- original_unet_prefix=original_unet_prefix,
451
- num_head_channels=None,
452
- )
453
-
454
- original_up_block_idx += num_original_up_blocks
455
-
456
- diffusers_checkpoint.update(checkpoint_update)
457
-
458
- # done <original>.output_blocks -> <diffusers>.up_blocks
459
-
460
- diffusers_checkpoint.update(unet_conv_norm_out(checkpoint, original_unet_prefix))
461
- diffusers_checkpoint.update(unet_conv_out(checkpoint, original_unet_prefix))
462
-
463
- return diffusers_checkpoint
464
-
465
-
466
- # done super res unet first steps
467
-
468
- # super res unet last step
469
-
470
- SUPER_RES_UNET_LAST_STEP_PREFIX = "model_last_step"
471
-
472
- SUPER_RES_UNET_LAST_STEP_CONFIG = {
473
- "sample_size": 256,
474
- "layers_per_block": 3,
475
- "down_block_types": (
476
- "ResnetDownsampleBlock2D",
477
- "ResnetDownsampleBlock2D",
478
- "ResnetDownsampleBlock2D",
479
- "ResnetDownsampleBlock2D",
480
- ),
481
- "up_block_types": (
482
- "ResnetUpsampleBlock2D",
483
- "ResnetUpsampleBlock2D",
484
- "ResnetUpsampleBlock2D",
485
- "ResnetUpsampleBlock2D",
486
- ),
487
- "block_out_channels": (320, 640, 960, 1280),
488
- "in_channels": 6,
489
- "out_channels": 3,
490
- "add_attention": False,
491
- }
492
-
493
-
494
- def super_res_unet_last_step_model_from_original_config():
495
- model = UNet2DModel(**SUPER_RES_UNET_LAST_STEP_CONFIG)
496
-
497
- return model
498
-
499
-
500
- def super_res_unet_last_step_original_checkpoint_to_diffusers_checkpoint(model, checkpoint):
501
- diffusers_checkpoint = {}
502
-
503
- original_unet_prefix = SUPER_RES_UNET_LAST_STEP_PREFIX
504
-
505
- diffusers_checkpoint.update(unet_time_embeddings(checkpoint, original_unet_prefix))
506
- diffusers_checkpoint.update(unet_conv_in(checkpoint, original_unet_prefix))
507
-
508
- # <original>.input_blocks -> <diffusers>.down_blocks
509
-
510
- original_down_block_idx = 1
511
-
512
- for diffusers_down_block_idx in range(len(model.down_blocks)):
513
- checkpoint_update, num_original_down_blocks = unet_downblock_to_diffusers_checkpoint(
514
- model,
515
- checkpoint,
516
- diffusers_down_block_idx=diffusers_down_block_idx,
517
- original_down_block_idx=original_down_block_idx,
518
- original_unet_prefix=original_unet_prefix,
519
- num_head_channels=None,
520
- )
521
-
522
- original_down_block_idx += num_original_down_blocks
523
-
524
- diffusers_checkpoint.update(checkpoint_update)
525
-
526
- diffusers_checkpoint.update(
527
- unet_midblock_to_diffusers_checkpoint(
528
- model,
529
- checkpoint,
530
- original_unet_prefix=original_unet_prefix,
531
- num_head_channels=None,
532
- )
533
- )
534
-
535
- # <original>.output_blocks -> <diffusers>.up_blocks
536
-
537
- original_up_block_idx = 0
538
-
539
- for diffusers_up_block_idx in range(len(model.up_blocks)):
540
- checkpoint_update, num_original_up_blocks = unet_upblock_to_diffusers_checkpoint(
541
- model,
542
- checkpoint,
543
- diffusers_up_block_idx=diffusers_up_block_idx,
544
- original_up_block_idx=original_up_block_idx,
545
- original_unet_prefix=original_unet_prefix,
546
- num_head_channels=None,
547
- )
548
-
549
- original_up_block_idx += num_original_up_blocks
550
-
551
- diffusers_checkpoint.update(checkpoint_update)
552
-
553
- # done <original>.output_blocks -> <diffusers>.up_blocks
554
-
555
- diffusers_checkpoint.update(unet_conv_norm_out(checkpoint, original_unet_prefix))
556
- diffusers_checkpoint.update(unet_conv_out(checkpoint, original_unet_prefix))
557
-
558
- return diffusers_checkpoint
559
-
560
-
561
- # done super res unet last step
562
-
563
-
564
- # unet utils
565
-
566
-
567
- # <original>.time_embed -> <diffusers>.time_embedding
568
- def unet_time_embeddings(checkpoint, original_unet_prefix):
569
- diffusers_checkpoint = {}
570
-
571
- diffusers_checkpoint.update(
572
- {
573
- "time_embedding.linear_1.weight": checkpoint[f"{original_unet_prefix}.time_embed.0.weight"],
574
- "time_embedding.linear_1.bias": checkpoint[f"{original_unet_prefix}.time_embed.0.bias"],
575
- "time_embedding.linear_2.weight": checkpoint[f"{original_unet_prefix}.time_embed.2.weight"],
576
- "time_embedding.linear_2.bias": checkpoint[f"{original_unet_prefix}.time_embed.2.bias"],
577
- }
578
- )
579
-
580
- return diffusers_checkpoint
581
-
582
-
583
- # <original>.input_blocks.0 -> <diffusers>.conv_in
584
- def unet_conv_in(checkpoint, original_unet_prefix):
585
- diffusers_checkpoint = {}
586
-
587
- diffusers_checkpoint.update(
588
- {
589
- "conv_in.weight": checkpoint[f"{original_unet_prefix}.input_blocks.0.0.weight"],
590
- "conv_in.bias": checkpoint[f"{original_unet_prefix}.input_blocks.0.0.bias"],
591
- }
592
- )
593
-
594
- return diffusers_checkpoint
595
-
596
-
597
- # <original>.out.0 -> <diffusers>.conv_norm_out
598
- def unet_conv_norm_out(checkpoint, original_unet_prefix):
599
- diffusers_checkpoint = {}
600
-
601
- diffusers_checkpoint.update(
602
- {
603
- "conv_norm_out.weight": checkpoint[f"{original_unet_prefix}.out.0.weight"],
604
- "conv_norm_out.bias": checkpoint[f"{original_unet_prefix}.out.0.bias"],
605
- }
606
- )
607
-
608
- return diffusers_checkpoint
609
-
610
-
611
- # <original>.out.2 -> <diffusers>.conv_out
612
- def unet_conv_out(checkpoint, original_unet_prefix):
613
- diffusers_checkpoint = {}
614
-
615
- diffusers_checkpoint.update(
616
- {
617
- "conv_out.weight": checkpoint[f"{original_unet_prefix}.out.2.weight"],
618
- "conv_out.bias": checkpoint[f"{original_unet_prefix}.out.2.bias"],
619
- }
620
- )
621
-
622
- return diffusers_checkpoint
623
-
624
-
625
- # <original>.input_blocks -> <diffusers>.down_blocks
626
- def unet_downblock_to_diffusers_checkpoint(
627
- model, checkpoint, *, diffusers_down_block_idx, original_down_block_idx, original_unet_prefix, num_head_channels
628
- ):
629
- diffusers_checkpoint = {}
630
-
631
- diffusers_resnet_prefix = f"down_blocks.{diffusers_down_block_idx}.resnets"
632
- original_down_block_prefix = f"{original_unet_prefix}.input_blocks"
633
-
634
- down_block = model.down_blocks[diffusers_down_block_idx]
635
-
636
- num_resnets = len(down_block.resnets)
637
-
638
- if down_block.downsamplers is None:
639
- downsampler = False
640
- else:
641
- assert len(down_block.downsamplers) == 1
642
- downsampler = True
643
- # The downsample block is also a resnet
644
- num_resnets += 1
645
-
646
- for resnet_idx_inc in range(num_resnets):
647
- full_resnet_prefix = f"{original_down_block_prefix}.{original_down_block_idx + resnet_idx_inc}.0"
648
-
649
- if downsampler and resnet_idx_inc == num_resnets - 1:
650
- # this is a downsample block
651
- full_diffusers_resnet_prefix = f"down_blocks.{diffusers_down_block_idx}.downsamplers.0"
652
- else:
653
- # this is a regular resnet block
654
- full_diffusers_resnet_prefix = f"{diffusers_resnet_prefix}.{resnet_idx_inc}"
655
-
656
- diffusers_checkpoint.update(
657
- resnet_to_diffusers_checkpoint(
658
- checkpoint, resnet_prefix=full_resnet_prefix, diffusers_resnet_prefix=full_diffusers_resnet_prefix
659
- )
660
- )
661
-
662
- if hasattr(down_block, "attentions"):
663
- num_attentions = len(down_block.attentions)
664
- diffusers_attention_prefix = f"down_blocks.{diffusers_down_block_idx}.attentions"
665
-
666
- for attention_idx_inc in range(num_attentions):
667
- full_attention_prefix = f"{original_down_block_prefix}.{original_down_block_idx + attention_idx_inc}.1"
668
- full_diffusers_attention_prefix = f"{diffusers_attention_prefix}.{attention_idx_inc}"
669
-
670
- diffusers_checkpoint.update(
671
- attention_to_diffusers_checkpoint(
672
- checkpoint,
673
- attention_prefix=full_attention_prefix,
674
- diffusers_attention_prefix=full_diffusers_attention_prefix,
675
- num_head_channels=num_head_channels,
676
- )
677
- )
678
-
679
- num_original_down_blocks = num_resnets
680
-
681
- return diffusers_checkpoint, num_original_down_blocks
682
-
683
-
684
- # <original>.middle_block -> <diffusers>.mid_block
685
- def unet_midblock_to_diffusers_checkpoint(model, checkpoint, *, original_unet_prefix, num_head_channels):
686
- diffusers_checkpoint = {}
687
-
688
- # block 0
689
-
690
- original_block_idx = 0
691
-
692
- diffusers_checkpoint.update(
693
- resnet_to_diffusers_checkpoint(
694
- checkpoint,
695
- diffusers_resnet_prefix="mid_block.resnets.0",
696
- resnet_prefix=f"{original_unet_prefix}.middle_block.{original_block_idx}",
697
- )
698
- )
699
-
700
- original_block_idx += 1
701
-
702
- # optional block 1
703
-
704
- if hasattr(model.mid_block, "attentions") and model.mid_block.attentions[0] is not None:
705
- diffusers_checkpoint.update(
706
- attention_to_diffusers_checkpoint(
707
- checkpoint,
708
- diffusers_attention_prefix="mid_block.attentions.0",
709
- attention_prefix=f"{original_unet_prefix}.middle_block.{original_block_idx}",
710
- num_head_channels=num_head_channels,
711
- )
712
- )
713
- original_block_idx += 1
714
-
715
- # block 1 or block 2
716
-
717
- diffusers_checkpoint.update(
718
- resnet_to_diffusers_checkpoint(
719
- checkpoint,
720
- diffusers_resnet_prefix="mid_block.resnets.1",
721
- resnet_prefix=f"{original_unet_prefix}.middle_block.{original_block_idx}",
722
- )
723
- )
724
-
725
- return diffusers_checkpoint
726
-
727
-
728
- # <original>.output_blocks -> <diffusers>.up_blocks
729
- def unet_upblock_to_diffusers_checkpoint(
730
- model, checkpoint, *, diffusers_up_block_idx, original_up_block_idx, original_unet_prefix, num_head_channels
731
- ):
732
- diffusers_checkpoint = {}
733
-
734
- diffusers_resnet_prefix = f"up_blocks.{diffusers_up_block_idx}.resnets"
735
- original_up_block_prefix = f"{original_unet_prefix}.output_blocks"
736
-
737
- up_block = model.up_blocks[diffusers_up_block_idx]
738
-
739
- num_resnets = len(up_block.resnets)
740
-
741
- if up_block.upsamplers is None:
742
- upsampler = False
743
- else:
744
- assert len(up_block.upsamplers) == 1
745
- upsampler = True
746
- # The upsample block is also a resnet
747
- num_resnets += 1
748
-
749
- has_attentions = hasattr(up_block, "attentions")
750
-
751
- for resnet_idx_inc in range(num_resnets):
752
- if upsampler and resnet_idx_inc == num_resnets - 1:
753
- # this is an upsample block
754
- if has_attentions:
755
- # There is a middle attention block that we skip
756
- original_resnet_block_idx = 2
757
- else:
758
- original_resnet_block_idx = 1
759
-
760
- # we add the `minus 1` because the last two resnets are stuck together in the same output block
761
- full_resnet_prefix = (
762
- f"{original_up_block_prefix}.{original_up_block_idx + resnet_idx_inc - 1}.{original_resnet_block_idx}"
763
- )
764
-
765
- full_diffusers_resnet_prefix = f"up_blocks.{diffusers_up_block_idx}.upsamplers.0"
766
- else:
767
- # this is a regular resnet block
768
- full_resnet_prefix = f"{original_up_block_prefix}.{original_up_block_idx + resnet_idx_inc}.0"
769
- full_diffusers_resnet_prefix = f"{diffusers_resnet_prefix}.{resnet_idx_inc}"
770
-
771
- diffusers_checkpoint.update(
772
- resnet_to_diffusers_checkpoint(
773
- checkpoint, resnet_prefix=full_resnet_prefix, diffusers_resnet_prefix=full_diffusers_resnet_prefix
774
- )
775
- )
776
-
777
- if has_attentions:
778
- num_attentions = len(up_block.attentions)
779
- diffusers_attention_prefix = f"up_blocks.{diffusers_up_block_idx}.attentions"
780
-
781
- for attention_idx_inc in range(num_attentions):
782
- full_attention_prefix = f"{original_up_block_prefix}.{original_up_block_idx + attention_idx_inc}.1"
783
- full_diffusers_attention_prefix = f"{diffusers_attention_prefix}.{attention_idx_inc}"
784
-
785
- diffusers_checkpoint.update(
786
- attention_to_diffusers_checkpoint(
787
- checkpoint,
788
- attention_prefix=full_attention_prefix,
789
- diffusers_attention_prefix=full_diffusers_attention_prefix,
790
- num_head_channels=num_head_channels,
791
- )
792
- )
793
-
794
- num_original_down_blocks = num_resnets - 1 if upsampler else num_resnets
795
-
796
- return diffusers_checkpoint, num_original_down_blocks
797
-
798
-
799
- def resnet_to_diffusers_checkpoint(checkpoint, *, diffusers_resnet_prefix, resnet_prefix):
800
- diffusers_checkpoint = {
801
- f"{diffusers_resnet_prefix}.norm1.weight": checkpoint[f"{resnet_prefix}.in_layers.0.weight"],
802
- f"{diffusers_resnet_prefix}.norm1.bias": checkpoint[f"{resnet_prefix}.in_layers.0.bias"],
803
- f"{diffusers_resnet_prefix}.conv1.weight": checkpoint[f"{resnet_prefix}.in_layers.2.weight"],
804
- f"{diffusers_resnet_prefix}.conv1.bias": checkpoint[f"{resnet_prefix}.in_layers.2.bias"],
805
- f"{diffusers_resnet_prefix}.time_emb_proj.weight": checkpoint[f"{resnet_prefix}.emb_layers.1.weight"],
806
- f"{diffusers_resnet_prefix}.time_emb_proj.bias": checkpoint[f"{resnet_prefix}.emb_layers.1.bias"],
807
- f"{diffusers_resnet_prefix}.norm2.weight": checkpoint[f"{resnet_prefix}.out_layers.0.weight"],
808
- f"{diffusers_resnet_prefix}.norm2.bias": checkpoint[f"{resnet_prefix}.out_layers.0.bias"],
809
- f"{diffusers_resnet_prefix}.conv2.weight": checkpoint[f"{resnet_prefix}.out_layers.3.weight"],
810
- f"{diffusers_resnet_prefix}.conv2.bias": checkpoint[f"{resnet_prefix}.out_layers.3.bias"],
811
- }
812
-
813
- skip_connection_prefix = f"{resnet_prefix}.skip_connection"
814
-
815
- if f"{skip_connection_prefix}.weight" in checkpoint:
816
- diffusers_checkpoint.update(
817
- {
818
- f"{diffusers_resnet_prefix}.conv_shortcut.weight": checkpoint[f"{skip_connection_prefix}.weight"],
819
- f"{diffusers_resnet_prefix}.conv_shortcut.bias": checkpoint[f"{skip_connection_prefix}.bias"],
820
- }
821
- )
822
-
823
- return diffusers_checkpoint
824
-
825
-
826
- def attention_to_diffusers_checkpoint(checkpoint, *, diffusers_attention_prefix, attention_prefix, num_head_channels):
827
- diffusers_checkpoint = {}
828
-
829
- # <original>.norm -> <diffusers>.group_norm
830
- diffusers_checkpoint.update(
831
- {
832
- f"{diffusers_attention_prefix}.group_norm.weight": checkpoint[f"{attention_prefix}.norm.weight"],
833
- f"{diffusers_attention_prefix}.group_norm.bias": checkpoint[f"{attention_prefix}.norm.bias"],
834
- }
835
- )
836
-
837
- # <original>.qkv -> <diffusers>.{query, key, value}
838
- [q_weight, k_weight, v_weight], [q_bias, k_bias, v_bias] = split_attentions(
839
- weight=checkpoint[f"{attention_prefix}.qkv.weight"][:, :, 0],
840
- bias=checkpoint[f"{attention_prefix}.qkv.bias"],
841
- split=3,
842
- chunk_size=num_head_channels,
843
- )
844
-
845
- diffusers_checkpoint.update(
846
- {
847
- f"{diffusers_attention_prefix}.to_q.weight": q_weight,
848
- f"{diffusers_attention_prefix}.to_q.bias": q_bias,
849
- f"{diffusers_attention_prefix}.to_k.weight": k_weight,
850
- f"{diffusers_attention_prefix}.to_k.bias": k_bias,
851
- f"{diffusers_attention_prefix}.to_v.weight": v_weight,
852
- f"{diffusers_attention_prefix}.to_v.bias": v_bias,
853
- }
854
- )
855
-
856
- # <original>.encoder_kv -> <diffusers>.{context_key, context_value}
857
- [encoder_k_weight, encoder_v_weight], [encoder_k_bias, encoder_v_bias] = split_attentions(
858
- weight=checkpoint[f"{attention_prefix}.encoder_kv.weight"][:, :, 0],
859
- bias=checkpoint[f"{attention_prefix}.encoder_kv.bias"],
860
- split=2,
861
- chunk_size=num_head_channels,
862
- )
863
-
864
- diffusers_checkpoint.update(
865
- {
866
- f"{diffusers_attention_prefix}.add_k_proj.weight": encoder_k_weight,
867
- f"{diffusers_attention_prefix}.add_k_proj.bias": encoder_k_bias,
868
- f"{diffusers_attention_prefix}.add_v_proj.weight": encoder_v_weight,
869
- f"{diffusers_attention_prefix}.add_v_proj.bias": encoder_v_bias,
870
- }
871
- )
872
-
873
- # <original>.proj_out (1d conv) -> <diffusers>.proj_attn (linear)
874
- diffusers_checkpoint.update(
875
- {
876
- f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{attention_prefix}.proj_out.weight"][
877
- :, :, 0
878
- ],
879
- f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{attention_prefix}.proj_out.bias"],
880
- }
881
- )
882
-
883
- return diffusers_checkpoint
884
-
885
-
886
- # TODO maybe document and/or can do more efficiently (build indices in for loop and extract once for each split?)
887
- def split_attentions(*, weight, bias, split, chunk_size):
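- # Deal the rows of the fused projection out round-robin, `chunk_size` rows at a time, into `split` separate weight/bias tensors (e.g. q/k/v).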
888
- weights = [None] * split
889
- biases = [None] * split
890
-
891
- weights_biases_idx = 0
892
-
893
- for starting_row_index in range(0, weight.shape[0], chunk_size):
894
- row_indices = torch.arange(starting_row_index, starting_row_index + chunk_size)
895
-
896
- weight_rows = weight[row_indices, :]
897
- bias_rows = bias[row_indices]
898
-
899
- if weights[weights_biases_idx] is None:
900
- assert weights[weights_biases_idx] is None
901
- weights[weights_biases_idx] = weight_rows
902
- biases[weights_biases_idx] = bias_rows
903
- else:
904
- assert weights[weights_biases_idx] is not None
905
- weights[weights_biases_idx] = torch.concat([weights[weights_biases_idx], weight_rows])
906
- biases[weights_biases_idx] = torch.concat([biases[weights_biases_idx], bias_rows])
907
-
908
- weights_biases_idx = (weights_biases_idx + 1) % split
909
-
910
- return weights, biases
911
-
912
-
913
- # done unet utils
914
-
915
-
916
- # Driver functions
917
-
918
-
919
- def text_encoder():
920
- print("loading CLIP text encoder")
921
-
922
- clip_name = "openai/clip-vit-large-patch14"
923
-
924
- # sets pad_value to 0
925
- pad_token = "!"
926
-
927
- tokenizer_model = CLIPTokenizer.from_pretrained(clip_name, pad_token=pad_token, device_map="auto")
928
-
929
- assert tokenizer_model.convert_tokens_to_ids(pad_token) == 0
930
-
931
- text_encoder_model = CLIPTextModelWithProjection.from_pretrained(
932
- clip_name,
933
- # `CLIPTextModel` does not support device_map="auto"
934
- # device_map="auto"
935
- )
936
-
937
- print("done loading CLIP text encoder")
938
-
939
- return text_encoder_model, tokenizer_model
940
-
941
-
942
- def prior(*, args, checkpoint_map_location):
943
- print("loading prior")
944
-
945
- prior_checkpoint = torch.load(args.prior_checkpoint_path, map_location=checkpoint_map_location)
946
- prior_checkpoint = prior_checkpoint["state_dict"]
947
-
948
- clip_stats_checkpoint = torch.load(args.clip_stat_path, map_location=checkpoint_map_location)
949
-
950
- prior_model = prior_model_from_original_config()
951
-
952
- prior_diffusers_checkpoint = prior_original_checkpoint_to_diffusers_checkpoint(
953
- prior_model, prior_checkpoint, clip_stats_checkpoint
954
- )
955
-
956
- del prior_checkpoint
957
- del clip_stats_checkpoint
958
-
959
- load_checkpoint_to_model(prior_diffusers_checkpoint, prior_model, strict=True)
960
-
961
- print("done loading prior")
962
-
963
- return prior_model
964
-
965
-
966
- def decoder(*, args, checkpoint_map_location):
967
- print("loading decoder")
968
-
969
- decoder_checkpoint = torch.load(args.decoder_checkpoint_path, map_location=checkpoint_map_location)
970
- decoder_checkpoint = decoder_checkpoint["state_dict"]
971
-
972
- decoder_model = decoder_model_from_original_config()
973
-
974
- decoder_diffusers_checkpoint = decoder_original_checkpoint_to_diffusers_checkpoint(
975
- decoder_model, decoder_checkpoint
976
- )
977
-
978
- # text proj interlude
979
-
980
- # The original decoder implementation includes a set of parameters that are used
981
- # for creating the `encoder_hidden_states` which are what the U-net is conditioned
982
- # on. The diffusers conditional unet directly takes the encoder_hidden_states. We pull
983
- # the parameters into the UnCLIPTextProjModel class
984
- text_proj_model = text_proj_from_original_config()
985
-
986
- text_proj_checkpoint = text_proj_original_checkpoint_to_diffusers_checkpoint(decoder_checkpoint)
987
-
988
- load_checkpoint_to_model(text_proj_checkpoint, text_proj_model, strict=True)
989
-
990
- # done text proj interlude
991
-
992
- del decoder_checkpoint
993
-
994
- load_checkpoint_to_model(decoder_diffusers_checkpoint, decoder_model, strict=True)
995
-
996
- print("done loading decoder")
997
-
998
- return decoder_model, text_proj_model
999
-
1000
-
1001
- def super_res_unet(*, args, checkpoint_map_location):
1002
- print("loading super resolution unet")
1003
-
1004
- super_res_checkpoint = torch.load(args.super_res_unet_checkpoint_path, map_location=checkpoint_map_location)
1005
- super_res_checkpoint = super_res_checkpoint["state_dict"]
1006
-
1007
- # model_first_steps
1008
-
1009
- super_res_first_model = super_res_unet_first_steps_model_from_original_config()
1010
-
1011
- super_res_first_steps_checkpoint = super_res_unet_first_steps_original_checkpoint_to_diffusers_checkpoint(
1012
- super_res_first_model, super_res_checkpoint
1013
- )
1014
-
1015
- # model_last_step
1016
- super_res_last_model = super_res_unet_last_step_model_from_original_config()
1017
-
1018
- super_res_last_step_checkpoint = super_res_unet_last_step_original_checkpoint_to_diffusers_checkpoint(
1019
- super_res_last_model, super_res_checkpoint
1020
- )
1021
-
1022
- del super_res_checkpoint
1023
-
1024
- load_checkpoint_to_model(super_res_first_steps_checkpoint, super_res_first_model, strict=True)
1025
-
1026
- load_checkpoint_to_model(super_res_last_step_checkpoint, super_res_last_model, strict=True)
1027
-
1028
- print("done loading super resolution unet")
1029
-
1030
- return super_res_first_model, super_res_last_model
1031
-
1032
-
1033
- def load_checkpoint_to_model(checkpoint, model, strict=False):
1034
- with tempfile.NamedTemporaryFile() as file:
1035
- torch.save(checkpoint, file.name)
1036
- del checkpoint
1037
- if strict:
1038
- model.load_state_dict(torch.load(file.name), strict=True)
1039
- else:
1040
- load_checkpoint_and_dispatch(model, file.name, device_map="auto")
1041
-
1042
-
1043
- if __name__ == "__main__":
1044
- parser = argparse.ArgumentParser()
1045
-
1046
- parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
1047
-
1048
- parser.add_argument(
1049
- "--prior_checkpoint_path",
1050
- default=None,
1051
- type=str,
1052
- required=True,
1053
- help="Path to the prior checkpoint to convert.",
1054
- )
1055
-
1056
- parser.add_argument(
1057
- "--decoder_checkpoint_path",
1058
- default=None,
1059
- type=str,
1060
- required=True,
1061
- help="Path to the decoder checkpoint to convert.",
1062
- )
1063
-
1064
- parser.add_argument(
1065
- "--super_res_unet_checkpoint_path",
1066
- default=None,
1067
- type=str,
1068
- required=True,
1069
- help="Path to the super resolution checkpoint to convert.",
1070
- )
1071
-
1072
- parser.add_argument(
1073
- "--clip_stat_path", default=None, type=str, required=True, help="Path to the clip stats checkpoint to convert."
1074
- )
1075
-
1076
- parser.add_argument(
1077
- "--checkpoint_load_device",
1078
- default="cpu",
1079
- type=str,
1080
- required=False,
1081
- help="The device passed to `map_location` when loading checkpoints.",
1082
- )
1083
-
1084
- parser.add_argument(
1085
- "--debug",
1086
- default=None,
1087
- type=str,
1088
- required=False,
1089
- help="Only run a specific stage of the convert script. Used for debugging",
1090
- )
1091
-
1092
- args = parser.parse_args()
1093
-
1094
- print(f"loading checkpoints to {args.checkpoint_load_device}")
1095
-
1096
- checkpoint_map_location = torch.device(args.checkpoint_load_device)
1097
-
1098
- if args.debug is not None:
1099
- print(f"debug: only executing {args.debug}")
1100
-
1101
- if args.debug is None:
1102
- text_encoder_model, tokenizer_model = text_encoder()
1103
-
1104
- prior_model = prior(args=args, checkpoint_map_location=checkpoint_map_location)
1105
-
1106
- decoder_model, text_proj_model = decoder(args=args, checkpoint_map_location=checkpoint_map_location)
1107
-
1108
- super_res_first_model, super_res_last_model = super_res_unet(
1109
- args=args, checkpoint_map_location=checkpoint_map_location
1110
- )
1111
-
1112
- prior_scheduler = UnCLIPScheduler(
1113
- variance_type="fixed_small_log",
1114
- prediction_type="sample",
1115
- num_train_timesteps=1000,
1116
- clip_sample_range=5.0,
1117
- )
1118
-
1119
- decoder_scheduler = UnCLIPScheduler(
1120
- variance_type="learned_range",
1121
- prediction_type="epsilon",
1122
- num_train_timesteps=1000,
1123
- )
1124
-
1125
- super_res_scheduler = UnCLIPScheduler(
1126
- variance_type="fixed_small_log",
1127
- prediction_type="epsilon",
1128
- num_train_timesteps=1000,
1129
- )
1130
-
1131
- print(f"saving Kakao Brain unCLIP to {args.dump_path}")
1132
-
1133
- pipe = UnCLIPPipeline(
1134
- prior=prior_model,
1135
- decoder=decoder_model,
1136
- text_proj=text_proj_model,
1137
- tokenizer=tokenizer_model,
1138
- text_encoder=text_encoder_model,
1139
- super_res_first=super_res_first_model,
1140
- super_res_last=super_res_last_model,
1141
- prior_scheduler=prior_scheduler,
1142
- decoder_scheduler=decoder_scheduler,
1143
- super_res_scheduler=super_res_scheduler,
1144
- )
1145
- pipe.save_pretrained(args.dump_path)
1146
-
1147
- print("done writing Kakao Brain unCLIP")
1148
- elif args.debug == "text_encoder":
1149
- text_encoder_model, tokenizer_model = text_encoder()
1150
- elif args.debug == "prior":
1151
- prior_model = prior(args=args, checkpoint_map_location=checkpoint_map_location)
1152
- elif args.debug == "decoder":
1153
- decoder_model, text_proj_model = decoder(args=args, checkpoint_map_location=checkpoint_map_location)
1154
- elif args.debug == "super_res_unet":
1155
- super_res_first_model, super_res_last_model = super_res_unet(
1156
- args=args, checkpoint_map_location=checkpoint_map_location
1157
- )
1158
- else:
1159
- raise ValueError(f"unknown debug value : {args.debug}")
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/score_sde_ve/pipeline_score_sde_ve.py DELETED
@@ -1,108 +0,0 @@
- # Copyright 2023 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from typing import List, Optional, Tuple, Union
-
- import torch
-
- from ...models import UNet2DModel
- from ...schedulers import ScoreSdeVeScheduler
- from ...utils import randn_tensor
- from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
-
-
- class ScoreSdeVePipeline(DiffusionPipeline):
- r"""
- Pipeline for unconditional image generation.
-
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
-
- Parameters:
- unet ([`UNet2DModel`]):
- A `UNet2DModel` to denoise the encoded image.
- scheduler ([`ScoreSdeVeScheduler`]):
- A `ScoreSdeVeScheduler` to be used in combination with `unet` to denoise the encoded image.
- """
- unet: UNet2DModel
- scheduler: ScoreSdeVeScheduler
-
- def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
- super().__init__()
- self.register_modules(unet=unet, scheduler=scheduler)
-
- @torch.no_grad()
- def __call__(
- self,
- batch_size: int = 1,
- num_inference_steps: int = 2000,
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- **kwargs,
- ) -> Union[ImagePipelineOutput, Tuple]:
- r"""
- The call function to the pipeline for generation.
-
- Args:
- batch_size (`int`, *optional*, defaults to 1):
- The number of images to generate.
- generator (`torch.Generator`, `optional`):
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
- generation deterministic.
- output_type (`str`, `optional`, defaults to `"pil"`):
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple.
-
- Returns:
- [`~pipelines.ImagePipelineOutput`] or `tuple`:
- If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
- returned where the first element is a list with the generated images.
- """
-
- img_size = self.unet.config.sample_size
- shape = (batch_size, 3, img_size, img_size)
-
- model = self.unet
-
- sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
- sample = sample.to(self.device)
-
- self.scheduler.set_timesteps(num_inference_steps)
- self.scheduler.set_sigmas(num_inference_steps)
-
- for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
- sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
-
- # correction step
- for _ in range(self.scheduler.config.correct_steps):
- model_output = self.unet(sample, sigma_t).sample
- sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample
-
- # prediction step
- model_output = model(sample, sigma_t).sample
- output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
-
- sample, sample_mean = output.prev_sample, output.prev_sample_mean
-
- sample = sample_mean.clamp(0, 1)
- sample = sample.cpu().permute(0, 2, 3, 1).numpy()
- if output_type == "pil":
- sample = self.numpy_to_pil(sample)
-
- if not return_dict:
- return (sample,)
-
- return ImagePipelineOutput(images=sample)
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/altdiffusion/test_alt_diffusion.py DELETED
@@ -1,254 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import gc
17
- import unittest
18
-
19
- import numpy as np
20
- import torch
21
- from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
22
-
23
- from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
24
- from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
25
- RobertaSeriesConfig,
26
- RobertaSeriesModelWithTransformation,
27
- )
28
- from diffusers.utils import slow, torch_device
29
- from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
30
-
31
- from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
32
- from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
33
-
34
-
35
- enable_full_determinism()
36
-
37
-
38
- class AltDiffusionPipelineFastTests(
39
- PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
40
- ):
41
- pipeline_class = AltDiffusionPipeline
42
- params = TEXT_TO_IMAGE_PARAMS
43
- batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
44
- image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
45
- image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
46
-
47
- def get_dummy_components(self):
48
- torch.manual_seed(0)
49
- unet = UNet2DConditionModel(
50
- block_out_channels=(32, 64),
51
- layers_per_block=2,
52
- sample_size=32,
53
- in_channels=4,
54
- out_channels=4,
55
- down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
56
- up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
57
- cross_attention_dim=32,
58
- )
59
- scheduler = DDIMScheduler(
60
- beta_start=0.00085,
61
- beta_end=0.012,
62
- beta_schedule="scaled_linear",
63
- clip_sample=False,
64
- set_alpha_to_one=False,
65
- )
66
- torch.manual_seed(0)
67
- vae = AutoencoderKL(
68
- block_out_channels=[32, 64],
69
- in_channels=3,
70
- out_channels=3,
71
- down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
72
- up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
73
- latent_channels=4,
74
- )
75
-
76
- # TODO: address the non-deterministic text encoder (fails for save-load tests)
77
- # torch.manual_seed(0)
78
- # text_encoder_config = RobertaSeriesConfig(
79
- # hidden_size=32,
80
- # project_dim=32,
81
- # intermediate_size=37,
82
- # layer_norm_eps=1e-05,
83
- # num_attention_heads=4,
84
- # num_hidden_layers=5,
85
- # vocab_size=5002,
86
- # )
87
- # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
88
-
89
- torch.manual_seed(0)
90
- text_encoder_config = CLIPTextConfig(
91
- bos_token_id=0,
92
- eos_token_id=2,
93
- hidden_size=32,
94
- projection_dim=32,
95
- intermediate_size=37,
96
- layer_norm_eps=1e-05,
97
- num_attention_heads=4,
98
- num_hidden_layers=5,
99
- pad_token_id=1,
100
- vocab_size=5002,
101
- )
102
- text_encoder = CLIPTextModel(text_encoder_config)
103
-
104
- tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
105
- tokenizer.model_max_length = 77
106
-
107
- components = {
108
- "unet": unet,
109
- "scheduler": scheduler,
110
- "vae": vae,
111
- "text_encoder": text_encoder,
112
- "tokenizer": tokenizer,
113
- "safety_checker": None,
114
- "feature_extractor": None,
115
- }
116
- return components
117
-
118
- def get_dummy_inputs(self, device, seed=0):
119
- if str(device).startswith("mps"):
120
- generator = torch.manual_seed(seed)
121
- else:
122
- generator = torch.Generator(device=device).manual_seed(seed)
123
- inputs = {
124
- "prompt": "A painting of a squirrel eating a burger",
125
- "generator": generator,
126
- "num_inference_steps": 2,
127
- "guidance_scale": 6.0,
128
- "output_type": "numpy",
129
- }
130
- return inputs
131
-
132
- def test_attention_slicing_forward_pass(self):
133
- super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
134
-
135
- def test_inference_batch_single_identical(self):
136
- super().test_inference_batch_single_identical(expected_max_diff=3e-3)
137
-
138
- def test_alt_diffusion_ddim(self):
139
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
140
-
141
- components = self.get_dummy_components()
142
- torch.manual_seed(0)
143
- text_encoder_config = RobertaSeriesConfig(
144
- hidden_size=32,
145
- project_dim=32,
146
- intermediate_size=37,
147
- layer_norm_eps=1e-05,
148
- num_attention_heads=4,
149
- num_hidden_layers=5,
150
- vocab_size=5002,
151
- )
152
- # TODO: remove after fixing the non-deterministic text encoder
153
- text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
154
- components["text_encoder"] = text_encoder
155
-
156
- alt_pipe = AltDiffusionPipeline(**components)
157
- alt_pipe = alt_pipe.to(device)
158
- alt_pipe.set_progress_bar_config(disable=None)
159
-
160
- inputs = self.get_dummy_inputs(device)
161
- inputs["prompt"] = "A photo of an astronaut"
162
- output = alt_pipe(**inputs)
163
- image = output.images
164
- image_slice = image[0, -3:, -3:, -1]
165
-
166
- assert image.shape == (1, 64, 64, 3)
167
- expected_slice = np.array(
168
- [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
169
- )
170
-
171
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
172
-
173
- def test_alt_diffusion_pndm(self):
174
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
175
-
176
- components = self.get_dummy_components()
177
- components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
178
- torch.manual_seed(0)
179
- text_encoder_config = RobertaSeriesConfig(
180
- hidden_size=32,
181
- project_dim=32,
182
- intermediate_size=37,
183
- layer_norm_eps=1e-05,
184
- num_attention_heads=4,
185
- num_hidden_layers=5,
186
- vocab_size=5002,
187
- )
188
- # TODO: remove after fixing the non-deterministic text encoder
189
- text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
190
- components["text_encoder"] = text_encoder
191
- alt_pipe = AltDiffusionPipeline(**components)
192
- alt_pipe = alt_pipe.to(device)
193
- alt_pipe.set_progress_bar_config(disable=None)
194
-
195
- inputs = self.get_dummy_inputs(device)
196
- output = alt_pipe(**inputs)
197
- image = output.images
198
- image_slice = image[0, -3:, -3:, -1]
199
-
200
- assert image.shape == (1, 64, 64, 3)
201
- expected_slice = np.array(
202
- [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
203
- )
204
-
205
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
206
-
207
-
208
- @slow
209
- @require_torch_gpu
210
- class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
211
- def tearDown(self):
212
- # clean up the VRAM after each test
213
- super().tearDown()
214
- gc.collect()
215
- torch.cuda.empty_cache()
216
-
217
- def test_alt_diffusion(self):
218
- # make sure here that pndm scheduler skips prk
219
- alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
220
- alt_pipe = alt_pipe.to(torch_device)
221
- alt_pipe.set_progress_bar_config(disable=None)
222
-
223
- prompt = "A painting of a squirrel eating a burger"
224
- generator = torch.manual_seed(0)
225
- output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
226
-
227
- image = output.images
228
-
229
- image_slice = image[0, -3:, -3:, -1]
230
-
231
- assert image.shape == (1, 512, 512, 3)
232
- expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
233
-
234
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
235
-
236
- def test_alt_diffusion_fast_ddim(self):
237
- scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
238
-
239
- alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
240
- alt_pipe = alt_pipe.to(torch_device)
241
- alt_pipe.set_progress_bar_config(disable=None)
242
-
243
- prompt = "A painting of a squirrel eating a burger"
244
- generator = torch.manual_seed(0)
245
-
246
- output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
247
- image = output.images
248
-
249
- image_slice = image[0, -3:, -3:, -1]
250
-
251
- assert image.shape == (1, 512, 512, 3)
252
- expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
253
-
254
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
 
 
spaces/Andy1621/uniformer_image_detection/configs/htc/htc_without_semantic_r50_fpn_1x_coco.py DELETED
@@ -1,236 +0,0 @@
1
- _base_ = [
2
- '../_base_/datasets/coco_instance.py',
3
- '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
4
- ]
5
- # model settings
6
- model = dict(
7
- type='HybridTaskCascade',
8
- pretrained='torchvision://resnet50',
9
- backbone=dict(
10
- type='ResNet',
11
- depth=50,
12
- num_stages=4,
13
- out_indices=(0, 1, 2, 3),
14
- frozen_stages=1,
15
- norm_cfg=dict(type='BN', requires_grad=True),
16
- norm_eval=True,
17
- style='pytorch'),
18
- neck=dict(
19
- type='FPN',
20
- in_channels=[256, 512, 1024, 2048],
21
- out_channels=256,
22
- num_outs=5),
23
- rpn_head=dict(
24
- type='RPNHead',
25
- in_channels=256,
26
- feat_channels=256,
27
- anchor_generator=dict(
28
- type='AnchorGenerator',
29
- scales=[8],
30
- ratios=[0.5, 1.0, 2.0],
31
- strides=[4, 8, 16, 32, 64]),
32
- bbox_coder=dict(
33
- type='DeltaXYWHBBoxCoder',
34
- target_means=[.0, .0, .0, .0],
35
- target_stds=[1.0, 1.0, 1.0, 1.0]),
36
- loss_cls=dict(
37
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
38
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
39
- roi_head=dict(
40
- type='HybridTaskCascadeRoIHead',
41
- interleaved=True,
42
- mask_info_flow=True,
43
- num_stages=3,
44
- stage_loss_weights=[1, 0.5, 0.25],
45
- bbox_roi_extractor=dict(
46
- type='SingleRoIExtractor',
47
- roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
48
- out_channels=256,
49
- featmap_strides=[4, 8, 16, 32]),
50
- bbox_head=[
51
- dict(
52
- type='Shared2FCBBoxHead',
53
- in_channels=256,
54
- fc_out_channels=1024,
55
- roi_feat_size=7,
56
- num_classes=80,
57
- bbox_coder=dict(
58
- type='DeltaXYWHBBoxCoder',
59
- target_means=[0., 0., 0., 0.],
60
- target_stds=[0.1, 0.1, 0.2, 0.2]),
61
- reg_class_agnostic=True,
62
- loss_cls=dict(
63
- type='CrossEntropyLoss',
64
- use_sigmoid=False,
65
- loss_weight=1.0),
66
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
67
- loss_weight=1.0)),
68
- dict(
69
- type='Shared2FCBBoxHead',
70
- in_channels=256,
71
- fc_out_channels=1024,
72
- roi_feat_size=7,
73
- num_classes=80,
74
- bbox_coder=dict(
75
- type='DeltaXYWHBBoxCoder',
76
- target_means=[0., 0., 0., 0.],
77
- target_stds=[0.05, 0.05, 0.1, 0.1]),
78
- reg_class_agnostic=True,
79
- loss_cls=dict(
80
- type='CrossEntropyLoss',
81
- use_sigmoid=False,
82
- loss_weight=1.0),
83
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
84
- loss_weight=1.0)),
85
- dict(
86
- type='Shared2FCBBoxHead',
87
- in_channels=256,
88
- fc_out_channels=1024,
89
- roi_feat_size=7,
90
- num_classes=80,
91
- bbox_coder=dict(
92
- type='DeltaXYWHBBoxCoder',
93
- target_means=[0., 0., 0., 0.],
94
- target_stds=[0.033, 0.033, 0.067, 0.067]),
95
- reg_class_agnostic=True,
96
- loss_cls=dict(
97
- type='CrossEntropyLoss',
98
- use_sigmoid=False,
99
- loss_weight=1.0),
100
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
101
- ],
102
- mask_roi_extractor=dict(
103
- type='SingleRoIExtractor',
104
- roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
105
- out_channels=256,
106
- featmap_strides=[4, 8, 16, 32]),
107
- mask_head=[
108
- dict(
109
- type='HTCMaskHead',
110
- with_conv_res=False,
111
- num_convs=4,
112
- in_channels=256,
113
- conv_out_channels=256,
114
- num_classes=80,
115
- loss_mask=dict(
116
- type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
117
- dict(
118
- type='HTCMaskHead',
119
- num_convs=4,
120
- in_channels=256,
121
- conv_out_channels=256,
122
- num_classes=80,
123
- loss_mask=dict(
124
- type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
125
- dict(
126
- type='HTCMaskHead',
127
- num_convs=4,
128
- in_channels=256,
129
- conv_out_channels=256,
130
- num_classes=80,
131
- loss_mask=dict(
132
- type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
133
- ]),
134
- # model training and testing settings
135
- train_cfg=dict(
136
- rpn=dict(
137
- assigner=dict(
138
- type='MaxIoUAssigner',
139
- pos_iou_thr=0.7,
140
- neg_iou_thr=0.3,
141
- min_pos_iou=0.3,
142
- ignore_iof_thr=-1),
143
- sampler=dict(
144
- type='RandomSampler',
145
- num=256,
146
- pos_fraction=0.5,
147
- neg_pos_ub=-1,
148
- add_gt_as_proposals=False),
149
- allowed_border=0,
150
- pos_weight=-1,
151
- debug=False),
152
- rpn_proposal=dict(
153
- nms_pre=2000,
154
- max_per_img=2000,
155
- nms=dict(type='nms', iou_threshold=0.7),
156
- min_bbox_size=0),
157
- rcnn=[
158
- dict(
159
- assigner=dict(
160
- type='MaxIoUAssigner',
161
- pos_iou_thr=0.5,
162
- neg_iou_thr=0.5,
163
- min_pos_iou=0.5,
164
- ignore_iof_thr=-1),
165
- sampler=dict(
166
- type='RandomSampler',
167
- num=512,
168
- pos_fraction=0.25,
169
- neg_pos_ub=-1,
170
- add_gt_as_proposals=True),
171
- mask_size=28,
172
- pos_weight=-1,
173
- debug=False),
174
- dict(
175
- assigner=dict(
176
- type='MaxIoUAssigner',
177
- pos_iou_thr=0.6,
178
- neg_iou_thr=0.6,
179
- min_pos_iou=0.6,
180
- ignore_iof_thr=-1),
181
- sampler=dict(
182
- type='RandomSampler',
183
- num=512,
184
- pos_fraction=0.25,
185
- neg_pos_ub=-1,
186
- add_gt_as_proposals=True),
187
- mask_size=28,
188
- pos_weight=-1,
189
- debug=False),
190
- dict(
191
- assigner=dict(
192
- type='MaxIoUAssigner',
193
- pos_iou_thr=0.7,
194
- neg_iou_thr=0.7,
195
- min_pos_iou=0.7,
196
- ignore_iof_thr=-1),
197
- sampler=dict(
198
- type='RandomSampler',
199
- num=512,
200
- pos_fraction=0.25,
201
- neg_pos_ub=-1,
202
- add_gt_as_proposals=True),
203
- mask_size=28,
204
- pos_weight=-1,
205
- debug=False)
206
- ]),
207
- test_cfg=dict(
208
- rpn=dict(
209
- nms_pre=1000,
210
- max_per_img=1000,
211
- nms=dict(type='nms', iou_threshold=0.7),
212
- min_bbox_size=0),
213
- rcnn=dict(
214
- score_thr=0.001,
215
- nms=dict(type='nms', iou_threshold=0.5),
216
- max_per_img=100,
217
- mask_thr_binary=0.5)))
218
- img_norm_cfg = dict(
219
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
220
- test_pipeline = [
221
- dict(type='LoadImageFromFile'),
222
- dict(
223
- type='MultiScaleFlipAug',
224
- img_scale=(1333, 800),
225
- flip=False,
226
- transforms=[
227
- dict(type='Resize', keep_ratio=True),
228
- dict(type='RandomFlip', flip_ratio=0.5),
229
- dict(type='Normalize', **img_norm_cfg),
230
- dict(type='Pad', size_divisor=32),
231
- dict(type='ImageToTensor', keys=['img']),
232
- dict(type='Collect', keys=['img']),
233
- ])
234
- ]
235
- data = dict(
236
- val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/bbox_heads/bbox_head.py DELETED
@@ -1,483 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
- from mmcv.runner import auto_fp16, force_fp32
5
- from torch.nn.modules.utils import _pair
6
-
7
- from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms
8
- from mmdet.models.builder import HEADS, build_loss
9
- from mmdet.models.losses import accuracy
10
-
11
-
12
- @HEADS.register_module()
13
- class BBoxHead(nn.Module):
14
- """Simplest RoI head, with only two fc layers for classification and
15
- regression respectively."""
16
-
17
- def __init__(self,
18
- with_avg_pool=False,
19
- with_cls=True,
20
- with_reg=True,
21
- roi_feat_size=7,
22
- in_channels=256,
23
- num_classes=80,
24
- bbox_coder=dict(
25
- type='DeltaXYWHBBoxCoder',
26
- clip_border=True,
27
- target_means=[0., 0., 0., 0.],
28
- target_stds=[0.1, 0.1, 0.2, 0.2]),
29
- reg_class_agnostic=False,
30
- reg_decoded_bbox=False,
31
- loss_cls=dict(
32
- type='CrossEntropyLoss',
33
- use_sigmoid=False,
34
- loss_weight=1.0),
35
- loss_bbox=dict(
36
- type='SmoothL1Loss', beta=1.0, loss_weight=1.0)):
37
- super(BBoxHead, self).__init__()
38
- assert with_cls or with_reg
39
- self.with_avg_pool = with_avg_pool
40
- self.with_cls = with_cls
41
- self.with_reg = with_reg
42
- self.roi_feat_size = _pair(roi_feat_size)
43
- self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]
44
- self.in_channels = in_channels
45
- self.num_classes = num_classes
46
- self.reg_class_agnostic = reg_class_agnostic
47
- self.reg_decoded_bbox = reg_decoded_bbox
48
- self.fp16_enabled = False
49
-
50
- self.bbox_coder = build_bbox_coder(bbox_coder)
51
- self.loss_cls = build_loss(loss_cls)
52
- self.loss_bbox = build_loss(loss_bbox)
53
-
54
- in_channels = self.in_channels
55
- if self.with_avg_pool:
56
- self.avg_pool = nn.AvgPool2d(self.roi_feat_size)
57
- else:
58
- in_channels *= self.roi_feat_area
59
- if self.with_cls:
60
- # need to add background class
61
- self.fc_cls = nn.Linear(in_channels, num_classes + 1)
62
- if self.with_reg:
63
- out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes
64
- self.fc_reg = nn.Linear(in_channels, out_dim_reg)
65
- self.debug_imgs = None
66
-
67
- def init_weights(self):
68
- # conv layers are already initialized by ConvModule
69
- if self.with_cls:
70
- nn.init.normal_(self.fc_cls.weight, 0, 0.01)
71
- nn.init.constant_(self.fc_cls.bias, 0)
72
- if self.with_reg:
73
- nn.init.normal_(self.fc_reg.weight, 0, 0.001)
74
- nn.init.constant_(self.fc_reg.bias, 0)
75
-
76
- @auto_fp16()
77
- def forward(self, x):
78
- if self.with_avg_pool:
79
- x = self.avg_pool(x)
80
- x = x.view(x.size(0), -1)
81
- cls_score = self.fc_cls(x) if self.with_cls else None
82
- bbox_pred = self.fc_reg(x) if self.with_reg else None
83
- return cls_score, bbox_pred
84
-
85
- def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes,
86
- pos_gt_labels, cfg):
87
- """Calculate the ground truth for proposals in the single image
88
- according to the sampling results.
89
-
90
- Args:
91
- pos_bboxes (Tensor): Contains all the positive boxes,
92
- has shape (num_pos, 4), the last dimension 4
93
- represents [tl_x, tl_y, br_x, br_y].
94
- neg_bboxes (Tensor): Contains all the negative boxes,
95
- has shape (num_neg, 4), the last dimension 4
96
- represents [tl_x, tl_y, br_x, br_y].
97
- pos_gt_bboxes (Tensor): Contains all the gt_boxes,
98
- has shape (num_gt, 4), the last dimension 4
99
- represents [tl_x, tl_y, br_x, br_y].
100
- pos_gt_labels (Tensor): Contains all the gt_labels,
101
- has shape (num_gt).
102
- cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.
103
-
104
- Returns:
105
- Tuple[Tensor]: Ground truth for proposals
106
- in a single image. Containing the following Tensors:
107
-
108
- - labels(Tensor): Gt_labels for all proposals, has
109
- shape (num_proposals,).
110
- - label_weights(Tensor): Labels_weights for all
111
- proposals, has shape (num_proposals,).
112
- - bbox_targets(Tensor):Regression target for all
113
- proposals, has shape (num_proposals, 4), the
114
- last dimension 4 represents [tl_x, tl_y, br_x, br_y].
115
- - bbox_weights(Tensor):Regression weights for all
116
- proposals, has shape (num_proposals, 4).
117
- """
118
- num_pos = pos_bboxes.size(0)
119
- num_neg = neg_bboxes.size(0)
120
- num_samples = num_pos + num_neg
121
-
122
- # original implementation uses new_zeros since BG are set to be 0
123
- # now use empty & fill because BG cat_id = num_classes,
124
- # FG cat_id = [0, num_classes-1]
125
- labels = pos_bboxes.new_full((num_samples, ),
126
- self.num_classes,
127
- dtype=torch.long)
128
- label_weights = pos_bboxes.new_zeros(num_samples)
129
- bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
130
- bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
131
- if num_pos > 0:
132
- labels[:num_pos] = pos_gt_labels
133
- pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
134
- label_weights[:num_pos] = pos_weight
135
- if not self.reg_decoded_bbox:
136
- pos_bbox_targets = self.bbox_coder.encode(
137
- pos_bboxes, pos_gt_bboxes)
138
- else:
139
- # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
140
- # is applied directly on the decoded bounding boxes, both
141
- # the predicted boxes and regression targets should be with
142
- # absolute coordinate format.
143
- pos_bbox_targets = pos_gt_bboxes
144
- bbox_targets[:num_pos, :] = pos_bbox_targets
145
- bbox_weights[:num_pos, :] = 1
146
- if num_neg > 0:
147
- label_weights[-num_neg:] = 1.0
148
-
149
- return labels, label_weights, bbox_targets, bbox_weights
150
-
151
- def get_targets(self,
152
- sampling_results,
153
- gt_bboxes,
154
- gt_labels,
155
- rcnn_train_cfg,
156
- concat=True):
157
- """Calculate the ground truth for all samples in a batch according to
158
- the sampling_results.
159
-
160
- Almost the same as the implementation in bbox_head, we passed
161
- additional parameters pos_inds_list and neg_inds_list to
162
- `_get_target_single` function.
163
-
164
- Args:
165
- sampling_results (List[obj:SamplingResults]): Assign results of
166
- all images in a batch after sampling.
167
- gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,
168
- each tensor has shape (num_gt, 4), the last dimension 4
169
- represents [tl_x, tl_y, br_x, br_y].
170
- gt_labels (list[Tensor]): Gt_labels of all images in a batch,
171
- each tensor has shape (num_gt,).
172
- rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
173
- concat (bool): Whether to concatenate the results of all
174
- the images in a single batch.
175
-
176
- Returns:
177
- Tuple[Tensor]: Ground truth for proposals in a single image.
178
- Containing the following list of Tensors:
179
-
180
- - labels (list[Tensor],Tensor): Gt_labels for all
181
- proposals in a batch, each tensor in list has
182
- shape (num_proposals,) when `concat=False`, otherwise
183
- just a single tensor has shape (num_all_proposals,).
184
- - label_weights (list[Tensor]): Labels_weights for
185
- all proposals in a batch, each tensor in list has
186
- shape (num_proposals,) when `concat=False`, otherwise
187
- just a single tensor has shape (num_all_proposals,).
188
- - bbox_targets (list[Tensor],Tensor): Regression target
189
- for all proposals in a batch, each tensor in list
190
- has shape (num_proposals, 4) when `concat=False`,
191
- otherwise just a single tensor has shape
192
- (num_all_proposals, 4), the last dimension 4 represents
193
- [tl_x, tl_y, br_x, br_y].
194
- - bbox_weights (list[tensor],Tensor): Regression weights for
195
- all proposals in a batch, each tensor in list has shape
196
- (num_proposals, 4) when `concat=False`, otherwise just a
197
- single tensor has shape (num_all_proposals, 4).
198
- """
199
- pos_bboxes_list = [res.pos_bboxes for res in sampling_results]
200
- neg_bboxes_list = [res.neg_bboxes for res in sampling_results]
201
- pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
202
- pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
203
- labels, label_weights, bbox_targets, bbox_weights = multi_apply(
204
- self._get_target_single,
205
- pos_bboxes_list,
206
- neg_bboxes_list,
207
- pos_gt_bboxes_list,
208
- pos_gt_labels_list,
209
- cfg=rcnn_train_cfg)
210
-
211
- if concat:
212
- labels = torch.cat(labels, 0)
213
- label_weights = torch.cat(label_weights, 0)
214
- bbox_targets = torch.cat(bbox_targets, 0)
215
- bbox_weights = torch.cat(bbox_weights, 0)
216
- return labels, label_weights, bbox_targets, bbox_weights
217
-
218
- @force_fp32(apply_to=('cls_score', 'bbox_pred'))
219
- def loss(self,
220
- cls_score,
221
- bbox_pred,
222
- rois,
223
- labels,
224
- label_weights,
225
- bbox_targets,
226
- bbox_weights,
227
- reduction_override=None):
228
- losses = dict()
229
- if cls_score is not None:
230
- avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
231
- if cls_score.numel() > 0:
232
- losses['loss_cls'] = self.loss_cls(
233
- cls_score,
234
- labels,
235
- label_weights,
236
- avg_factor=avg_factor,
237
- reduction_override=reduction_override)
238
- losses['acc'] = accuracy(cls_score, labels)
239
- if bbox_pred is not None:
240
- bg_class_ind = self.num_classes
241
- # 0~self.num_classes-1 are FG, self.num_classes is BG
242
- pos_inds = (labels >= 0) & (labels < bg_class_ind)
243
- # do not perform bounding box regression for BG anymore.
244
- if pos_inds.any():
245
- if self.reg_decoded_bbox:
246
- # When the regression loss (e.g. `IouLoss`,
247
- # `GIouLoss`, `DIouLoss`) is applied directly on
248
- # the decoded bounding boxes, it decodes the
249
- # already encoded coordinates to absolute format.
250
- bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)
251
- if self.reg_class_agnostic:
252
- pos_bbox_pred = bbox_pred.view(
253
- bbox_pred.size(0), 4)[pos_inds.type(torch.bool)]
254
- else:
255
- pos_bbox_pred = bbox_pred.view(
256
- bbox_pred.size(0), -1,
257
- 4)[pos_inds.type(torch.bool),
258
- labels[pos_inds.type(torch.bool)]]
259
- losses['loss_bbox'] = self.loss_bbox(
260
- pos_bbox_pred,
261
- bbox_targets[pos_inds.type(torch.bool)],
262
- bbox_weights[pos_inds.type(torch.bool)],
263
- avg_factor=bbox_targets.size(0),
264
- reduction_override=reduction_override)
265
- else:
266
- losses['loss_bbox'] = bbox_pred[pos_inds].sum()
267
- return losses
268
-
269
- @force_fp32(apply_to=('cls_score', 'bbox_pred'))
270
- def get_bboxes(self,
271
- rois,
272
- cls_score,
273
- bbox_pred,
274
- img_shape,
275
- scale_factor,
276
- rescale=False,
277
- cfg=None):
278
- """Transform network output for a batch into bbox predictions.
279
-
280
- If the input rois has batch dimension, the function would be in
281
- `batch_mode` and return is a tuple[list[Tensor], list[Tensor]],
282
- otherwise, the return is a tuple[Tensor, Tensor].
283
-
284
- Args:
285
- rois (Tensor): Boxes to be transformed. Has shape (num_boxes, 5)
286
- or (B, num_boxes, 5)
287
- cls_score (list[Tensor] or Tensor): Box scores for
288
- each scale level, each is a 4D-tensor, the channel number is
289
- num_points * num_classes.
290
- bbox_pred (Tensor, optional): Box energies / deltas for each scale
291
- level, each is a 4D-tensor, the channel number is
292
- num_classes * 4.
293
- img_shape (Sequence[int] or torch.Tensor or Sequence[
294
- Sequence[int]], optional): Maximum bounds for boxes, specifies
295
- (H, W, C) or (H, W). If rois shape is (B, num_boxes, 4), then
296
- the max_shape should be a Sequence[Sequence[int]]
297
- and the length of max_shape should also be B.
298
- scale_factor (tuple[ndarray] or ndarray): Scale factor of the
299
- image arange as (w_scale, h_scale, w_scale, h_scale). In
300
- `batch_mode`, the scale_factor shape is tuple[ndarray].
301
- rescale (bool): If True, return boxes in original image space.
302
- Default: False.
303
- cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None
304
-
305
- Returns:
306
- tuple[list[Tensor], list[Tensor]] or tuple[Tensor, Tensor]:
307
- If the input has a batch dimension, the return value is
308
- a tuple of the list. The first list contains the boxes of
309
- the corresponding image in a batch, each tensor has the
310
- shape (num_boxes, 5) and last dimension 5 represent
311
- (tl_x, tl_y, br_x, br_y, score). Each Tensor in the second
312
- list is the labels with shape (num_boxes, ). The length of
313
- both lists should be equal to batch_size. Otherwise return
314
- value is a tuple of two tensors, the first tensor is the
315
- boxes with scores, the second tensor is the labels, both
316
- have the same shape as the first case.
317
- """
318
- if isinstance(cls_score, list):
319
- cls_score = sum(cls_score) / float(len(cls_score))
320
-
321
- scores = F.softmax(
322
- cls_score, dim=-1) if cls_score is not None else None
323
-
324
- batch_mode = True
325
- if rois.ndim == 2:
326
- # e.g. AugTest, Cascade R-CNN, HTC, SCNet...
327
- batch_mode = False
328
-
329
- # add batch dimension
330
- if scores is not None:
331
- scores = scores.unsqueeze(0)
332
- if bbox_pred is not None:
333
- bbox_pred = bbox_pred.unsqueeze(0)
334
- rois = rois.unsqueeze(0)
335
-
336
- if bbox_pred is not None:
337
- bboxes = self.bbox_coder.decode(
338
- rois[..., 1:], bbox_pred, max_shape=img_shape)
339
- else:
340
- bboxes = rois[..., 1:].clone()
341
- if img_shape is not None:
342
- max_shape = bboxes.new_tensor(img_shape)[..., :2]
343
- min_xy = bboxes.new_tensor(0)
344
- max_xy = torch.cat(
345
- [max_shape] * 2, dim=-1).flip(-1).unsqueeze(-2)
346
- bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
347
- bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
348
-
349
- if rescale and bboxes.size(-2) > 0:
350
- if not isinstance(scale_factor, tuple):
351
- scale_factor = tuple([scale_factor])
352
- # B, 1, bboxes.size(-1)
353
- scale_factor = bboxes.new_tensor(scale_factor).unsqueeze(1).repeat(
354
- 1, 1,
355
- bboxes.size(-1) // 4)
356
- bboxes /= scale_factor
357
-
358
- det_bboxes = []
359
- det_labels = []
360
- for (bbox, score) in zip(bboxes, scores):
361
- if cfg is not None:
362
- det_bbox, det_label = multiclass_nms(bbox, score,
363
- cfg.score_thr, cfg.nms,
364
- cfg.max_per_img)
365
- else:
366
- det_bbox, det_label = bbox, score
367
- det_bboxes.append(det_bbox)
368
- det_labels.append(det_label)
369
-
370
- if not batch_mode:
371
- det_bboxes = det_bboxes[0]
372
- det_labels = det_labels[0]
373
- return det_bboxes, det_labels
374
-
375
- @force_fp32(apply_to=('bbox_preds', ))
376
- def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
377
- """Refine bboxes during training.
378
-
379
- Args:
380
- rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,
381
- and bs is the sampled RoIs per image. The first column is
382
- the image id and the next 4 columns are x1, y1, x2, y2.
383
- labels (Tensor): Shape (n*bs, ).
384
- bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class).
385
- pos_is_gts (list[Tensor]): Flags indicating if each positive bbox
386
- is a gt bbox.
387
- img_metas (list[dict]): Meta info of each image.
388
-
389
- Returns:
390
- list[Tensor]: Refined bboxes of each image in a mini-batch.
391
-
392
- Example:
393
- >>> # xdoctest: +REQUIRES(module:kwarray)
394
- >>> import kwarray
395
- >>> import numpy as np
396
- >>> from mmdet.core.bbox.demodata import random_boxes
397
- >>> self = BBoxHead(reg_class_agnostic=True)
398
- >>> n_roi = 2
399
- >>> n_img = 4
400
- >>> scale = 512
401
- >>> rng = np.random.RandomState(0)
402
- >>> img_metas = [{'img_shape': (scale, scale)}
403
- ... for _ in range(n_img)]
404
- >>> # Create rois in the expected format
405
- >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)
406
- >>> img_ids = torch.randint(0, n_img, (n_roi,))
407
- >>> img_ids = img_ids.float()
408
- >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)
409
- >>> # Create other args
410
- >>> labels = torch.randint(0, 2, (n_roi,)).long()
411
- >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)
412
- >>> # For each image, pretend random positive boxes are gts
413
- >>> is_label_pos = (labels.numpy() > 0).astype(np.int)
414
- >>> lbl_per_img = kwarray.group_items(is_label_pos,
415
- ... img_ids.numpy())
416
- >>> pos_per_img = [sum(lbl_per_img.get(gid, []))
417
- ... for gid in range(n_img)]
418
- >>> pos_is_gts = [
419
- >>> torch.randint(0, 2, (npos,)).byte().sort(
420
- >>> descending=True)[0]
421
- >>> for npos in pos_per_img
422
- >>> ]
423
- >>> bboxes_list = self.refine_bboxes(rois, labels, bbox_preds,
424
- >>> pos_is_gts, img_metas)
425
- >>> print(bboxes_list)
426
- """
427
- img_ids = rois[:, 0].long().unique(sorted=True)
428
- assert img_ids.numel() <= len(img_metas)
429
-
430
- bboxes_list = []
431
- for i in range(len(img_metas)):
432
- inds = torch.nonzero(
433
- rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
434
- num_rois = inds.numel()
435
-
436
- bboxes_ = rois[inds, 1:]
437
- label_ = labels[inds]
438
- bbox_pred_ = bbox_preds[inds]
439
- img_meta_ = img_metas[i]
440
- pos_is_gts_ = pos_is_gts[i]
441
-
442
- bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
443
- img_meta_)
444
-
445
- # filter gt bboxes
446
- pos_keep = 1 - pos_is_gts_
447
- keep_inds = pos_is_gts_.new_ones(num_rois)
448
- keep_inds[:len(pos_is_gts_)] = pos_keep
449
-
450
- bboxes_list.append(bboxes[keep_inds.type(torch.bool)])
451
-
452
- return bboxes_list
453
-
454
- @force_fp32(apply_to=('bbox_pred', ))
455
- def regress_by_class(self, rois, label, bbox_pred, img_meta):
456
- """Regress the bbox for the predicted class. Used in Cascade R-CNN.
457
-
458
- Args:
459
- rois (Tensor): shape (n, 4) or (n, 5)
460
- label (Tensor): shape (n, )
461
- bbox_pred (Tensor): shape (n, 4*(#class)) or (n, 4)
462
- img_meta (dict): Image meta info.
463
-
464
- Returns:
465
- Tensor: Regressed bboxes, the same shape as input rois.
466
- """
467
- assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape)
468
-
469
- if not self.reg_class_agnostic:
470
- label = label * 4
471
- inds = torch.stack((label, label + 1, label + 2, label + 3), 1)
472
- bbox_pred = torch.gather(bbox_pred, 1, inds)
473
- assert bbox_pred.size(1) == 4
474
-
475
- if rois.size(1) == 4:
476
- new_rois = self.bbox_coder.decode(
477
- rois, bbox_pred, max_shape=img_meta['img_shape'])
478
- else:
479
- bboxes = self.bbox_coder.decode(
480
- rois[:, 1:], bbox_pred, max_shape=img_meta['img_shape'])
481
- new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
482
-
483
- return new_rois
 
spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x512_40k_voc12aug.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './pspnet_r50-d8_512x512_40k_voc12aug.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
spaces/Anni123/AuRoRA/README.md DELETED
@@ -1,6 +0,0 @@
- ---
- title: Unified-Adapter
- app_file: app.py
- sdk: gradio
- sdk_version: 3.36.1
- ---
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/padding.py DELETED
@@ -1,36 +0,0 @@
- # Copyright (c) OpenMMLab. All rights reserved.
- import torch.nn as nn
-
- from .registry import PADDING_LAYERS
-
- PADDING_LAYERS.register_module('zero', module=nn.ZeroPad2d)
- PADDING_LAYERS.register_module('reflect', module=nn.ReflectionPad2d)
- PADDING_LAYERS.register_module('replicate', module=nn.ReplicationPad2d)
-
-
- def build_padding_layer(cfg, *args, **kwargs):
- """Build padding layer.
-
- Args:
- cfg (None or dict): The padding layer config, which should contain:
- - type (str): Layer type.
- - layer args: Args needed to instantiate a padding layer.
-
- Returns:
- nn.Module: Created padding layer.
- """
- if not isinstance(cfg, dict):
- raise TypeError('cfg must be a dict')
- if 'type' not in cfg:
- raise KeyError('the cfg dict must contain the key "type"')
-
- cfg_ = cfg.copy()
- padding_type = cfg_.pop('type')
- if padding_type not in PADDING_LAYERS:
- raise KeyError(f'Unrecognized padding type {padding_type}.')
- else:
- padding_layer = PADDING_LAYERS.get(padding_type)
-
- layer = padding_layer(*args, **kwargs, **cfg_)
-
- return layer
 
spaces/Ariharasudhan/YoloV5/utils/google_app_engine/Dockerfile DELETED
@@ -1,25 +0,0 @@
- FROM gcr.io/google-appengine/python
-
- # Create a virtualenv for dependencies. This isolates these packages from
- # system-level packages.
- # Use -p python3 or -p python3.7 to select python version. Default is version 2.
- RUN virtualenv /env -p python3
-
- # Setting these environment variables are the same as running
- # source /env/bin/activate.
- ENV VIRTUAL_ENV /env
- ENV PATH /env/bin:$PATH
-
- RUN apt-get update && apt-get install -y python-opencv
-
- # Copy the application's requirements.txt and run pip to install all
- # dependencies into the virtualenv.
- ADD requirements.txt /app/requirements.txt
- RUN pip install -r /app/requirements.txt
-
- # Add the application source code.
- ADD . /app
-
- # Run a WSGI server to serve the application. gunicorn must be declared as
- # a dependency in requirements.txt.
- CMD gunicorn -b :$PORT main:app
 
spaces/Armored-Atom/gpt2/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Gpt2
- emoji: 🚀
- colorFrom: blue
- colorTo: blue
- sdk: gradio
- sdk_version: 3.19.1
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/GroundingDINO/backbone/position_encoding.py DELETED
@@ -1,186 +0,0 @@
1
- # ------------------------------------------------------------------------
2
- # Grounding DINO
3
- # url: https://github.com/IDEA-Research/GroundingDINO
4
- # Copyright (c) 2023 IDEA. All Rights Reserved.
5
- # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
- # ------------------------------------------------------------------------
7
- # DINO
8
- # Copyright (c) 2022 IDEA. All Rights Reserved.
9
- # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
10
- # ------------------------------------------------------------------------
11
- # Conditional DETR
12
- # Copyright (c) 2021 Microsoft. All Rights Reserved.
13
- # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
14
- # ------------------------------------------------------------------------
15
- # Copied from DETR (https://github.com/facebookresearch/detr)
16
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
17
- # ------------------------------------------------------------------------
18
-
19
- """
20
- Various positional encodings for the transformer.
21
- """
22
- import math
23
-
24
- import torch
25
- from torch import nn
26
-
27
- from groundingdino.util.misc import NestedTensor
28
-
29
-
30
- class PositionEmbeddingSine(nn.Module):
31
- """
32
- This is a more standard version of the position embedding, very similar to the one
33
- used by the Attention is all you need paper, generalized to work on images.
34
- """
35
-
36
- def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
37
- super().__init__()
38
- self.num_pos_feats = num_pos_feats
39
- self.temperature = temperature
40
- self.normalize = normalize
41
- if scale is not None and normalize is False:
42
- raise ValueError("normalize should be True if scale is passed")
43
- if scale is None:
44
- scale = 2 * math.pi
45
- self.scale = scale
46
-
47
- def forward(self, tensor_list: NestedTensor):
48
- x = tensor_list.tensors
49
- mask = tensor_list.mask
50
- assert mask is not None
51
- not_mask = ~mask
52
- y_embed = not_mask.cumsum(1, dtype=torch.float32)
53
- x_embed = not_mask.cumsum(2, dtype=torch.float32)
54
- if self.normalize:
55
- eps = 1e-6
56
- # if os.environ.get("SHILONG_AMP", None) == '1':
57
- # eps = 1e-4
58
- # else:
59
- # eps = 1e-6
60
- y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
61
- x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
62
-
63
- dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
64
- dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
65
-
66
- pos_x = x_embed[:, :, :, None] / dim_t
67
- pos_y = y_embed[:, :, :, None] / dim_t
68
- pos_x = torch.stack(
69
- (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
70
- ).flatten(3)
71
- pos_y = torch.stack(
72
- (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
73
- ).flatten(3)
74
- pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
75
- return pos
76
-
77
-
78
- class PositionEmbeddingSineHW(nn.Module):
79
- """
80
- This is a more standard version of the position embedding, very similar to the one
81
- used by the Attention is all you need paper, generalized to work on images.
82
- """
83
-
84
- def __init__(
85
- self, num_pos_feats=64, temperatureH=10000, temperatureW=10000, normalize=False, scale=None
86
- ):
87
- super().__init__()
88
- self.num_pos_feats = num_pos_feats
89
- self.temperatureH = temperatureH
90
- self.temperatureW = temperatureW
91
- self.normalize = normalize
92
- if scale is not None and normalize is False:
93
- raise ValueError("normalize should be True if scale is passed")
94
- if scale is None:
95
- scale = 2 * math.pi
96
- self.scale = scale
97
-
98
- def forward(self, tensor_list: NestedTensor):
99
- x = tensor_list.tensors
100
- mask = tensor_list.mask
101
- assert mask is not None
102
- not_mask = ~mask
103
- y_embed = not_mask.cumsum(1, dtype=torch.float32)
104
- x_embed = not_mask.cumsum(2, dtype=torch.float32)
105
-
106
- # import ipdb; ipdb.set_trace()
107
-
108
- if self.normalize:
109
- eps = 1e-6
110
- y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
111
- x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
112
-
113
- dim_tx = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
114
- dim_tx = self.temperatureW ** (2 * (torch.div(dim_tx, 2, rounding_mode='floor')) / self.num_pos_feats)
115
- pos_x = x_embed[:, :, :, None] / dim_tx
116
-
117
- dim_ty = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
118
- dim_ty = self.temperatureH ** (2 * (torch.div(dim_ty, 2, rounding_mode='floor')) / self.num_pos_feats)
119
- pos_y = y_embed[:, :, :, None] / dim_ty
120
-
121
- pos_x = torch.stack(
122
- (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
123
- ).flatten(3)
124
- pos_y = torch.stack(
125
- (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
126
- ).flatten(3)
127
- pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
128
-
129
- # import ipdb; ipdb.set_trace()
130
-
131
- return pos
132
-
133
-
134
- class PositionEmbeddingLearned(nn.Module):
135
- """
136
- Absolute pos embedding, learned.
137
- """
138
-
139
- def __init__(self, num_pos_feats=256):
140
- super().__init__()
141
- self.row_embed = nn.Embedding(50, num_pos_feats)
142
- self.col_embed = nn.Embedding(50, num_pos_feats)
143
- self.reset_parameters()
144
-
145
- def reset_parameters(self):
146
- nn.init.uniform_(self.row_embed.weight)
147
- nn.init.uniform_(self.col_embed.weight)
148
-
149
- def forward(self, tensor_list: NestedTensor):
150
- x = tensor_list.tensors
151
- h, w = x.shape[-2:]
152
- i = torch.arange(w, device=x.device)
153
- j = torch.arange(h, device=x.device)
154
- x_emb = self.col_embed(i)
155
- y_emb = self.row_embed(j)
156
- pos = (
157
- torch.cat(
158
- [
159
- x_emb.unsqueeze(0).repeat(h, 1, 1),
160
- y_emb.unsqueeze(1).repeat(1, w, 1),
161
- ],
162
- dim=-1,
163
- )
164
- .permute(2, 0, 1)
165
- .unsqueeze(0)
166
- .repeat(x.shape[0], 1, 1, 1)
167
- )
168
- return pos
169
-
170
-
171
- def build_position_encoding(args):
172
- N_steps = args.hidden_dim // 2
173
- if args.position_embedding in ("v2", "sine"):
174
- # TODO find a better way of exposing other arguments
175
- position_embedding = PositionEmbeddingSineHW(
176
- N_steps,
177
- temperatureH=args.pe_temperatureH,
178
- temperatureW=args.pe_temperatureW,
179
- normalize=True,
180
- )
181
- elif args.position_embedding in ("v3", "learned"):
182
- position_embedding = PositionEmbeddingLearned(N_steps)
183
- else:
184
- raise ValueError(f"not supported {args.position_embedding}")
185
-
186
- return position_embedding
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/build_meta.py DELETED
@@ -1,511 +0,0 @@
1
- """A PEP 517 interface to setuptools
2
-
3
- Previously, when a user or a command line tool (let's call it a "frontend")
4
- needed to make a request of setuptools to take a certain action, for
5
- example, generating a list of installation requirements, the frontend would
6
- would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
7
-
8
- PEP 517 defines a different method of interfacing with setuptools. Rather
9
- than calling "setup.py" directly, the frontend should:
10
-
11
- 1. Set the current directory to the directory with a setup.py file
12
- 2. Import this module into a safe python interpreter (one in which
13
- setuptools can potentially set global variables or crash hard).
14
- 3. Call one of the functions defined in PEP 517.
15
-
16
- What each function does is defined in PEP 517. However, here is a "casual"
17
- definition of the functions (this definition should not be relied on for
18
- bug reports or API stability):
19
-
20
- - `build_wheel`: build a wheel in the folder and return the basename
21
- - `get_requires_for_build_wheel`: get the `setup_requires` to build
22
- - `prepare_metadata_for_build_wheel`: get the `install_requires`
23
- - `build_sdist`: build an sdist in the folder and return the basename
24
- - `get_requires_for_build_sdist`: get the `setup_requires` to build
25
-
26
- Again, this is not a formal definition! Just a "taste" of the module.
27
- """
28
-
29
- import io
30
- import os
31
- import shlex
32
- import sys
33
- import tokenize
34
- import shutil
35
- import contextlib
36
- import tempfile
37
- import warnings
38
- from pathlib import Path
39
- from typing import Dict, Iterator, List, Optional, Union
40
-
41
- import setuptools
42
- import distutils
43
- from . import errors
44
- from ._path import same_path
45
- from ._reqs import parse_strings
46
- from ._deprecation_warning import SetuptoolsDeprecationWarning
47
- from distutils.util import strtobool
48
-
49
-
50
- __all__ = ['get_requires_for_build_sdist',
51
- 'get_requires_for_build_wheel',
52
- 'prepare_metadata_for_build_wheel',
53
- 'build_wheel',
54
- 'build_sdist',
55
- 'get_requires_for_build_editable',
56
- 'prepare_metadata_for_build_editable',
57
- 'build_editable',
58
- '__legacy__',
59
- 'SetupRequirementsError']
60
-
61
- SETUPTOOLS_ENABLE_FEATURES = os.getenv("SETUPTOOLS_ENABLE_FEATURES", "").lower()
62
- LEGACY_EDITABLE = "legacy-editable" in SETUPTOOLS_ENABLE_FEATURES.replace("_", "-")
63
-
64
-
65
- class SetupRequirementsError(BaseException):
66
- def __init__(self, specifiers):
67
- self.specifiers = specifiers
68
-
69
-
70
- class Distribution(setuptools.dist.Distribution):
71
- def fetch_build_eggs(self, specifiers):
72
- specifier_list = list(parse_strings(specifiers))
73
-
74
- raise SetupRequirementsError(specifier_list)
75
-
76
- @classmethod
77
- @contextlib.contextmanager
78
- def patch(cls):
79
- """
80
- Replace
81
- distutils.dist.Distribution with this class
82
- for the duration of this context.
83
- """
84
- orig = distutils.core.Distribution
85
- distutils.core.Distribution = cls
86
- try:
87
- yield
88
- finally:
89
- distutils.core.Distribution = orig
90
-
91
-
92
- @contextlib.contextmanager
93
- def no_install_setup_requires():
94
- """Temporarily disable installing setup_requires
95
-
96
- Under PEP 517, the backend reports build dependencies to the frontend,
97
- and the frontend is responsible for ensuring they're installed.
98
- So setuptools (acting as a backend) should not try to install them.
99
- """
100
- orig = setuptools._install_setup_requires
101
- setuptools._install_setup_requires = lambda attrs: None
102
- try:
103
- yield
104
- finally:
105
- setuptools._install_setup_requires = orig
106
-
107
-
108
- def _get_immediate_subdirectories(a_dir):
109
- return [name for name in os.listdir(a_dir)
110
- if os.path.isdir(os.path.join(a_dir, name))]
111
-
112
-
113
- def _file_with_extension(directory, extension):
114
- matching = (
115
- f for f in os.listdir(directory)
116
- if f.endswith(extension)
117
- )
118
- try:
119
- file, = matching
120
- except ValueError:
121
- raise ValueError(
122
- 'No distribution was found. Ensure that `setup.py` '
123
- 'is not empty and that it calls `setup()`.')
124
- return file
125
-
126
-
127
- def _open_setup_script(setup_script):
128
- if not os.path.exists(setup_script):
129
- # Supply a default setup.py
130
- return io.StringIO(u"from setuptools import setup; setup()")
131
-
132
- return getattr(tokenize, 'open', open)(setup_script)
133
-
134
-
135
- @contextlib.contextmanager
136
- def suppress_known_deprecation():
137
- with warnings.catch_warnings():
138
- warnings.filterwarnings('ignore', 'setup.py install is deprecated')
139
- yield
140
-
141
-
142
- _ConfigSettings = Optional[Dict[str, Union[str, List[str], None]]]
143
- """
144
- Currently the user can run::
145
-
146
- pip install -e . --config-settings key=value
147
- python -m build -C--key=value -C key=value
148
-
149
- - pip will pass both key and value as strings and overwriting repeated keys
150
- (pypa/pip#11059).
151
- - build will accumulate values associated with repeated keys in a list.
152
- It will also accept keys with no associated value.
153
- This means that an option passed by build can be ``str | list[str] | None``.
154
- - PEP 517 specifies that ``config_settings`` is an optional dict.
155
- """
156
-
157
-
158
- class _ConfigSettingsTranslator:
159
- """Translate ``config_settings`` into distutils-style command arguments.
160
- Only a limited number of options is currently supported.
161
- """
162
- # See pypa/setuptools#1928 pypa/setuptools#2491
163
-
164
- def _get_config(self, key: str, config_settings: _ConfigSettings) -> List[str]:
165
- """
166
- Get the value of a specific key in ``config_settings`` as a list of strings.
167
-
168
- >>> fn = _ConfigSettingsTranslator()._get_config
169
- >>> fn("--global-option", None)
170
- []
171
- >>> fn("--global-option", {})
172
- []
173
- >>> fn("--global-option", {'--global-option': 'foo'})
174
- ['foo']
175
- >>> fn("--global-option", {'--global-option': ['foo']})
176
- ['foo']
177
- >>> fn("--global-option", {'--global-option': 'foo'})
178
- ['foo']
179
- >>> fn("--global-option", {'--global-option': 'foo bar'})
180
- ['foo', 'bar']
181
- """
182
- cfg = config_settings or {}
183
- opts = cfg.get(key) or []
184
- return shlex.split(opts) if isinstance(opts, str) else opts
185
-
186
- def _valid_global_options(self):
187
- """Global options accepted by setuptools (e.g. quiet or verbose)."""
188
- options = (opt[:2] for opt in setuptools.dist.Distribution.global_options)
189
- return {flag for long_and_short in options for flag in long_and_short if flag}
190
-
191
- def _global_args(self, config_settings: _ConfigSettings) -> Iterator[str]:
192
- """
193
- Let the user specify ``verbose`` or ``quiet`` + escape hatch via
194
- ``--global-option``.
195
- Note: ``-v``, ``-vv``, ``-vvv`` have similar effects in setuptools,
196
- so we just have to cover the basic scenario ``-v``.
197
-
198
- >>> fn = _ConfigSettingsTranslator()._global_args
199
- >>> list(fn(None))
200
- []
201
- >>> list(fn({"verbose": "False"}))
202
- ['-q']
203
- >>> list(fn({"verbose": "1"}))
204
- ['-v']
205
- >>> list(fn({"--verbose": None}))
206
- ['-v']
207
- >>> list(fn({"verbose": "true", "--global-option": "-q --no-user-cfg"}))
208
- ['-v', '-q', '--no-user-cfg']
209
- >>> list(fn({"--quiet": None}))
210
- ['-q']
211
- """
212
- cfg = config_settings or {}
213
- falsey = {"false", "no", "0", "off"}
214
- if "verbose" in cfg or "--verbose" in cfg:
215
- level = str(cfg.get("verbose") or cfg.get("--verbose") or "1")
216
- yield ("-q" if level.lower() in falsey else "-v")
217
- if "quiet" in cfg or "--quiet" in cfg:
218
- level = str(cfg.get("quiet") or cfg.get("--quiet") or "1")
219
- yield ("-v" if level.lower() in falsey else "-q")
220
-
221
- valid = self._valid_global_options()
222
- args = self._get_config("--global-option", config_settings)
223
- yield from (arg for arg in args if arg.strip("-") in valid)
224
-
225
- def __dist_info_args(self, config_settings: _ConfigSettings) -> Iterator[str]:
226
- """
227
- The ``dist_info`` command accepts ``tag-date`` and ``tag-build``.
228
-
229
- .. warning::
230
- We cannot use this yet as it requires the ``sdist`` and ``bdist_wheel``
231
- commands run in ``build_sdist`` and ``build_wheel`` to re-use the egg-info
232
- directory created in ``prepare_metadata_for_build_wheel``.
233
-
234
- >>> fn = _ConfigSettingsTranslator()._ConfigSettingsTranslator__dist_info_args
235
- >>> list(fn(None))
236
- []
237
- >>> list(fn({"tag-date": "False"}))
238
- ['--no-date']
239
- >>> list(fn({"tag-date": None}))
240
- ['--no-date']
241
- >>> list(fn({"tag-date": "true", "tag-build": ".a"}))
242
- ['--tag-date', '--tag-build', '.a']
243
- """
244
- cfg = config_settings or {}
245
- if "tag-date" in cfg:
246
- val = strtobool(str(cfg["tag-date"] or "false"))
247
- yield ("--tag-date" if val else "--no-date")
248
- if "tag-build" in cfg:
249
- yield from ["--tag-build", str(cfg["tag-build"])]
250
-
251
- def _editable_args(self, config_settings: _ConfigSettings) -> Iterator[str]:
252
- """
253
- The ``editable_wheel`` command accepts ``editable-mode=strict``.
254
-
255
- >>> fn = _ConfigSettingsTranslator()._editable_args
256
- >>> list(fn(None))
257
- []
258
- >>> list(fn({"editable-mode": "strict"}))
259
- ['--mode', 'strict']
260
- """
261
- cfg = config_settings or {}
262
- mode = cfg.get("editable-mode") or cfg.get("editable_mode")
263
- if not mode:
264
- return
265
- yield from ["--mode", str(mode)]
266
-
267
- def _arbitrary_args(self, config_settings: _ConfigSettings) -> Iterator[str]:
268
- """
269
- Users may expect to pass arbitrary lists of arguments to a command
270
- via "--global-option" (example provided in PEP 517 of a "escape hatch").
271
-
272
- >>> fn = _ConfigSettingsTranslator()._arbitrary_args
273
- >>> list(fn(None))
274
- []
275
- >>> list(fn({}))
276
- []
277
- >>> list(fn({'--build-option': 'foo'}))
278
- ['foo']
279
- >>> list(fn({'--build-option': ['foo']}))
280
- ['foo']
281
- >>> list(fn({'--build-option': 'foo'}))
282
- ['foo']
283
- >>> list(fn({'--build-option': 'foo bar'}))
284
- ['foo', 'bar']
285
- >>> warnings.simplefilter('error', SetuptoolsDeprecationWarning)
286
- >>> list(fn({'--global-option': 'foo'})) # doctest: +IGNORE_EXCEPTION_DETAIL
287
- Traceback (most recent call last):
288
- SetuptoolsDeprecationWarning: ...arguments given via `--global-option`...
289
- """
290
- args = self._get_config("--global-option", config_settings)
291
- global_opts = self._valid_global_options()
292
- bad_args = []
293
-
294
- for arg in args:
295
- if arg.strip("-") not in global_opts:
296
- bad_args.append(arg)
297
- yield arg
298
-
299
- yield from self._get_config("--build-option", config_settings)
300
-
301
- if bad_args:
302
- msg = f"""
303
- The arguments {bad_args!r} were given via `--global-option`.
304
- Please use `--build-option` instead,
305
- `--global-option` is reserved to flags like `--verbose` or `--quiet`.
306
- """
307
- warnings.warn(msg, SetuptoolsDeprecationWarning)
308
-
309
-
310
- class _BuildMetaBackend(_ConfigSettingsTranslator):
311
- def _get_build_requires(self, config_settings, requirements):
312
- sys.argv = [
313
- *sys.argv[:1],
314
- *self._global_args(config_settings),
315
- "egg_info",
316
- *self._arbitrary_args(config_settings),
317
- ]
318
- try:
319
- with Distribution.patch():
320
- self.run_setup()
321
- except SetupRequirementsError as e:
322
- requirements += e.specifiers
323
-
324
- return requirements
325
-
326
- def run_setup(self, setup_script='setup.py'):
327
- # Note that we can reuse our build directory between calls
328
- # Correctness comes first, then optimization later
329
- __file__ = setup_script
330
- __name__ = '__main__'
331
-
332
- with _open_setup_script(__file__) as f:
333
- code = f.read().replace(r'\r\n', r'\n')
334
-
335
- exec(code, locals())
336
-
337
- def get_requires_for_build_wheel(self, config_settings=None):
338
- return self._get_build_requires(config_settings, requirements=['wheel'])
339
-
340
- def get_requires_for_build_sdist(self, config_settings=None):
341
- return self._get_build_requires(config_settings, requirements=[])
342
-
343
- def _bubble_up_info_directory(self, metadata_directory: str, suffix: str) -> str:
344
- """
345
- PEP 517 requires that the .dist-info directory be placed in the
346
- metadata_directory. To comply, we MUST copy the directory to the root.
347
-
348
- Returns the basename of the info directory, e.g. `proj-0.0.0.dist-info`.
349
- """
350
- info_dir = self._find_info_directory(metadata_directory, suffix)
351
- if not same_path(info_dir.parent, metadata_directory):
352
- shutil.move(str(info_dir), metadata_directory)
353
- # PEP 517 allow other files and dirs to exist in metadata_directory
354
- return info_dir.name
355
-
356
- def _find_info_directory(self, metadata_directory: str, suffix: str) -> Path:
357
- for parent, dirs, _ in os.walk(metadata_directory):
358
- candidates = [f for f in dirs if f.endswith(suffix)]
359
-
360
- if len(candidates) != 0 or len(dirs) != 1:
361
- assert len(candidates) == 1, f"Multiple {suffix} directories found"
362
- return Path(parent, candidates[0])
363
-
364
- msg = f"No {suffix} directory found in {metadata_directory}"
365
- raise errors.InternalError(msg)
366
-
367
- def prepare_metadata_for_build_wheel(self, metadata_directory,
368
- config_settings=None):
369
- sys.argv = [
370
- *sys.argv[:1],
371
- *self._global_args(config_settings),
372
- "dist_info",
373
- "--output-dir", metadata_directory,
374
- "--keep-egg-info",
375
- ]
376
- with no_install_setup_requires():
377
- self.run_setup()
378
-
379
- self._bubble_up_info_directory(metadata_directory, ".egg-info")
380
- return self._bubble_up_info_directory(metadata_directory, ".dist-info")
381
-
382
- def _build_with_temp_dir(self, setup_command, result_extension,
383
- result_directory, config_settings):
384
- result_directory = os.path.abspath(result_directory)
385
-
386
- # Build in a temporary directory, then copy to the target.
387
- os.makedirs(result_directory, exist_ok=True)
388
- with tempfile.TemporaryDirectory(dir=result_directory) as tmp_dist_dir:
389
- sys.argv = [
390
- *sys.argv[:1],
391
- *self._global_args(config_settings),
392
- *setup_command,
393
- "--dist-dir", tmp_dist_dir,
394
- *self._arbitrary_args(config_settings),
395
- ]
396
- with no_install_setup_requires():
397
- self.run_setup()
398
-
399
- result_basename = _file_with_extension(
400
- tmp_dist_dir, result_extension)
401
- result_path = os.path.join(result_directory, result_basename)
402
- if os.path.exists(result_path):
403
- # os.rename will fail overwriting on non-Unix.
404
- os.remove(result_path)
405
- os.rename(os.path.join(tmp_dist_dir, result_basename), result_path)
406
-
407
- return result_basename
408
-
409
- def build_wheel(self, wheel_directory, config_settings=None,
410
- metadata_directory=None):
411
- with suppress_known_deprecation():
412
- return self._build_with_temp_dir(['bdist_wheel'], '.whl',
413
- wheel_directory, config_settings)
414
-
415
- def build_sdist(self, sdist_directory, config_settings=None):
416
- return self._build_with_temp_dir(['sdist', '--formats', 'gztar'],
417
- '.tar.gz', sdist_directory,
418
- config_settings)
419
-
420
- def _get_dist_info_dir(self, metadata_directory: Optional[str]) -> Optional[str]:
421
- if not metadata_directory:
422
- return None
423
- dist_info_candidates = list(Path(metadata_directory).glob("*.dist-info"))
424
- assert len(dist_info_candidates) <= 1
425
- return str(dist_info_candidates[0]) if dist_info_candidates else None
426
-
427
- if not LEGACY_EDITABLE:
428
-
429
- # PEP660 hooks:
430
- # build_editable
431
- # get_requires_for_build_editable
432
- # prepare_metadata_for_build_editable
433
- def build_editable(
434
- self, wheel_directory, config_settings=None, metadata_directory=None
435
- ):
436
- # XXX can or should we hide our editable_wheel command normally?
437
- info_dir = self._get_dist_info_dir(metadata_directory)
438
- opts = ["--dist-info-dir", info_dir] if info_dir else []
439
- cmd = ["editable_wheel", *opts, *self._editable_args(config_settings)]
440
- with suppress_known_deprecation():
441
- return self._build_with_temp_dir(
442
- cmd, ".whl", wheel_directory, config_settings
443
- )
444
-
445
- def get_requires_for_build_editable(self, config_settings=None):
446
- return self.get_requires_for_build_wheel(config_settings)
447
-
448
- def prepare_metadata_for_build_editable(self, metadata_directory,
449
- config_settings=None):
450
- return self.prepare_metadata_for_build_wheel(
451
- metadata_directory, config_settings
452
- )
453
-
454
-
455
- class _BuildMetaLegacyBackend(_BuildMetaBackend):
456
- """Compatibility backend for setuptools
457
-
458
- This is a version of setuptools.build_meta that endeavors
459
- to maintain backwards
460
- compatibility with pre-PEP 517 modes of invocation. It
461
- exists as a temporary
462
- bridge between the old packaging mechanism and the new
463
- packaging mechanism,
464
- and will eventually be removed.
465
- """
466
- def run_setup(self, setup_script='setup.py'):
467
- # In order to maintain compatibility with scripts assuming that
468
- # the setup.py script is in a directory on the PYTHONPATH, inject
469
- # '' into sys.path. (pypa/setuptools#1642)
470
- sys_path = list(sys.path) # Save the original path
471
-
472
- script_dir = os.path.dirname(os.path.abspath(setup_script))
473
- if script_dir not in sys.path:
474
- sys.path.insert(0, script_dir)
475
-
476
- # Some setup.py scripts (e.g. in pygame and numpy) use sys.argv[0] to
477
- # get the directory of the source code. They expect it to refer to the
478
- # setup.py script.
479
- sys_argv_0 = sys.argv[0]
480
- sys.argv[0] = setup_script
481
-
482
- try:
483
- super(_BuildMetaLegacyBackend,
484
- self).run_setup(setup_script=setup_script)
485
- finally:
486
- # While PEP 517 frontends should be calling each hook in a fresh
487
- # subprocess according to the standard (and thus it should not be
488
- # strictly necessary to restore the old sys.path), we'll restore
489
- # the original path so that the path manipulation does not persist
490
- # within the hook after run_setup is called.
491
- sys.path[:] = sys_path
492
- sys.argv[0] = sys_argv_0
493
-
494
-
495
- # The primary backend
496
- _BACKEND = _BuildMetaBackend()
497
-
498
- get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel
499
- get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist
500
- prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel
501
- build_wheel = _BACKEND.build_wheel
502
- build_sdist = _BACKEND.build_sdist
503
-
504
- if not LEGACY_EDITABLE:
505
- get_requires_for_build_editable = _BACKEND.get_requires_for_build_editable
506
- prepare_metadata_for_build_editable = _BACKEND.prepare_metadata_for_build_editable
507
- build_editable = _BACKEND.build_editable
508
-
509
-
510
- # The legacy backend
511
- __legacy__ = _BuildMetaLegacyBackend()
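The file removed above is setuptools' PEP 517 build backend (`setuptools/build_meta.py`). For orientation only, the hedged sketch below shows how a frontend-style caller might drive its hooks in-process; the output directory names are illustrative, and a project root containing a `setup.py` or `pyproject.toml` is assumed.

```python
# Hedged sketch, not part of the deleted file: calling the PEP 517 hooks
# exposed by setuptools.build_meta from a project root.
import os
import setuptools.build_meta as backend

os.makedirs("build/metadata", exist_ok=True)  # illustrative output locations
os.makedirs("dist", exist_ok=True)

# Extra requirements the backend wants before building a wheel.
print(backend.get_requires_for_build_wheel(config_settings=None))

# Both hooks return basenames, e.g. "proj-0.0.0.dist-info" and
# "proj-0.0.0-py3-none-any.whl".
dist_info = backend.prepare_metadata_for_build_wheel("build/metadata")
wheel = backend.build_wheel("dist", config_settings=None,
                            metadata_directory="build/metadata")
print(dist_info, wheel)
```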
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/backbone/__init__.py DELETED
@@ -1,17 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- from .build import build_backbone, BACKBONE_REGISTRY # noqa F401 isort:skip
3
-
4
- from .backbone import Backbone
5
- from .fpn import FPN
6
- from .regnet import RegNet
7
- from .resnet import (
8
- BasicStem,
9
- ResNet,
10
- ResNetBlockBase,
11
- build_resnet_backbone,
12
- make_stage,
13
- BottleneckBlock,
14
- )
15
-
16
- __all__ = [k for k in globals().keys() if not k.startswith("_")]
17
- # TODO can expose more resnet blocks after careful consideration
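The deleted `__init__.py` above re-exports detectron2's backbone registry. As a hedged illustration (the class name, layer sizes, and feature names below are invented, not part of the original file), a custom backbone is typically registered like this:

```python
# Illustrative sketch only: registering a toy backbone with BACKBONE_REGISTRY.
import torch.nn as nn
from detectron2.layers import ShapeSpec
from detectron2.modeling import BACKBONE_REGISTRY, Backbone

@BACKBONE_REGISTRY.register()
class ToyBackbone(Backbone):
    def __init__(self, cfg, input_shape):
        super().__init__()
        self.stem = nn.Conv2d(3, 64, kernel_size=7, stride=16, padding=3)

    def forward(self, image):
        # Feature maps keyed by stage name, as detectron2 heads expect.
        return {"res4": self.stem(image)}

    def output_shape(self):
        return {"res4": ShapeSpec(channels=64, stride=16)}
```

A config would then select it with `MODEL.BACKBONE.NAME = "ToyBackbone"`.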
 
spaces/Benson/text-generation/Examples/8 Bola Piscina 5.12.0 Apk Descargar.md DELETED
@@ -1,109 +0,0 @@
1
- <br />
2
- <h1>8 Piscina de bolas 5.12.0 APK Descargar: Cómo jugar el juego de billar más grande del mundo en su dispositivo Android</h1>
3
- <p>Si eres un fan de los juegos de billar, debes haber oído hablar de <strong>8 Ball Pool</strong>, el juego de billar más popular y adictivo del mundo. Desarrollado por Miniclip, este juego le permite jugar en línea o fuera de línea con millones de jugadores de diferentes países, competir en torneos, ganar monedas y artículos exclusivos, y personalizar su señal y mesa. Si eres un principiante o un profesional, encontrarás 8 Ball Pool desafiante y divertido. </p>
4
- <h2>8 bola piscina 5.12.0 apk descargar</h2><br /><p><b><b>Download Zip</b> &#10031; <a href="https://bltlly.com/2v6JB8">https://bltlly.com/2v6JB8</a></b></p><br /><br />
5
- <p>En este artículo, te diremos todo lo que necesitas saber sobre <strong>8 Ball Pool 5.12.0 APK</strong>, la última versión del juego que fue lanzado el 20 de junio de 2023. Te mostraremos cómo descargarlo e instalarlo en tu dispositivo Android, cómo jugarlo online o offline, y cómo mejorar tus habilidades y ganar más partidos con algunos consejos y trucos. </p>
6
- <h2>¿Qué es la piscina de bolas 8? </h2>
7
- <h3>Una breve introducción al juego y sus características</h3>
8
- <p>8 Ball Pool es un juego de billar basado en física de billar 3D real, donde puedes jugar contra tus amigos u otros jugadores en línea en diferentes modos, como partidos 1-a-1, torneos o minijuegos. También puede jugar sin conexión en el modo de práctica o contra el ordenador. </p>
9
- <p>El juego tiene un sistema de niveles que te empareja con jugadores de nivel de habilidad similar, y un sistema de clasificación que muestra tu progreso en la clasificación global. También puede unirse a clubes y chatear con otros miembros, o crear su propio club e invitar a sus amigos. </p>
10
- <p>El juego tiene una variedad de pistas y mesas que se puede desbloquear o comprar con monedas o dinero en efectivo, las monedas del juego. También puede obtener monedas o dinero en efectivo girando la rueda, viendo videos, completando ofertas o comprándolas con dinero real. Las monedas y el dinero en efectivo se pueden usar para ingresar partidas de apuestas más altas, comprar artículos en la tienda de la piscina o actualizar sus señales. </p>
11
- <h3>La última versión 5.12.0 y lo nuevo en ella</h3>
12
-
13
- <ul>
14
- <li>Una nueva temporada con nuevas recompensas y desafíos</li>
15
- <li>Una nueva función que te permite jugar por el cambio y donar a la Global Gift Foundation</li>
16
- <li> Algunos ajustes y correcciones de errores que hacen el juego más suave y más estable</li>
17
- </ul>
18
- <p>Para disfrutar de estas nuevas características, es necesario descargar e instalar la última versión de 8 Ball Pool en su dispositivo Android. </p>
19
- <h2> Cómo descargar e instalar 8 bola piscina 5.12.0 APK en su dispositivo Android</h2>
20
- <h3>Los pasos para descargar e instalar el archivo APK desde una fuente de confianza</h3>
21
- <p>Si desea descargar e instalar 8 Ball Pool 5.12.0 APK en su dispositivo Android, es necesario seguir estos pasos:</p>
22
- <p></p>
23
- <ol>
24
- <li>Ir a un sitio web de confianza que proporciona archivos APK para aplicaciones de Android, tales como [Soft onic] o [APKPure]. </li>
25
- <li>Búsqueda de 8 Ball Pool 5.12.0 APK y descargarlo en su dispositivo. Asegúrese de que el tamaño del archivo es de aproximadamente 75 MB y el nombre del archivo es com.miniclip.eightballpool_5.12.0-2410_minAPI16(armeabi-v7a,x86)(nodpi)_apkmirror.com.apk. </li>
26
- <li>Antes de instalar el archivo APK, es necesario habilitar la instalación de aplicaciones de fuentes desconocidas en el dispositivo. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. </li>
27
- <li>Busque el archivo APK descargado en su dispositivo y toque en él para iniciar el proceso de instalación. Siga las instrucciones de la pantalla y espere a que termine la instalación. </li>
28
- <li>Una vez que se realiza la instalación, puede iniciar el juego desde el cajón de la aplicación o la pantalla de inicio y disfrutar de jugar 8 Ball Pool 5.12.0 APK en su dispositivo Android. </li>
29
- </ol>
30
- <h3>Los permisos y requisitos para la aplicación</h3>
31
- <p>Antes de instalar 8 Ball Pool 5.12.0 APK en su dispositivo Android, usted debe ser consciente de los permisos y requisitos para la aplicación. La aplicación requiere los siguientes permisos:</p>
32
- <ul>
33
- <li> Acceso al almacenamiento, cámara, micrófono y ubicación de su dispositivo</li>
34
- <li>Acceso a las conexiones de red de su dispositivo, conexiones Wi-Fi y configuración de Bluetooth</li>
35
-
36
- <li>Acceso a la vibración de su dispositivo y evitar que duerma</li>
37
- </ul>
38
- <p>La aplicación también requiere los siguientes requisitos:</p>
39
- <ul>
40
- <li>Un dispositivo Android con Android 4.4 o superior</li>
41
- <li>Una conexión a Internet (Wi-Fi o datos móviles)</li>
42
- <li>Una cuenta de Google para iniciar sesión y sincronizar tu progreso</li>
43
- <li>Una cuenta de Facebook para conectar con tus amigos y retarlos</li>
44
- </ul>
45
- <p>Debes conceder estos permisos y cumplir con estos requisitos para asegurar el correcto funcionamiento de la aplicación y disfrutar de sus características. </p>
46
- <h2>Cómo jugar al billar de 8 bolas en línea o fuera de línea</h2>
47
- <h3>Las reglas y objetivos básicos del juego</h3>
48
- <p>Las reglas y objetivos básicos de 8 Ball Pool son similares a los de los juegos de billar reales. El juego se juega en una mesa de billar con seis bolsillos, una bola blanca y 15 bolas de objetos (siete bolas de color sólido, siete bolas a rayas y una bola negra). </p>
49
- <p>El juego comienza con un tiro de ruptura, donde un jugador golpea la bola blanca con un palo de taco e intenta dispersar las bolas de objeto en la mesa. El jugador que embolsa una pelota en el tiro de descanso puede optar por jugar ya sea sólidos o rayas, dependiendo del tipo de pelota que embolsó. El otro jugador tiene que jugar el tipo opuesto. </p>
50
- <p>El objetivo del juego es meter todas tus bolas (ya sean sólidas o rayas) antes que tu oponente, y luego meter la bola negra (también conocida como la bola 8) en un bolsillo designado. Tienes que llamar al bolsillo por cada tiro que haces, excepto por el tiro de ruptura. Si te embolsas una pelota en un bolsillo diferente al que pagaste, o si te embolsas la pelota de tu oponente o la bola blanca, es una falta y pierdes tu turno. </p>
51
- <p>También puedes perder el juego si cometes cualquiera de estas faltas:</p>
52
- <ul>
53
- <li>Embolsar la bola 8 antes de limpiar sus bolas</li>
54
- <li>Embolsar la bola 8 en un bolsillo diferente al que llamaste</li>
55
- <li>Embolsar la bola 8 y la bola blanca en el mismo tiro</li>
56
-
57
- </ul>
58
- <p>El juego termina cuando un jugador se embolsa la bola 8 legalmente o cuando un jugador pierde o se desconecta del juego. </p>
59
- <h3>Los diferentes modos, tablas, señales y elementos disponibles en el juego</h3>
60
- <p>8 Ball Pool tiene diferentes modos que puedes jugar online o offline, como:</p>
61
- <ul>
62
- <li><strong>1-on-1 partidos:</strong> Puede jugar contra otro jugador en línea en un partido al azar o un partido de amigos. Puede elegir entre diferentes tablas con diferentes cuotas de entrada y recompensas. Cuanto mayor sea la cuota de entrada, mayor será la recompensa. </li>
63
- <li><strong>Torneos:</strong> Puedes jugar contra otros siete jugadores en línea en un formato eliminatorio. Tienes que ganar cuatro partidos seguidos para ganar el torneo y obtener un gran premio. </li>
64
- <li><strong>Minijuegos:</strong> Puedes jugar algunos minijuegos para ganar monedas, dinero, tacos u otros objetos. Algunos de los minijuegos son Spin & Win, Scratch & Win, Hi-Lo, Lucky Shot y Cajas sorpresa.</li>
65
- <li><strong>Modo de práctica:</strong> Puedes jugar sin conexión contra la computadora o por ti mismo para practicar tus habilidades y probar diferentes disparos. </li>
66
- </ul>
67
- <p>El juego también tiene una variedad de pistas y mesas que puedes desbloquear o comprar con monedas o dinero en efectivo. Cada señal tiene diferentes atributos, como poder, puntería, efectos y tiempo. También puedes actualizar tus señales para mejorar sus atributos. Cada mesa tiene diferentes diseños, tamaños y reglas. También puede personalizar su mesa con diferentes telas, patrones y calcomanías. </p>
68
- <p>El juego también tiene una tienda de billar donde puedes comprar objetos como monedas, dinero en efectivo, tacos, mesas, paquetes de chat, avatares y señales de suerte. También puedes obtener algunos artículos gratis viendo videos, completando ofertas o invitando a amigos. </p>
69
- <h3>Los consejos y trucos para mejorar tus habilidades y ganar más partidos</h3>
70
- <p>Si quieres mejorar tus habilidades y ganar más partidos en 8 Ball Pool, debes seguir estos consejos y trucos:</p>
71
- <ul>
72
-
73
- <li><strong>Planificar con antelación:</strong> El juego requiere estrategia y previsión. Debes planificar con anticipación y pensar en qué bolas quieres meter primero, qué bolsillos quieres usar y cómo quieres colocar la bola blanca para el siguiente tiro. También debes evitar dejar tiros fáciles para tu oponente o bloquear tus propias bolas. </li>
74
- <li><strong>Usa spin:</strong> El juego te permite aplicar spin a la bola cue tocando el icono de spin y moviéndolo. Puede utilizar el giro para cambiar la dirección de la bola blanca después de que golpea una bola de objeto o un cojín. Puedes usar el giro para evitar rasguños, salir de situaciones difíciles o preparar mejores fotos. </li>
75
- <li><strong>Practicar offline:</strong> El juego tiene un modo de práctica donde puedes jugar offline contra el ordenador o por ti mismo. Puede utilizar este modo para practicar sus habilidades y probar diferentes disparos sin arriesgar sus monedas o clasificación. También puede aprender de los movimientos y errores de la computadora. </li>
76
- <li><strong>Ver vídeos:</strong> El juego tiene una sección de vídeo donde puedes ver vídeos de partidos o tutoriales de otros jugadores. Puedes usar estos videos para aprender de sus estrategias, técnicas y consejos. También puedes obtener algunas monedas u objetos viendo algunos videos. </li>
77
- </ul>
78
- <h2>Conclusión</h2>
79
- <h3>Un resumen de los principales puntos y beneficios de jugar 8 Ball Pool 5.12.0 APK en su dispositivo Android</h3>
80
- <p>Para resumir, 8 Ball Pool es un juego de billar que te permite jugar online o offline con millones de jugadores de diferentes países, competir en torneos, ganar monedas y artículos exclusivos, y personalizar tu señal y mesa. Se basa en la física real del pool 3D y tiene gráficos y sonidos realistas. </p>
81
-
82
- <p>Para jugar 8 Ball Pool 5.12.0 APK en su dispositivo Android, es necesario descargar e instalar desde un sitio web de confianza que proporciona archivos APK para aplicaciones Android. También debe habilitar la instalación de aplicaciones de fuentes desconocidas en su dispositivo, conceder los permisos y cumplir los requisitos para la aplicación, e iniciar sesión con su cuenta de Google o Facebook. </p>
83
- <p>Jugar 8 Ball Pool 5.12.0 APK en su dispositivo Android tiene muchos beneficios, tales como:</p>
84
- <ul>
85
- <li> Usted puede disfrutar de jugar al billar en cualquier momento y en cualquier lugar con su dispositivo Android</li>
86
- <li>Puedes desafiar a tus amigos u otros jugadores en línea en diferentes modos</li>
87
- <li>Puedes mejorar tus habilidades y posicionarte en la clasificación global</li>
88
- <li>Puedes desbloquear o comprar varias pistas y tablas que se adapten a tu estilo</li>
89
- <li>Puedes ganar monedas y artículos exclusivos que mejoran tu juego</li>
90
- <li> Usted puede divertirse y relajarse con una experiencia de piscina realista</li>
91
- </ul>
92
- <h3>Una llamada a la acción para descargar y jugar el juego ahora</h3>
93
- <p>Si usted está interesado en jugar 8 Ball Pool 5.12.0 APK en su dispositivo Android, ¿qué estás esperando? Descárgalo e instálalo desde un sitio web de confianza que proporciona archivos APK para aplicaciones de Android, como [Softonic] o [APKPure]. Siga los pasos que hemos proporcionado anteriormente y disfrutar de jugar el juego de billar más grande del mundo en su dispositivo Android. Usted no se arrepentirá, como 8 Ball Pool 5.12.0 APK es un juego que le mantendrá entretenido y desafiado durante horas. También tendrá la oportunidad de jugar por el cambio y donar a la Global Gift Foundation, una organización benéfica que apoya a niños y familias necesitadas de todo el mundo. Así que, ¿qué estás esperando? Descargar y jugar 8 Ball Pool 5.12.0 APK ahora y unirse a los millones de jugadores que aman este juego. Usted tendrá una explosión! <h2>Preguntas frecuentes</h2>
94
- <h3>Algunas preguntas y respuestas comunes sobre el juego y el archivo APK</h3>
95
- <p>Aquí hay algunas preguntas y respuestas comunes que usted podría tener acerca de 8 Piscina de bolas 5.12.0 APK:</p>
96
- <ol>
97
-
98
- <p>Sí, 8 Ball Pool 5.12.0 APK es seguro para descargar e instalar, siempre y cuando lo obtenga de un sitio web de confianza que proporciona archivos APK para aplicaciones Android, como [Softonic] o [APKPure]. Estos sitios web escanean los archivos APK en busca de virus y malware antes de cargarlos, para que pueda estar seguro de que están limpios y seguros. </p>
99
- <li><strong>Es 8 bola piscina 5.12.0 APK libre para jugar? </strong></li>
100
- <p>Sí, 8 Ball Pool 5.12.0 APK es libre de jugar, pero también tiene algunas compras en la aplicación que se puede hacer con dinero real si desea obtener más monedas, dinero en efectivo, señales, o elementos en el juego. Sin embargo, estas compras son opcionales y no necesarias para disfrutar del juego. </p>
101
- <li><strong>¿Puedo jugar 8 bola piscina 5.12.0 APK fuera de línea? </strong></li>
102
- <p>Sí, puede jugar 8 Ball Pool 5.12.0 APK fuera de línea en el modo de práctica o contra el ordenador, pero no podrá jugar en línea con otros jugadores o acceder a algunas características que requieren una conexión a Internet, como torneos, mini-juegos o clubes. </p>
103
- <li><strong>¿Puedo jugar 8 bola piscina 5.12.0 APK en otros dispositivos? </strong></li>
104
- <p>Sí, puede jugar 8 Ball Pool 5.12.0 APK en otros dispositivos además de su dispositivo Android, como su PC, Mac, dispositivo iOS o teléfono de Windows. Solo tienes que descargar e instalar la versión apropiada del juego para tu dispositivo desde el sitio web oficial de Miniclip o la tienda de aplicaciones de tu dispositivo. </p>
105
- <li><strong>¿Cómo puedo contactar al equipo de soporte de 8 Ball Pool? </strong></li>
106
- <p>Si tiene algún problema o pregunta sobre 8 Ball Pool, puede ponerse en contacto con el equipo de soporte de Miniclip llenando un formulario en su sitio web o enviando un correo electrónico a [email protected]. También puede visitar su centro de ayuda o su página de Facebook para obtener más información y actualizaciones. </p>
107
- </ol></p> 64aa2da5cf<br />
108
- <br />
109
- <br />
 
spaces/BetterAPI/BetterChat/vite.config.ts DELETED
@@ -1,12 +0,0 @@
1
- import { sveltekit } from "@sveltejs/kit/vite";
2
- import { defineConfig } from "vite";
3
- import Icons from "unplugin-icons/vite";
4
-
5
- export default defineConfig({
6
- plugins: [
7
- sveltekit(),
8
- Icons({
9
- compiler: "svelte",
10
- }),
11
- ],
12
- });
 
spaces/Big-Web/MMSD/app.py DELETED
@@ -1,79 +0,0 @@
1
- import banana_dev as banana
2
- import base64
3
- from io import BytesIO
4
- from PIL import Image
5
- import gradio as gr
6
- import os
7
- # import boto3
8
-
9
- # model_key = os.environ.get("model_key")
10
- # api_key = os.environ.get("api_key")
11
- # aws_access_key_id = os.environ.get("aws_access_key_id")
12
- # aws_secret_access_key = os.environ.get("aws_secret_access_key")
13
-
14
- # #Create a session using AWS credentials
15
- # session = boto3.Session(aws_access_key_id, aws_secret_access_key)
16
-
17
- # #Create an S3 resource object using the session
18
- # s3 = session.resource('s3')
19
-
20
- # #Select your bucket
21
- # bucket = s3.Bucket('bwlmonet')
22
-
23
- model_inputs = {
24
- "endpoint": "txt2img",
25
- "params": {
26
- "prompt": "",
27
- "negative_prompt": "",
28
- "steps": 25,
29
- "sampler_name": "Euler a",
30
- "cfg_scale": 7.5,
31
- "seed": 42,
32
- "batch_size": 1,
33
- "n_iter": 1,
34
- "width": 768,
35
- "height": 768,
36
- "tiling": False
37
- }
38
- }
39
-
40
- # for obj in bucket.objects.all():
41
- # print(obj.key)
42
-
43
- def stable_diffusion_txt2img(prompt, api_key, model_key, model_inputs):
44
- # Update the model_inputs with the provided prompt
45
- model_inputs["params"]["prompt"] = prompt
46
-
47
- # Run the model
48
- out = banana.run(api_key, model_key, model_inputs)
49
-
50
- # Process the output
51
- image_byte_string = out["modelOutputs"][0]["images"]
52
- image_encoded = image_byte_string[0].encode("utf-8")
53
- image_bytes = BytesIO(base64.b64decode(image_encoded))
54
- image = Image.open(image_bytes)
55
-
56
- # Save image to S3
57
- # key = f"{prompt}.png"
58
- # image.save(key)
59
- # with open(key, "rb") as data:
60
- # bucket.put_object(Key=key, Body=data)
61
-
62
- # for obj in bucket.objects.all():
63
- # print(obj.key)
64
-
65
- return image
66
-
67
- # Gradio Interface
68
- def generator(prompt):
69
- return stable_diffusion_txt2img(prompt, api_key, model_key, model_inputs), stable_diffusion_txt2img(prompt, api_key, model_key, model_inputs)
70
-
71
- with gr.Blocks() as demo:
72
- prompt = gr.Textbox(label="Prompt")
73
- submit = gr.Button(label="Generate")
74
- image1 = gr.Image()
75
- image2 = gr.Image()
76
-
77
- submit.click(generator, inputs=[prompt], outputs=[image1, image2], api_name="mmsd")
78
-
79
- demo.launch()
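The Gradio app deleted above receives the generated image from the Banana API as a base64-encoded string and converts it back into a PIL image. The self-contained sketch below reproduces only that decode step with a locally generated payload (no API call and no real keys are involved):

```python
# Hedged sketch of the decode round-trip used in the removed app.py.
import base64
from io import BytesIO
from PIL import Image

# Stand-in for out["modelOutputs"][0]["images"][0]: a tiny locally made PNG.
buf = BytesIO()
Image.new("RGB", (8, 8), color="purple").save(buf, format="PNG")
fake_payload = base64.b64encode(buf.getvalue()).decode("utf-8")

image = Image.open(BytesIO(base64.b64decode(fake_payload.encode("utf-8"))))
print(image.size)  # (8, 8)
```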
 
spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/__init__.py DELETED
@@ -1,111 +0,0 @@
1
- # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License"). You
4
- # may not use this file except in compliance with the License. A copy of
5
- # the License is located at
6
- #
7
- # https://aws.amazon.com/apache2.0/
8
- #
9
- # or in the "license" file accompanying this file. This file is
10
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11
- # ANY KIND, either express or implied. See the License for the specific
12
- # language governing permissions and limitations under the License.
13
-
14
- import logging
15
-
16
- from boto3.compat import _warn_deprecated_python
17
- from boto3.session import Session
18
-
19
- __author__ = 'Amazon Web Services'
20
- __version__ = '1.26.132'
21
-
22
-
23
- # The default Boto3 session; autoloaded when needed.
24
- DEFAULT_SESSION = None
25
-
26
-
27
- def setup_default_session(**kwargs):
28
- """
29
- Set up a default session, passing through any parameters to the session
30
- constructor. There is no need to call this unless you wish to pass custom
31
- parameters, because a default session will be created for you.
32
- """
33
- global DEFAULT_SESSION
34
- DEFAULT_SESSION = Session(**kwargs)
35
-
36
-
37
- def set_stream_logger(name='boto3', level=logging.DEBUG, format_string=None):
38
- """
39
- Add a stream handler for the given name and level to the logging module.
40
- By default, this logs all boto3 messages to ``stdout``.
41
-
42
- >>> import boto3
43
- >>> boto3.set_stream_logger('boto3.resources', logging.INFO)
44
-
45
- For debugging purposes a good choice is to set the stream logger to ``''``
46
- which is equivalent to saying "log everything".
47
-
48
- .. WARNING::
49
- Be aware that when logging anything from ``'botocore'`` the full wire
50
- trace will appear in your logs. If your payloads contain sensitive data
51
- this should not be used in production.
52
-
53
- :type name: string
54
- :param name: Log name
55
- :type level: int
56
- :param level: Logging level, e.g. ``logging.INFO``
57
- :type format_string: str
58
- :param format_string: Log message format
59
- """
60
- if format_string is None:
61
- format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
62
-
63
- logger = logging.getLogger(name)
64
- logger.setLevel(level)
65
- handler = logging.StreamHandler()
66
- handler.setLevel(level)
67
- formatter = logging.Formatter(format_string)
68
- handler.setFormatter(formatter)
69
- logger.addHandler(handler)
70
-
71
-
72
- def _get_default_session():
73
- """
74
- Get the default session, creating one if needed.
75
-
76
- :rtype: :py:class:`~boto3.session.Session`
77
- :return: The default session
78
- """
79
- if DEFAULT_SESSION is None:
80
- setup_default_session()
81
- _warn_deprecated_python()
82
-
83
- return DEFAULT_SESSION
84
-
85
-
86
- def client(*args, **kwargs):
87
- """
88
- Create a low-level service client by name using the default session.
89
-
90
- See :py:meth:`boto3.session.Session.client`.
91
- """
92
- return _get_default_session().client(*args, **kwargs)
93
-
94
-
95
- def resource(*args, **kwargs):
96
- """
97
- Create a resource service client by name using the default session.
98
-
99
- See :py:meth:`boto3.session.Session.resource`.
100
- """
101
- return _get_default_session().resource(*args, **kwargs)
102
-
103
-
104
- # Set up logging to ``/dev/null`` like a library is supposed to.
105
- # https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
106
- class NullHandler(logging.Handler):
107
- def emit(self, record):
108
- pass
109
-
110
-
111
- logging.getLogger('boto3').addHandler(NullHandler())
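The deleted module is boto3's package entry point: it lazily creates a default `Session` and documents `set_stream_logger` for debugging. A short hedged usage sketch follows (the bucket name is hypothetical, and configured AWS credentials are assumed):

```python
# Hedged usage sketch for the boto3 helpers defined in the removed file.
import logging
import boto3

boto3.set_stream_logger("boto3.resources", logging.INFO)  # log to stdout

s3 = boto3.client("s3")  # implicitly builds the module-level default session
paginator = s3.get_paginator("list_objects_v2")
for page in paginator.paginate(Bucket="example-bucket"):
    for obj in page.get("Contents", []):
        print(obj["Key"])
```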
 
spaces/CShorten/Last-Week-on-ArXiv/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Last Week On ArXiv
3
- emoji: 🐢
4
- colorFrom: yellow
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.0.9
8
- app_file: app.py
9
- pinned: false
10
- license: afl-3.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
spaces/CVPR/BigDL-Nano_inference/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: BigDL-Nano Inference Demo
3
- emoji: 🦄
4
- colorFrom: yellow
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.0.13
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_rpn.py DELETED
@@ -1,228 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import logging
3
- import unittest
4
- import torch
5
-
6
- from detectron2.config import get_cfg
7
- from detectron2.modeling.backbone import build_backbone
8
- from detectron2.modeling.proposal_generator.build import build_proposal_generator
9
- from detectron2.modeling.proposal_generator.rpn_outputs import find_top_rpn_proposals
10
- from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
11
- from detectron2.utils.events import EventStorage
12
-
13
- logger = logging.getLogger(__name__)
14
-
15
-
16
- class RPNTest(unittest.TestCase):
17
- def test_rpn(self):
18
- torch.manual_seed(121)
19
- cfg = get_cfg()
20
- cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RPN"
21
- cfg.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator"
22
- cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1)
23
- backbone = build_backbone(cfg)
24
- proposal_generator = build_proposal_generator(cfg, backbone.output_shape())
25
- num_images = 2
26
- images_tensor = torch.rand(num_images, 20, 30)
27
- image_sizes = [(10, 10), (20, 30)]
28
- images = ImageList(images_tensor, image_sizes)
29
- image_shape = (15, 15)
30
- num_channels = 1024
31
- features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
32
- gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
33
- gt_instances = Instances(image_shape)
34
- gt_instances.gt_boxes = Boxes(gt_boxes)
35
- with EventStorage(): # capture events in a new storage to discard them
36
- proposals, proposal_losses = proposal_generator(
37
- images, features, [gt_instances[0], gt_instances[1]]
38
- )
39
-
40
- expected_losses = {
41
- "loss_rpn_cls": torch.tensor(0.0804563984),
42
- "loss_rpn_loc": torch.tensor(0.0990132466),
43
- }
44
- for name in expected_losses.keys():
45
- self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]))
46
-
47
- expected_proposal_boxes = [
48
- Boxes(torch.tensor([[0, 0, 10, 10], [7.3365392685, 0, 10, 10]])),
49
- Boxes(
50
- torch.tensor(
51
- [
52
- [0, 0, 30, 20],
53
- [0, 0, 16.7862777710, 13.1362524033],
54
- [0, 0, 30, 13.3173446655],
55
- [0, 0, 10.8602609634, 20],
56
- [7.7165775299, 0, 27.3875980377, 20],
57
- ]
58
- )
59
- ),
60
- ]
61
-
62
- expected_objectness_logits = [
63
- torch.tensor([0.1225359365, -0.0133192837]),
64
- torch.tensor([0.1415634006, 0.0989848152, 0.0565387346, -0.0072308783, -0.0428492837]),
65
- ]
66
-
67
- for proposal, expected_proposal_box, im_size, expected_objectness_logit in zip(
68
- proposals, expected_proposal_boxes, image_sizes, expected_objectness_logits
69
- ):
70
- self.assertEqual(len(proposal), len(expected_proposal_box))
71
- self.assertEqual(proposal.image_size, im_size)
72
- self.assertTrue(
73
- torch.allclose(proposal.proposal_boxes.tensor, expected_proposal_box.tensor)
74
- )
75
- self.assertTrue(torch.allclose(proposal.objectness_logits, expected_objectness_logit))
76
-
77
- def test_rrpn(self):
78
- torch.manual_seed(121)
79
- cfg = get_cfg()
80
- cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN"
81
- cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator"
82
- cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
83
- cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1]]
84
- cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [[0, 60]]
85
- cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1)
86
- cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead"
87
- backbone = build_backbone(cfg)
88
- proposal_generator = build_proposal_generator(cfg, backbone.output_shape())
89
- num_images = 2
90
- images_tensor = torch.rand(num_images, 20, 30)
91
- image_sizes = [(10, 10), (20, 30)]
92
- images = ImageList(images_tensor, image_sizes)
93
- image_shape = (15, 15)
94
- num_channels = 1024
95
- features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
96
- gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32)
97
- gt_instances = Instances(image_shape)
98
- gt_instances.gt_boxes = RotatedBoxes(gt_boxes)
99
- with EventStorage(): # capture events in a new storage to discard them
100
- proposals, proposal_losses = proposal_generator(
101
- images, features, [gt_instances[0], gt_instances[1]]
102
- )
103
-
104
- expected_losses = {
105
- "loss_rpn_cls": torch.tensor(0.0432923734),
106
- "loss_rpn_loc": torch.tensor(0.1552739739),
107
- }
108
- for name in expected_losses.keys():
109
- self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]))
110
-
111
- expected_proposal_boxes = [
112
- RotatedBoxes(
113
- torch.tensor(
114
- [
115
- [0.60189795, 1.24095452, 61.98131943, 18.03621292, -4.07244873],
116
- [15.64940453, 1.69624567, 59.59749603, 16.34339333, 2.62692475],
117
- [-3.02982378, -2.69752932, 67.90952301, 59.62455750, 59.97010040],
118
- [16.71863365, 1.98309708, 35.61507797, 32.81484985, 62.92267227],
119
- [0.49432933, -7.92979717, 67.77606201, 62.93098450, -1.85656738],
120
- [8.00880814, 1.36017394, 121.81007385, 32.74150467, 50.44297409],
121
- [16.44299889, -4.82221127, 63.39775848, 61.22503662, 54.12270737],
122
- [5.00000000, 5.00000000, 10.00000000, 10.00000000, -0.76943970],
123
- [17.64130402, -0.98095351, 61.40377808, 16.28918839, 55.53118134],
124
- [0.13016054, 4.60568953, 35.80157471, 32.30180359, 62.52872086],
125
- [-4.26460743, 0.39604485, 124.30079651, 31.84611320, -1.58203125],
126
- [7.52815342, -0.91636634, 62.39784622, 15.45565224, 60.79549789],
127
- ]
128
- )
129
- ),
130
- RotatedBoxes(
131
- torch.tensor(
132
- [
133
- [0.07734215, 0.81635046, 65.33510590, 17.34688377, -1.51821899],
134
- [-3.41833067, -3.11320257, 64.17595673, 60.55617905, 58.27033234],
135
- [20.67383385, -6.16561556, 63.60531998, 62.52315903, 54.85546494],
136
- [15.00000000, 10.00000000, 30.00000000, 20.00000000, -0.18218994],
137
- [9.22646523, -6.84775209, 62.09895706, 65.46472931, -2.74307251],
138
- [15.00000000, 4.93451595, 30.00000000, 9.86903191, -0.60272217],
139
- [8.88342094, 2.65560246, 120.95362854, 32.45022202, 55.75970078],
140
- [16.39088631, 2.33887148, 34.78761292, 35.61492920, 60.81977463],
141
- [9.78298569, 10.00000000, 19.56597137, 20.00000000, -0.86660767],
142
- [1.28576660, 5.49873352, 34.93610382, 33.22600174, 60.51599884],
143
- [17.58912468, -1.63270092, 62.96052551, 16.45713997, 52.91245270],
144
- [5.64749718, -1.90428460, 62.37649155, 16.19474792, 61.09543991],
145
- [0.82255805, 2.34931135, 118.83985901, 32.83671188, 56.50753784],
146
- [-5.33874989, 1.64404404, 125.28501892, 33.35424042, -2.80731201],
147
- ]
148
- )
149
- ),
150
- ]
151
-
152
- expected_objectness_logits = [
153
- torch.tensor(
154
- [
155
- 0.10111768,
156
- 0.09112845,
157
- 0.08466332,
158
- 0.07589971,
159
- 0.06650183,
160
- 0.06350251,
161
- 0.04299347,
162
- 0.01864817,
163
- 0.00986163,
164
- 0.00078543,
165
- -0.04573630,
166
- -0.04799230,
167
- ]
168
- ),
169
- torch.tensor(
170
- [
171
- 0.11373727,
172
- 0.09377633,
173
- 0.05281663,
174
- 0.05143715,
175
- 0.04040275,
176
- 0.03250912,
177
- 0.01307789,
178
- 0.01177734,
179
- 0.00038105,
180
- -0.00540255,
181
- -0.01194804,
182
- -0.01461012,
183
- -0.03061717,
184
- -0.03599222,
185
- ]
186
- ),
187
- ]
188
-
189
- torch.set_printoptions(precision=8, sci_mode=False)
190
-
191
- for proposal, expected_proposal_box, im_size, expected_objectness_logit in zip(
192
- proposals, expected_proposal_boxes, image_sizes, expected_objectness_logits
193
- ):
194
- self.assertEqual(len(proposal), len(expected_proposal_box))
195
- self.assertEqual(proposal.image_size, im_size)
196
- # It seems that there's some randomness in the result across different machines:
197
- # This test can be run on a local machine for 100 times with exactly the same result,
198
- # However, a different machine might produce slightly different results,
199
- # thus the atol here.
200
- err_msg = "computed proposal boxes = {}, expected {}".format(
201
- proposal.proposal_boxes.tensor, expected_proposal_box.tensor
202
- )
203
- self.assertTrue(
204
- torch.allclose(
205
- proposal.proposal_boxes.tensor, expected_proposal_box.tensor, atol=1e-5
206
- ),
207
- err_msg,
208
- )
209
-
210
- err_msg = "computed objectness logits = {}, expected {}".format(
211
- proposal.objectness_logits, expected_objectness_logit
212
- )
213
- self.assertTrue(
214
- torch.allclose(proposal.objectness_logits, expected_objectness_logit, atol=1e-5),
215
- err_msg,
216
- )
217
-
218
- def test_rpn_proposals_inf(self):
219
- N, Hi, Wi, A = 3, 3, 3, 3
220
- proposals = [torch.rand(N, Hi * Wi * A, 4)]
221
- pred_logits = [torch.rand(N, Hi * Wi * A)]
222
- pred_logits[0][1][3:5].fill_(float("inf"))
223
- images = ImageList.from_tensors([torch.rand(3, 10, 10)] * 3)
224
- find_top_rpn_proposals(proposals, pred_logits, images, 0.5, 1000, 1000, 0, False)
225
-
226
-
227
- if __name__ == "__main__":
228
- unittest.main()
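The RPN tests deleted above compare computed boxes and logits against hard-coded expectations with `torch.allclose`, loosening the absolute tolerance where results vary slightly across machines. A minimal, self-contained sketch of that comparison pattern (the values are illustrative):

```python
# Minimal illustration of the torch.allclose tolerance pattern used above.
import unittest
import torch

class ToleranceExample(unittest.TestCase):
    def test_close_within_atol(self):
        expected = torch.tensor([0.10111768, 0.09112845])
        computed = expected + 1e-6  # simulated cross-machine drift
        self.assertTrue(torch.allclose(computed, expected, atol=1e-5))

if __name__ == "__main__":
    unittest.main()
```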
 
spaces/CVPR/LIVE/pybind11/tests/test_constants_and_functions.py DELETED
@@ -1,40 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- from pybind11_tests import constants_and_functions as m
3
-
4
-
5
- def test_constants():
6
- assert m.some_constant == 14
7
-
8
-
9
- def test_function_overloading():
10
- assert m.test_function() == "test_function()"
11
- assert m.test_function(7) == "test_function(7)"
12
- assert m.test_function(m.MyEnum.EFirstEntry) == "test_function(enum=1)"
13
- assert m.test_function(m.MyEnum.ESecondEntry) == "test_function(enum=2)"
14
-
15
- assert m.test_function() == "test_function()"
16
- assert m.test_function("abcd") == "test_function(char *)"
17
- assert m.test_function(1, 1.0) == "test_function(int, float)"
18
- assert m.test_function(1, 1.0) == "test_function(int, float)"
19
- assert m.test_function(2.0, 2) == "test_function(float, int)"
20
-
21
-
22
- def test_bytes():
23
- assert m.print_bytes(m.return_bytes()) == "bytes[1 0 2 0]"
24
-
25
-
26
- def test_exception_specifiers():
27
- c = m.C()
28
- assert c.m1(2) == 1
29
- assert c.m2(3) == 1
30
- assert c.m3(5) == 2
31
- assert c.m4(7) == 3
32
- assert c.m5(10) == 5
33
- assert c.m6(14) == 8
34
- assert c.m7(20) == 13
35
- assert c.m8(29) == 21
36
-
37
- assert m.f1(33) == 34
38
- assert m.f2(53) == 55
39
- assert m.f3(86) == 89
40
- assert m.f4(140) == 144
 
spaces/CVPR/LIVE/thrust/thrust/detail/raw_pointer_cast.h DELETED
@@ -1,52 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/detail/type_traits/pointer_traits.h>
21
-
22
- namespace thrust
23
- {
24
-
25
- template<typename Pointer>
26
- __host__ __device__
27
- typename thrust::detail::pointer_traits<Pointer>::raw_pointer
28
- raw_pointer_cast(Pointer ptr)
29
- {
30
- return thrust::detail::pointer_traits<Pointer>::get(ptr);
31
- }
32
-
33
- template <typename ToPointer, typename FromPointer>
34
- __host__ __device__
35
- ToPointer
36
- reinterpret_pointer_cast(FromPointer ptr)
37
- {
38
- typedef typename thrust::detail::pointer_element<ToPointer>::type to_element;
39
- return ToPointer(reinterpret_cast<to_element*>(thrust::raw_pointer_cast(ptr)));
40
- }
41
-
42
- template <typename ToPointer, typename FromPointer>
43
- __host__ __device__
44
- ToPointer
45
- static_pointer_cast(FromPointer ptr)
46
- {
47
- typedef typename thrust::detail::pointer_element<ToPointer>::type to_element;
48
- return ToPointer(static_cast<to_element*>(thrust::raw_pointer_cast(ptr)));
49
- }
50
-
51
- } // end thrust
52
-
 
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/mismatch.h DELETED
@@ -1,117 +0,0 @@
1
- /******************************************************************************
2
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
- *
4
- * Redistribution and use in source and binary forms, with or without
5
- * modification, are permitted provided that the following conditions are met:
6
- * * Redistributions of source code must retain the above copyright
7
- * notice, this list of conditions and the following disclaimer.
8
- * * Redistributions in binary form must reproduce the above copyright
9
- * notice, this list of conditions and the following disclaimer in the
10
- * documentation and/or other materials provided with the distribution.
11
- * * Neither the name of the NVIDIA CORPORATION nor the
12
- * names of its contributors may be used to endorse or promote products
13
- * derived from this software without specific prior written permission.
14
- *
15
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
- * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
- *
26
- ******************************************************************************/
27
- #pragma once
28
-
29
-
30
- #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
31
- #include <thrust/system/cuda/config.h>
32
- #include <thrust/system/cuda/detail/execution_policy.h>
33
- #include <thrust/pair.h>
34
- #include <thrust/distance.h>
35
-
36
- namespace thrust
37
- {
38
- namespace cuda_cub {
39
-
40
- template <class Derived,
41
- class InputIt1,
42
- class InputIt2,
43
- class BinaryPred>
44
- pair<InputIt1, InputIt2> __host__ __device__
45
- mismatch(execution_policy<Derived>& policy,
46
- InputIt1 first1,
47
- InputIt1 last1,
48
- InputIt2 first2,
49
- BinaryPred binary_pred);
50
-
51
- template <class Derived,
52
- class InputIt1,
53
- class InputIt2>
54
- pair<InputIt1, InputIt2> __host__ __device__
55
- mismatch(execution_policy<Derived>& policy,
56
- InputIt1 first1,
57
- InputIt1 last1,
58
- InputIt2 first2);
59
- } // namespace cuda_
60
- } // end namespace thrust
61
-
62
- #include <thrust/system/cuda/detail/find.h>
63
-
64
- namespace thrust
65
- {
66
- namespace cuda_cub {
67
-
68
- template <class Derived,
69
- class InputIt1,
70
- class InputIt2,
71
- class BinaryPred>
72
- pair<InputIt1, InputIt2> __host__ __device__
73
- mismatch(execution_policy<Derived>& policy,
74
- InputIt1 first1,
75
- InputIt1 last1,
76
- InputIt2 first2,
77
- BinaryPred binary_pred)
78
- {
79
- typedef transform_pair_of_input_iterators_t<bool,
80
- InputIt1,
81
- InputIt2,
82
- BinaryPred>
83
- transform_t;
84
-
85
- transform_t transform_first = transform_t(first1, first2, binary_pred);
86
-
87
- transform_t result = cuda_cub::find_if_not(policy,
88
- transform_first,
89
- transform_first + thrust::distance(first1, last1),
90
- identity());
91
-
92
- return thrust::make_pair(first1 + thrust::distance(transform_first,result),
93
- first2 + thrust::distance(transform_first,result));
94
- }
95
-
96
- template <class Derived,
97
- class InputIt1,
98
- class InputIt2>
99
- pair<InputIt1, InputIt2> __host__ __device__
100
- mismatch(execution_policy<Derived>& policy,
101
- InputIt1 first1,
102
- InputIt1 last1,
103
- InputIt2 first2)
104
- {
105
- typedef typename thrust::iterator_value<InputIt1>::type InputType1;
106
- return cuda_cub::mismatch(policy,
107
- first1,
108
- last1,
109
- first2,
110
- equal_to<InputType1>());
111
- }
112
-
113
-
114
-
115
- } // namespace cuda_cub
116
- } // end namespace thrust
117
- #endif
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/async/for_each.h DELETED
@@ -1,34 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- // The purpose of this header is to #include the async/for_each.h header of the
18
- // sequential, host, and device systems. It should be #included in any code
19
- // which uses ADL to dispatch async for_each.
20
-
21
- #pragma once
22
-
23
- #include <thrust/detail/config.h>
24
-
25
- //#include <thrust/system/detail/sequential/async/for_each.h>
26
-
27
- //#define __THRUST_HOST_SYSTEM_ASYNC_FOR_EACH_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/async/for_each.h>
28
- //#include __THRUST_HOST_SYSTEM_ASYNC_FOR_EACH_HEADER
29
- //#undef __THRUST_HOST_SYSTEM_ASYNC_FOR_EACH_HEADER
30
-
31
- #define __THRUST_DEVICE_SYSTEM_ASYNC_FOR_EACH_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/async/for_each.h>
32
- #include __THRUST_DEVICE_SYSTEM_ASYNC_FOR_EACH_HEADER
33
- #undef __THRUST_DEVICE_SYSTEM_ASYNC_FOR_EACH_HEADER
34
-
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/unique.h DELETED
@@ -1,44 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a fill of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // the purpose of this header is to #include the unique.h header
22
- // of the sequential, host, and device systems. It should be #included in any
23
- // code which uses adl to dispatch unique
24
-
25
- #include <thrust/system/detail/sequential/unique.h>
26
-
27
- // SCons can't see through the #defines below to figure out what this header
28
- // includes, so we fake it out by specifying all possible files we might end up
29
- // including inside an #if 0.
30
- #if 0
31
- #include <thrust/system/cpp/detail/unique.h>
32
- #include <thrust/system/cuda/detail/unique.h>
33
- #include <thrust/system/omp/detail/unique.h>
34
- #include <thrust/system/tbb/detail/unique.h>
35
- #endif
36
-
37
- #define __THRUST_HOST_SYSTEM_UNIQUE_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/unique.h>
38
- #include __THRUST_HOST_SYSTEM_UNIQUE_HEADER
39
- #undef __THRUST_HOST_SYSTEM_UNIQUE_HEADER
40
-
41
- #define __THRUST_DEVICE_SYSTEM_UNIQUE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/unique.h>
42
- #include __THRUST_DEVICE_SYSTEM_UNIQUE_HEADER
43
- #undef __THRUST_DEVICE_SYSTEM_UNIQUE_HEADER
44
-
 
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/set_operations.h DELETED
@@ -1,23 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system inherits set_operations
22
- #include <thrust/system/cpp/detail/set_operations.h>
23
-
 
spaces/CVPR/WALT/mmcv_custom/runner/checkpoint.py DELETED
@@ -1,85 +0,0 @@
1
- # Copyright (c) Open-MMLab. All rights reserved.
2
- import os.path as osp
3
- import time
4
- from tempfile import TemporaryDirectory
5
-
6
- import torch
7
- from torch.optim import Optimizer
8
-
9
- import mmcv
10
- from mmcv.parallel import is_module_wrapper
11
- from mmcv.runner.checkpoint import weights_to_cpu, get_state_dict
12
-
13
- try:
14
- import apex
15
- except:
16
- print('apex is not installed')
17
-
18
-
19
- def save_checkpoint(model, filename, optimizer=None, meta=None):
20
- """Save checkpoint to file.
21
-
22
- The checkpoint will have 4 fields: ``meta``, ``state_dict`` and
23
- ``optimizer``, ``amp``. By default ``meta`` will contain version
24
- and time info.
25
-
26
- Args:
27
- model (Module): Module whose params are to be saved.
28
- filename (str): Checkpoint filename.
29
- optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
30
- meta (dict, optional): Metadata to be saved in checkpoint.
31
- """
32
- if meta is None:
33
- meta = {}
34
- elif not isinstance(meta, dict):
35
- raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
36
- meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
37
-
38
- if is_module_wrapper(model):
39
- model = model.module
40
-
41
- if hasattr(model, 'CLASSES') and model.CLASSES is not None:
42
- # save class name to the meta
43
- meta.update(CLASSES=model.CLASSES)
44
-
45
- checkpoint = {
46
- 'meta': meta,
47
- 'state_dict': weights_to_cpu(get_state_dict(model))
48
- }
49
- # save optimizer state dict in the checkpoint
50
- if isinstance(optimizer, Optimizer):
51
- checkpoint['optimizer'] = optimizer.state_dict()
52
- elif isinstance(optimizer, dict):
53
- checkpoint['optimizer'] = {}
54
- for name, optim in optimizer.items():
55
- checkpoint['optimizer'][name] = optim.state_dict()
56
-
57
- # save amp state dict in the checkpoint
58
- checkpoint['amp'] = apex.amp.state_dict()
59
-
60
- if filename.startswith('pavi://'):
61
- try:
62
- from pavi import modelcloud
63
- from pavi.exception import NodeNotFoundError
64
- except ImportError:
65
- raise ImportError(
66
- 'Please install pavi to load checkpoint from modelcloud.')
67
- model_path = filename[7:]
68
- root = modelcloud.Folder()
69
- model_dir, model_name = osp.split(model_path)
70
- try:
71
- model = modelcloud.get(model_dir)
72
- except NodeNotFoundError:
73
- model = root.create_training_model(model_dir)
74
- with TemporaryDirectory() as tmp_dir:
75
- checkpoint_file = osp.join(tmp_dir, model_name)
76
- with open(checkpoint_file, 'wb') as f:
77
- torch.save(checkpoint, f)
78
- f.flush()
79
- model.create_file(checkpoint_file, name=model_name)
80
- else:
81
- mmcv.mkdir_or_exist(osp.dirname(filename))
82
- # immediately flush buffer
83
- with open(filename, 'wb') as f:
84
- torch.save(checkpoint, f)
85
- f.flush()
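
For context, this is a minimal sketch of how the deleted `save_checkpoint` helper would be called, assuming apex is importable (the function records `apex.amp.state_dict()` unconditionally) and using a toy `torch.nn` module and placeholder filename in place of a real detector and path:

```python
import torch
import torch.nn as nn

# Toy stand-ins; a real training loop would pass its detector and optimizer.
model = nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Writes a file containing 'meta', 'state_dict', 'optimizer' and 'amp' fields.
save_checkpoint(model, 'epoch_1.pth', optimizer=optimizer, meta=dict(epoch=1))

# The result is a plain torch checkpoint and can be reloaded directly.
ckpt = torch.load('epoch_1.pth')
model.load_state_dict(ckpt['state_dict'])
```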
 
spaces/CVPR/WALT/mmdet/core/bbox/assigners/__init__.py DELETED
@@ -1,16 +0,0 @@
1
- from .approx_max_iou_assigner import ApproxMaxIoUAssigner
2
- from .assign_result import AssignResult
3
- from .atss_assigner import ATSSAssigner
4
- from .base_assigner import BaseAssigner
5
- from .center_region_assigner import CenterRegionAssigner
6
- from .grid_assigner import GridAssigner
7
- from .hungarian_assigner import HungarianAssigner
8
- from .max_iou_assigner import MaxIoUAssigner
9
- from .point_assigner import PointAssigner
10
- from .region_assigner import RegionAssigner
11
-
12
- __all__ = [
13
- 'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
14
- 'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
15
- 'HungarianAssigner', 'RegionAssigner'
16
- ]
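
As a rough illustration of how these exports are typically used, the sketch below matches proposals to ground-truth boxes with `MaxIoUAssigner`; the constructor arguments and `assign` signature follow the usual mmdet interface and should be treated as assumptions rather than a guarantee of this deleted copy's API.

```python
import torch
from mmdet.core.bbox.assigners import MaxIoUAssigner  # as exported above

# IoU thresholds follow common Faster R-CNN settings; treat them as examples.
assigner = MaxIoUAssigner(pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3)

bboxes = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])  # proposals
gt_bboxes = torch.tensor([[0., 0., 9., 9.]])                     # ground truth
gt_labels = torch.tensor([1])

# AssignResult.gt_inds records, per proposal, the match outcome
# (-1: ignore, 0: negative/background, >0: index of the matched gt + 1).
assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
print(assign_result.gt_inds)
```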