parquet-converter committed
Commit 76a7104 · 1 Parent(s): 3ddf2bb

Update parquet files (step 5 of 296)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/1doemePnordwo/upscale/README.md +0 -14
  2. spaces/1gistliPinn/ChatGPT4/Examples/CRACK X-Force 2019.zip.md +0 -6
  3. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Android TV 12 ISO Everything You Need to Know Before You Download.md +0 -147
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Brawlhalla for Mac How to Download and Play the Free-to-Play Platform Fighter.md +0 -109
  5. spaces/1phancelerku/anime-remove-background/Download Naija Ludo Pro APK and Play the Classic Dice and Race Game with Friends.md +0 -128
  6. spaces/1phancelerku/anime-remove-background/Download Treasure of Montezuma 4 and Experience the Ultimate Match-3 Adventure.md +0 -122
  7. spaces/AI-Zero-to-Hero/02-H5-AR-VR-IOT/style.css +0 -28
  8. spaces/AIFILMS/generate_human_motion/pyrender/pyrender/viewer.py +0 -1160
  9. spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/hifigan/stft_loss.py +0 -136
  10. spaces/AIWaves/Debate/src/agents/__init__.py +0 -4
  11. spaces/Abhaykoul/HelpingAI-t2/README.md +0 -12
  12. spaces/AchyuthGamer/OpenGPT/g4f/Provider/helper.py +0 -77
  13. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/rotate-plugin.d.ts +0 -9
  14. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/Bejeweled.js +0 -82
  15. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/inputtext/InputText.d.ts +0 -2
  16. spaces/AkitoP/umamusume_bert_vits2/text/symbols.py +0 -188
  17. spaces/Akmyradov/TurkmenTTSweSTT/uroman/lib/NLP/UTF8.pm +0 -1404
  18. spaces/Alcedo/yunmedia/resources/chatgpt-plugin/js/chunk-vendors.cd7b5e68.js +0 -0
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/activations.py +0 -12
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/attention_processor.py +0 -1680
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/__init__.py +0 -122
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_kdpm2_discrete.py +0 -132
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/check_inits.py +0 -299
  24. spaces/Andy1621/uniformer_image_detection/configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py +0 -28
  25. spaces/Andy1621/uniformer_image_detection/configs/scnet/scnet_r50_fpn_20e_coco.py +0 -4
  26. spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/README.md +0 -75
  27. spaces/Annelisseishere/Streamlit_GPT/app.py +0 -142
  28. spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/dataset.py +0 -48
  29. spaces/Arijit-hazra/my-image-captioner/README.md +0 -12
  30. spaces/Ashwanthram/myGenVoiceBot/app.py +0 -164
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/langhebrewmodel.py +0 -0
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/pyparsing/unicode.py +0 -352
  33. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/unicode_utils.py +0 -42
  34. spaces/Audio-AGI/AudioSep/gradio_examples.py +0 -16
  35. spaces/Awesimo/jojogan/op/fused_act.py +0 -127
  36. spaces/BaddaAshok0265/AshokGenAI/app.py +0 -34
  37. spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/nets_537238KB.py +0 -123
  38. spaces/Benson/text-generation/Examples/Candy Crush Soda Saga No Download.md +0 -145
  39. spaces/BertChristiaens/blip-diffusion/README.md +0 -8
  40. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/bcdoc/__init__.py +0 -13
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/resolvelib/resolvers.py +0 -547
  42. spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/zipp.py +0 -329
  43. spaces/BillBojangeles2000/bart-large-cnn-samsum/README.md +0 -13
  44. spaces/Brasd99/TTS-Voice-Cloner/app.py +0 -101
  45. spaces/CMU-80100/80-100-Pre-Writing-Chatbot-Section-H/hf_streaming_chatbot.py +0 -112
  46. spaces/CVPR/LIVE/thrust/testing/cuda/stream_per_thread.cmake +0 -11
  47. spaces/CVPR/regionclip-demo/detectron2/evaluation/fast_eval_api.py +0 -121
  48. spaces/CVPR/regionclip-demo/detectron2/modeling/roi_heads/fast_rcnn.py +0 -1086
  49. spaces/ChandraMohanNayal/AutoGPT/autogpt/js/overlay.js +0 -29
  50. spaces/CikeyQI/meme-api/meme_generator/memes/google/__init__.py +0 -28
spaces/1doemePnordwo/upscale/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: UPSCALE
- emoji: 📷
- colorFrom: blue
- colorTo: pink
- sdk: gradio
- sdk_version: 3.35.2
- app_file: app.py
- pinned: false
- license: apache-2.0
- duplicated_from: cvsys/upscale
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/1gistliPinn/ChatGPT4/Examples/CRACK X-Force 2019.zip.md DELETED
@@ -1,6 +0,0 @@
- <h2>CRACK X-Force 2019.zip</h2><br /><p><b><b>Download Zip</b> &#9745; <a href="https://imgfil.com/2uy17d">https://imgfil.com/2uy17d</a></b></p><br /><br />
- <br />
- Listen to Xforce Keygen PowerMill 2019 64 Bit Download and 164 more episodes by FBX 2018 32bit Activation Code Zip. File, free! ... 2009 64 ... 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Android TV 12 ISO Everything You Need to Know Before You Download.md DELETED
@@ -1,147 +0,0 @@
1
-
2
- <h1>How to Download Android TV 12 ISO and Why You Should Try It</h1>
3
- <p>Android TV is a smart TV platform that runs on the Android operating system. It allows you to access a variety of apps, games, and streaming services on your TV. Android TV also supports Google Assistant, Chromecast, and other Google features.</p>
4
- <p>Android TV 12 is the latest version of the platform, based on the Android 12 codebase. It brings a lot of new features and improvements to enhance your TV experience. In this article, we will show you how to download Android TV 12 ISO and why you should try it.</p>
5
- <h2>download android tv 12 iso</h2><br /><p><b><b>Download File</b> >>> <a href="https://urlin.us/2uSUgP">https://urlin.us/2uSUgP</a></b></p><br /><br />
6
- <h2>Android TV 12 Features</h2>
7
- <p>Android TV 12 comes with several exciting new features and enhancements. Here are some of the highlights:</p>
8
- <h3>Native 4K Rendering</h3>
9
- <p>Android TV has always supported 4K content, but the user interface was rendered in 1080p. With Android TV 12, you can now enjoy a crisp and clear UI in native 4K resolution, if your device supports it. This will make the text, icons, and animations look sharper and smoother on your screen.</p>
10
- <h3>Refresh Rate Switching</h3>
11
- <p>Android TV 12 also supports dynamic refresh rate switching, which means it can automatically adjust the refresh rate of your display to match the content you are watching. This will reduce motion judder and improve the smoothness of the video playback. You can enable this feature in the Display & Sound settings.</p>
12
- <h3>Privacy Indicators and Toggles</h3>
13
- <p>If your Android TV has a camera or a microphone, you might be concerned about your privacy. Android TV 12 addresses this issue by adding privacy indicators and toggles. Whenever an app uses your camera or microphone, you will see a bright green icon on the top corner of your screen. You can also block the access to these sensors for all apps from the settings menu.</p>
14
- <h3>Quick Connect for Wi-Fi</h3>
15
- <p>Setting up your Wi-Fi connection on your Android TV can be a hassle, especially if you have a long or complex password. Android TV 12 makes this process easier with Quick Connect. This feature allows you to scan a QR code on your screen with your phone and enter the password there. This way, you don't have to use the remote to type in the password.</p>
16
- <h3>Tweaked Design and Animations</h3>
17
- <p>Android TV 12 also brings some minor changes to the design and animations of the UI. The home screen now has a more refined look with background blurs and smoother transitions. The settings menu also has a new layout with larger icons and text. The boot animation has also been updated with a new logo and colors.</p>
18
- <p>How to download android tv 12 iso for ADT-3 Developer Kit<br />
19
- Download android tv 12 iso with 4K UI support and dynamic refresh rate<br />
20
- Android tv 12 iso download link and installation guide<br />
21
- Download android tv 12 iso for Treble-compliant devices<br />
22
- Android tv 12 iso features and benefits<br />
23
- Where to download android tv 12 iso for free<br />
24
- Download android tv 12 iso with improved HDR and surround sound support<br />
25
- Android tv 12 iso system requirements and compatibility<br />
26
- Download android tv 12 iso with Android 12-style UI and background blurs<br />
27
- Android tv 12 iso review and feedback<br />
28
- Download android tv 12 iso with Google Play support and updates<br />
29
- Android tv 12 iso vs Android TV 11 comparison<br />
30
- Download android tv 12 iso with enhanced security and privacy features<br />
31
- Android tv 12 iso troubleshooting and tips<br />
32
- Download android tv 12 iso with new remote control app and voice assistant<br />
33
- Android tv 12 iso best apps and games<br />
34
- Download android tv 12 iso with faster performance and smoother animations<br />
35
- Android tv 12 iso customization and settings<br />
36
- Download android tv 12 iso with built-in Chromecast and Google TV integration<br />
37
- Android tv 12 iso FAQs and answers<br />
38
- Download android tv 12 iso with support for external storage and USB devices<br />
39
- Android tv 12 iso developer options and tools<br />
40
- Download android tv 12 iso with new accessibility features and options<br />
41
- Android tv 12 iso keyboard and mouse support<br />
42
- Download android tv 12 iso with multi-user and guest mode support<br />
43
- Android tv 12 iso parental controls and restrictions<br />
44
- Download android tv 12 iso with new notification panel and quick settings<br />
45
- Android tv 12 iso network and connectivity options<br />
46
- Download android tv 12 iso with new media player and audio effects<br />
47
- Android tv 12 iso backup and restore options<br />
48
- Download android tv 12 iso with new wallpapers and themes<br />
49
- Android tv 12 iso screen mirroring and casting options<br />
50
- Download android tv 12 iso with new widgets and shortcuts<br />
51
- Android tv 12 iso sleep mode and power saving options<br />
52
- Download android tv 12 iso with new sound modes and profiles<br />
53
- Android tv 12 iso Bluetooth and wireless options<br />
54
- Download android tv 12 iso with new languages and locales support<br />
55
- Android tv 12 iso date and time options<br />
56
- Download android tv 12 iso with new gesture navigation and touch controls<br />
57
- Android tv 12 iso display calibration and adjustment options</p>
58
- <h2>Android TV 12 Compatibility</h2>
59
- <p>Before you download Android TV 12 ISO, you need to make sure that your device is compatible with it. Here are some things to consider:</p>
60
- <h3>Supported Devices</h3>
61
- <p>Android TV 12 is currently only available for developers who have an ADT-3 developer device. This is a dongle that runs on Android TV and it from the Google Store. If you have a different device, such as a smart TV, a set-top box, or a streaming stick, you will have to wait for the official release of Android TV 12, which is expected later this year. <h3>How to Check Your Device Compatibility</h3>
62
- <p>If you are not sure whether your device is compatible with Android TV 12, you can check it by following these steps:</p>
63
- <ol>
64
- <li>Go to the Settings menu on your Android TV.</li>
65
- <li>Select Device Preferences.</li>
66
- <li>Select About.</li>
67
- <li>Look for the Build number and check if it starts with RVC or SVP. If it does, your device is compatible with Android TV 12. If it starts with QTS or QSR, your device is not compatible.</li>
68
- </ol>
69
- <h2>Android TV 12 Installation</h2>
70
- <p>If you have an ADT-3 developer device and you want to install Android TV 12 on it, you have two options: using the Android Flash Tool or using the system image. Here are the steps for each method:</p>
71
- <h3>Requirements</h3>
72
- <p>Before you proceed with the installation, you need to have the following requirements:</p>
73
- <ul>
74
- <li>A computer running Windows, Mac OS, or Linux.</li>
75
- <li>A USB cable to connect your ADT-3 device to your computer.</li>
76
- <li>A stable internet connection.</li>
77
- <li>A backup of your data on your ADT-3 device, as the installation will erase everything on it.</li>
78
- </ul>
79
- <h3>Using Android Flash Tool</h3>
80
- <p>The Android Flash Tool is a web-based tool that allows you to flash Android TV 12 on your ADT-3 device without downloading any files. Here are the steps to use it:</p>
81
- <ol>
82
- <li>Go to the Android Flash Tool website on your computer.</li>
83
- <li>Allow the website to access your USB devices.</li>
84
- <li>Connect your ADT-3 device to your computer using the USB cable.</li>
85
- <li>Select your device from the list and click Add Device.</li>
86
- <li>Select the Android TV 12 build from the list and click Install.</li>
87
- <li>Follow the instructions on the screen and wait for the installation to complete.</li>
88
- <li>Disconnect your ADT-3 device from your computer and reboot it.</li>
89
- </ol>
90
- <h3>Using System Image</h3>
91
- <p>The system image is a file that contains the Android TV 12 software for your ADT-3 device. You can download it from the Android Developers website and flash it manually using a command-line tool. Here are the steps to use it:</p>
92
- <ol>
93
- <li>Download the system image file for your ADT-3 device from the Android Developers website and unzip it on your computer.</li>
94
- <li>Install the Android SDK Platform-Tools on your computer and add them to your PATH environment variable.</li>
95
- <li>Enable Developer Options and USB Debugging on your ADT-3 device. To do this, go to Settings > Device Preferences > About > Build and tap it seven times. Then go back to Settings > Device Preferences > Developer Options and turn on USB Debugging.</li>
96
- <li>Connect your ADT-3 device to your computer using the USB cable.</li>
97
- <li>Open a terminal or command prompt window on your computer and navigate to the folder where you unzipped the system image file.</li>
98
- <li>Type <code>adb reboot bootloader</code> and press Enter. This will reboot your ADT-3 device into bootloader mode.</li>
99
- <li>Type <code>fastboot devices</code> and press Enter. This will show you a list of connected devices. Make sure your ADT-3 device is listed.</li>
100
- <li>Type <code>flash-all.bat</code> (for Windows) or <code>./flash-all.sh</code> (for Mac OS or Linux) and press Enter. This will flash Android TV 12 on your ADT-3 device.</li>
101
- <li>Wait for the flashing process to finish and disconnect your ADT-3 device from your computer.</li>
102
- <li>Reboot your ADT-3 device and enjoy Android TV 12.</li>
103
- </ol>
104
- <h2>Conclusion</h2>
105
- <p>Android TV 12 is a major update for the smart TV platform that brings many new features and improvements. If you have an ADT-3 developer device, you can download Android TV 12 ISO and install it using either the Android Flash Tool or the system image. If you have a different device, you will have to wait for the official release of Android TV 12, which is expected later this year. We hope this article helped you learn how to download Android TV 12 ISO and why you should try it. If you have any questions or feedback, please let us know in the comments below.</p>
106
- <h2>FAQs</h2>
107
- <h3>What is the difference between Android TV and Google TV?</h3>
108
- <p>Android TV and Google TV are both smart TV platforms that run on the Android operating system. However, Google TV is a newer version that has a different user interface and features. Google TV is more personalized and integrated with Google services, such as Google Photos, YouTube, and Google Assistant. Google TV also supports a wider range of apps and devices than Android TV.</p>
109
- <h3>How can I update my Android TV to Android 12?</h3>
110
- <p>If you have a compatible device, you can update your Android TV to Android 12 by following these steps:</p>
111
- <ol>
112
- <li>Go to the Settings menu on your Android TV.</li>
113
- <li>Select Device Preferences.</li>
114
- <li>Select System Update.</li>
115
- <li>Check for updates and download the latest version of Android 12.</li>
116
- <li>Install the update and reboot your device.</li>
117
- </ol>
118
- <h3>How can I enable 4K UI on my Android TV?</h3>
119
- <p>If you have a 4K-capable device and display, you can enable 4K UI on your Android TV by following these steps:</p>
120
- <ol>
121
- <li>Go to the Settings menu on your Android TV.</li>
122
- <li>Select Device Preferences.</li>
123
- <li>Select Display & Sound.</li>
124
- <li>Select Resolution.</li>
125
- <li>Select 4K (2160p).</li>
126
- </ol>
127
- <h3>How can I block the camera and microphone on my Android TV?</h3>
128
- <p>If you want to block the access to the camera and microphone on your Android TV, you can do so by following these steps:</p>
129
- <ol>
130
- <li>Go to the Settings menu on your Android TV.</li>
131
- <li>Select Device Preferences.</li>
132
- <li>Select Privacy.</li>
133
- <li>Select Camera or Microphone.</li>
134
- <li>Turn off the toggle for Allow apps to access your camera or microphone.</li>
135
- </ol>
136
- <h3>How can I use Quick Connect to set up my Wi-Fi on my Android TV?</h3>
137
- <p>If you want to use Quick Connect to set up your Wi-Fi on your Android TV, you need to have a phone with the Google Home app installed. Then, you can follow these steps:</p>
138
- <ol>
139
- <li>Go to the Settings menu on your Android TV.</li>
140
- <li>Select Network & Internet.</li>
141
- <li>Select Add network.</li>
142
- <li>Select Quick Connect.</li>
143
- <li>Scan the QR code on your screen with your phone using the Google Home app.</li>
144
- <li>Enter your Wi-Fi password on your phone and tap Connect.</li>
145
- </ol></p> 197e85843d<br />
146
- <br />
147
- <br />
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Brawlhalla for Mac How to Download and Play the Free-to-Play Platform Fighter.md DELETED
@@ -1,109 +0,0 @@
1
-
2
- <h1>How to Download and Play Brawlhalla on Mac</h1>
3
- <p>If you are looking for a fun and exciting fighting game that you can play on your Mac, you might want to check out Brawlhalla. Brawlhalla is a free 2D platform fighting game that supports up to 8 players online or local, with full cross-play for PC, PS5, PS4, Xbox Series X|S, Xbox One, Nintendo Switch, iOS, and Android. In this article, we will tell you what Brawlhalla is, how to download it on your Mac, and some tips and tricks to improve your gameplay.</p>
4
- <h2>What is Brawlhalla?</h2>
5
- <p>Brawlhalla is a game developed by Blue Mammoth Games and published by Ubisoft. It was released in 2017 and has since gained a huge fan base of over 100 million players. Brawlhalla is inspired by the likes of Super Smash Bros. and features cartoonish graphics and simple controls. Here are some of the main features of Brawlhalla:</p>
6
- <h2>brawlhalla download mac</h2><br /><p><b><b>Download File</b> &raquo; <a href="https://urlin.us/2uT2dl">https://urlin.us/2uT2dl</a></b></p><br /><br />
7
- <h3>A free 2D platform fighting game with cross-play support</h3>
8
- <p>Brawlhalla is completely free to play and does not have any pay-to-win elements. You can unlock all the characters (called Legends) by playing the game or buying them with in-game currency (called Gold). You can also buy cosmetic items (called Skins) with real money or another in-game currency (called Mammoth Coins). Brawlhalla also supports cross-play across all platforms, meaning you can play with your friends no matter what device they use.</p>
9
- <h3>Features over 50 unique characters and weapons</h3>
10
- <p>Brawlhalla has a diverse roster of over 50 Legends, each with their own stats, abilities, and personalities. You can choose from historical warriors, mythical creatures, original characters, and even crossover characters from other franchises like Lara Croft, Shovel Knight, The Walking Dead, Ben 10, Steven Universe, WWE, Hellboy, Adventure Time, Rayman, and more. Each Legend has two weapons that they can use in combat, ranging from swords, axes, hammers, bows, guns, spears, gauntlets, scythes, katars, cannons, orbs, greatswords, rocket lances, blasters, daggers, and more. You can switch between your weapons by picking them up from the stage or throwing them at your opponents.</p>
11
- <h3>Offers various game modes and events</h3>
12
- <p>Brawlhalla has a variety of game modes that you can enjoy solo or with others. You can play casual free-for-alls or team battles with up to 8 players online or local. You can also play ranked matches to climb the ladder and earn rewards. You can also invite your friends to a private room or join custom games created by other players. Brawlhalla also has weekly rotations of different game modes like Strikeout, Bubble Tag, Kung Foot, Snowbrawl, Bombsketball, Morph, Horde Mode, and more. Additionally, Brawlhalla hosts seasonal events that offer exclusive skins, colors, avatars, and other items.</p>
13
- <h2>How to Download Brawlhalla on Mac</h2>
14
- <p>If you want to play Brawlhalla on your Mac, you need to meet the following requirements:</p>
15
- <h3>Requirements for Mac OS</h3>
16
- <ul>
17
- <li>OS: 10.7 or higher</li>
18
- <li>Memory: 1 GB RAM</li>
19
- <li>Storage: 400 MB available space</li>
20
- <li>Network: Broadband Internet connection</li>
21
- <li> Graphics: Intel HD Graphics 4000 or higher</li>
22
- </ul>
23
- <p>If your Mac meets these requirements, you can download Brawlhalla through Steam, which is a digital distribution platform for games and software. Here are the steps to download Brawlhalla through Steam:</p>
24
- <h3>Steps to download Brawlhalla through Steam</h3>
25
- <ol>
26
- <li>Go to the <a href="">Steam website</a> and click on the "Install Steam" button. This will download the Steam installer on your Mac.</li>
27
- <li>Open the Steam installer and follow the instructions to install Steam on your Mac.</li>
28
- <li>Launch Steam and log in with your Steam account. If you don't have a Steam account, you can create one for free.</li>
29
- <li>In the Steam app, go to the "Store" tab and search for "Brawlhalla" in the search bar.</li>
30
- <li>Click on the "Brawlhalla" game and then click on the "Play Game" button. This will add Brawlhalla to your Steam library and start downloading it on your Mac.</li>
31
- <li>Once the download is complete, you can launch Brawlhalla from your Steam library and start playing.</li>
32
- </ol>
33
- <h3>Alternative ways to play Brawlhalla on Mac</h3>
34
- <p>If you don't want to use Steam or if your Mac does not meet the requirements, you can still play Brawlhalla on your Mac using other methods. Here are some alternative ways to play Brawlhalla on Mac:</p>
35
- <p>brawlhalla mac os 64bit free download<br />
36
- brawlhalla cross-play platform fighter mac<br />
37
- brawlhalla epic games store mac download<br />
38
- brawlhalla mac steam download guide<br />
39
- brawlhalla mac system requirements and specs<br />
40
- brawlhalla mac controller support and settings<br />
41
- brawlhalla mac keyboard and mouse tips<br />
42
- brawlhalla mac gameplay and review<br />
43
- brawlhalla mac online multiplayer modes<br />
44
- brawlhalla mac offline single player modes<br />
45
- brawlhalla mac custom game rooms and maps<br />
46
- brawlhalla mac ranked matches and leaderboards<br />
47
- brawlhalla mac tournaments and events<br />
48
- brawlhalla mac patch notes and updates<br />
49
- brawlhalla mac news and announcements<br />
50
- brawlhalla mac skins and cosmetics<br />
51
- brawlhalla mac legends and weapons<br />
52
- brawlhalla mac combos and techniques<br />
53
- brawlhalla mac tips and tricks for beginners<br />
54
- brawlhalla mac advanced strategies and guides<br />
55
- brawlhalla mac best legends and tier list<br />
56
- brawlhalla mac best weapons and loadouts<br />
57
- brawlhalla mac best game modes and settings<br />
58
- brawlhalla mac best custom maps and mods<br />
59
- brawlhalla mac best skins and cosmetics to buy<br />
60
- brawlhalla mac free gold and mammoth coins<br />
61
- brawlhalla mac free codes and giveaways<br />
62
- brawlhalla mac free crossover events and collaborations<br />
63
- brawlhalla mac free battle pass and rewards<br />
64
- brawlhalla mac free community colors and avatars<br />
65
- brawlhalla mac how to unlock all legends<br />
66
- brawlhalla mac how to level up fast<br />
67
- brawlhalla mac how to improve your skills<br />
68
- brawlhalla mac how to win more matches<br />
69
- brawlhalla mac how to play with friends online<br />
70
- brawlhalla mac how to join a clan or create one<br />
71
- brawlhalla mac how to report a bug or issue<br />
72
- brawlhalla mac how to contact support or feedback<br />
73
- brawlhalla mac how to stream or record your gameplay<br />
74
- brawlhalla mac how to watch replays or highlights<br />
75
- brawlhalla mac comparison with other fighting games<br />
76
- brawlhalla mac history and development story<br />
77
- brawlhalla mac fun facts and trivia<br />
78
- brawlhalla mac fan art and memes<br />
79
- brawlhalla mac fan fiction and lore<br />
80
- brawlhalla mac fan videos and podcasts</p>
81
- <ul>
82
- <li>You can use a cloud gaming service like <a href="">NVIDIA GeForce Now</a> or <a href="">Shadow</a> that allows you to stream games from a remote server to your Mac. You will need a stable internet connection and a subscription fee for these services.</li>
83
- <li>You can use a virtual machine software like <a href="">Parallels Desktop</a> or <a href="">VMware Fusion</a> that allows you to run Windows on your Mac. You will need a Windows license and enough disk space and memory for these software.</li>
84
- <li>You can use a dual-boot system that allows you to switch between Mac OS and Windows on your Mac. You will need a Windows license and a separate partition for Windows on your Mac.</li>
85
- </ul>
86
- <h2>Tips and Tricks for Brawlhalla</h2>
87
- <p>Brawlhalla is a game that requires skill, strategy, and practice to master. Here are some tips and tricks that can help you improve your gameplay and have more fun in Brawlhalla:</p>
88
- <h3>Improve your movement, recovery, and dodging skills</h3>
89
- <p>Movement is one of the most important aspects of Brawlhalla, as it determines how you position yourself, attack, defend, and survive. You should learn how to use your jumps, dashes, fast falls, wall jumps, gravity cancels, chase dodges, and recovery moves effectively. You should also learn how to dodge your opponent's attacks and punish them accordingly. You can use different types of dodges like spot dodge, directional dodge, speed dodge, and chain dodge depending on the situation.</p>
90
- <h3>Experiment with different characters and weapons</h3>
91
- <p>Brawlhalla has a lot of variety in terms of characters and weapons, so you should try them all out and find out which ones suit your playstyle and preference. You should also learn the strengths, weaknesses, combos, strings, signatures, and matchups of each character and weapon. You can use the <a href="">Brawlhalla Wiki</a> or <a href="">Brawlmance</a> to get more information about the game's mechanics and statistics.</p>
92
- <h3>Practice in training mode and watch pro players</h3>
93
- <p>Brawlhalla has a training mode that allows you to practice your skills against a dummy or a bot. You can customize the settings of the training mode to suit your needs. You can also watch pro players stream or upload videos of their gameplay on platforms like <a href="">Twitch</a> or <a href="">YouTube</a>. You can learn from their strategies, techniques, tips, and mistakes.</p>
94
- <h2>Conclusion</h2>
95
- <p>Brawlhalla is a fun and exciting fighting game that you can play on your Mac for free. You can download it through Steam or use other methods if you prefer. You can also improve your gameplay by following some tips and tricks that we have shared in this article. We hope you enjoy playing Brawlhalla on your Mac and have a blast with your friends online or offline.</p>
96
- <h2>FAQs </h2>
97
- <p>Here are some frequently asked questions about Brawlhalla and their answers:</p>
98
- <h3>Is Brawlhalla free?</h3>
99
- <p>Yes, Brawlhalla is free to play and does not have any pay-to-win elements. You can unlock all the characters by playing the game or buying them with in-game currency. You can also buy cosmetic items with real money or another in-game currency.</p>
100
- <h3>Is Brawlhalla cross-platform?</h3>
101
- <p>Yes, Brawlhalla supports cross-play across all platforms, including PC, PS5, PS4, Xbox Series X|S, Xbox One, Nintendo Switch, iOS, and Android. You can play with your friends no matter what device they use.</p>
102
- <h3>How many players can play Brawlhalla?</h3>
103
- <p>Brawlhalla supports up to 8 players online or local in various game modes. You can play casual free-for-alls or team battles, ranked matches, custom games, or weekly rotations of different game modes.</p>
104
- <h3>How do I change my controls in Brawlhalla?</h3>
105
- <p>You can change your controls in Brawlhalla by going to the "Settings" menu and then the "Controls" tab. You can customize your keyboard or controller settings to your liking. You can also change your mouse sensitivity and aim assist options.</p>
106
- <h3>How do I get better at Brawlhalla?</h3>
107
- <p>You can get better at Brawlhalla by practicing your movement, recovery, and dodging skills, experimenting with different characters and weapons, practicing in training mode and watching pro players, and learning from your mistakes and feedback.</p> 197e85843d<br />
108
- <br />
109
- <br />
 
spaces/1phancelerku/anime-remove-background/Download Naija Ludo Pro APK and Play the Classic Dice and Race Game with Friends.md DELETED
@@ -1,128 +0,0 @@
1
-
2
- <h1>Naija Ludo Pro APK: A Fun and Exciting Board Game for Everyone</h1>
3
- <p>Do you love playing board games with your friends and family? Do you want to experience a classic dice and race game with a Nigerian twist? If yes, then you should try <strong>naija ludo pro apk</strong>, a professional board game that is made for professionals.</p>
4
- <h2>naija ludo pro apk</h2><br /><p><b><b>Download Zip</b> &#10022;&#10022;&#10022; <a href="https://jinyurl.com/2uNObE">https://jinyurl.com/2uNObE</a></b></p><br /><br />
5
- <p>Naija ludo pro apk is an android game that you can download from <a href="(^1^)">APKCombo</a> or <a href="(^3^)">Google Play Store</a>. It is based on the popular board game Ludo, which originated from India and became famous around the world. Naija ludo pro apk has many features that make it more fun and challenging than other ludo games. Some of these features are:</p>
6
- <ul>
7
- <li>More boards added: you can choose among three colourful boards.</li>
8
- <li>Online multiplayer: you can play with any of your friends or family members anywhere in the world from the comfort of your home.</li>
9
- <li>Visual hand added: you can see your opponent's hand and plan your moves accordingly.</li>
10
- <li>Difficulty level added: you can choose from easy, normal, hard, and advanced levels depending on your skill and preference.</li>
11
- <li>Speed control added: you can control how fast a piece moves.</li>
12
- <li>Barrier and safe-house options: you can enable or disable these features to make the game more interesting.</li>
13
- <li>Dice options: you can choose to play with one die or two dice.</li>
14
- <li>Capture options: you can decide to remove a piece when it captures an opponent's piece or not.</li>
15
- <li>Play again options: you can decide to play again when you capture an opponent's piece irrespective of the outcome.</li>
16
- </ul>
17
- <p>All these features are accessible through options. You can also adjust the sound, music, vibration, and language settings according to your liking. Naija ludo pro apk is a game that will keep you entertained for hours.</p>
18
- <h2>History of Ludo Game</h2>
19
- <p>Ludo is a game that has a long and rich history. It is believed that it evolved from an ancient Indian game called <em>Pachisi</em>, which was created in the sixth century CE. The earliest evidence of this game's evolution in India is the depiction of boards on the caves of Ellora, a UNESCO World Heritage Site in Maharashtra. The original version of Pachisi was also described in the Indian epic Mahabharata, in which Shakuni used cursed dice to beat the Pandavas, leading to a series of events that resulted in the Kurukshetra War.</p>
20
- <p>naija ludo pro game download<br />
21
- naija ludo pro online multiplayer<br />
22
- naija ludo pro apk free download<br />
23
- naija ludo pro app for android<br />
24
- naija ludo pro latest version<br />
25
- naija ludo pro board game<br />
26
- naija ludo pro dice and race<br />
27
- naija ludo pro mod apk<br />
28
- naija ludo pro review<br />
29
- naija ludo pro features<br />
30
- naija ludo pro gameplay<br />
31
- naija ludo pro price<br />
32
- naija ludo pro tips and tricks<br />
33
- naija ludo pro hack<br />
34
- naija ludo pro cheats<br />
35
- naija ludo pro settings<br />
36
- naija ludo pro rules<br />
37
- naija ludo pro strategy<br />
38
- naija ludo pro how to play<br />
39
- naija ludo pro best board<br />
40
- naija ludo pro difficulty level<br />
41
- naija ludo pro speed control<br />
42
- naija ludo pro barrier option<br />
43
- naija ludo pro safe-house option<br />
44
- naija ludo pro one die or two dice option<br />
45
- naija ludo pro remove piece option<br />
46
- naija ludo pro play again option<br />
47
- naija ludo pro supported languages<br />
48
- naija ludo pro content rating<br />
49
- naija ludo pro apk size<br />
50
- naija ludo pro developer<br />
51
- naija ludo pro category<br />
52
- naija ludo pro google play id<br />
53
- naija ludo pro installs<br />
54
- naija ludo pro update date<br />
55
- naija ludo pro trailer video<br />
56
- naija ludo pro screenshots<br />
57
- naija ludo pro ratings and reviews<br />
58
- naija ludo pro similar games<br />
59
- naija ludo pro alternatives<br />
60
- naija ludo pro vs classic Ludo <br />
61
- naija Ludo Pro Bluetooth multiplayer <br />
62
- NAIJA LUDO PRO visual hand <br />
63
- NAIJA LUDO PRO piece capture <br />
64
- NAIJA LUDO PRO net energy gain <br />
65
- NAIJA LUDO PRO mini sun <br />
66
- NAIJA LUDO PRO 100 million degrees <br />
67
- NAIJA LUDO PRO holy grail experiment <br />
68
- NAIJA LUDO PRO Korea Institute of Fusion Energy</p>
69
- <p>Pachisi was modified by different cultures and regions over time, giving rise to various versions of the game. Some of these versions are Chaupar, Chausar, Chopad, Chatush Pada, <h2>Rules of Ludo Game</h2>
70
- <p>Ludo is a game that can be played by two to four players, without partnerships. The objective of the game is to race your four tokens from start to finish according to the rolls of a single die. The game has some basic rules that you need to follow in order to play it properly. Here are the rules of ludo game:</p>
71
- <ul>
72
- <li>Each player chooses a colour and places their four tokens in their home base, which is one of the large corner areas of the board in their colour.</li>
73
- <li>The players take turns rolling the die and moving their tokens. The player who rolls the highest number goes first, and then the turn passes clockwise.</li>
74
- <li>To enter a token into play from its home base, a player must roll a 6. If the player has no tokens in play and does not roll a 6, the turn passes to the next player.</li>
75
- <li>Once a player has one or more tokens in play, they can move any token the number of squares indicated by the die. A token can only move forward along the main track, which is the path of squares not part of any player's home column.</li>
76
- <li>If a player rolls a 6, they get another turn. They can either move the same token or a different token. A player can roll up to three consecutive 6s in a row. If they roll a fourth 6, they must return one of their tokens to their home base and lose their turn.</li>
77
- <li>A player can capture an opponent's token by landing on the same square as it, unless the square is part of the opponent's home column or is marked with a star. The captured token is returned to its home base and must start over.</li>
78
- <li>A player cannot land on a square that already has one of their own tokens, unless they are moving along their home column.</li>
79
- <li>A player can form a block by placing two or more of their tokens on the same square. A block cannot be captured or passed by any opponent's token.</li>
80
- <li>To move a token into its finishing square, which is at the centre of the board, a player must roll the exact number required to end on that square. The finishing square can only hold one token of each colour.</li>
81
- <li>The first player to move all four of their tokens into their finishing square wins the game.</li>
82
- </ul>
83
- <h2>Benefits of Playing Ludo Game</h2>
84
- <p>Ludo is not only a fun and exciting board game, but also a beneficial one for your health and well-being. Playing ludo can improve your brain function, give you pleasure and relieve stress, lower your blood pressure and boost your immunity, and more. Here are some of the benefits of playing ludo game:</p>
85
- <ul>
86
- <li>Ludo improves brain function and cognitive skills. Ludo is a game that requires strategy, tactics, counting, probability, and decision making. These skills help stimulate your brain cells and enhance your mental abilities. Playing ludo can also improve your memory, concentration, and problem-solving skills.</li>
87
- <li>Ludo gives pleasure and relieves stress. Ludo is a game that brings joy and laughter to you and your friends or family. It is a great way to have fun and relax after a long day. Playing ludo can also release endorphins, which are natural chemicals that make you feel happy and reduce pain. Ludo can also help you cope with anxiety, depression, and loneliness.</li>
88
- <li>Ludo lowers blood pressure and boosts immunity. Ludo is a game that reduces stress and tension, which are major causes of high blood pressure and heart problems. Playing ludo can also lower your cortisol levels, which are hormones that weaken your immune system and make you more prone to infections and diseases. Ludo can also increase your blood circulation and oxygen supply to your organs.</li>
89
- </ul> <h2>Tips and Tricks for Playing Ludo Game</h2>
90
- <p>Ludo is a game that requires both luck and skill. You need to roll the dice well, but you also need to use your brain to make the best moves. Here are some tips and tricks for playing ludo game that can help you win more often:</p>
91
- <ul>
92
- <li>Strategize your moves: One of the most important tips to win a game of ludo is to think ahead of the opponent. You can do this by predicting their next moves and preventing their tokens from landing on yours. This step is important as not predicting or analyzing the opponent’s playstyle might end you going back to the home base.</li>
93
- <li>Play with all your tokens: Do not focus on only one token and neglect the others. Try to move all your tokens out of your base as soon as possible and spread them across the board. This way, you can have more options to choose from and avoid being blocked or captured by the opponent.</li>
94
- <li>Park the token at the start and capture: A smart trick to play ludo is to park one of your tokens at the start square of your home column and wait for an opportunity to capture an opponent’s token. This way, you can secure your position and also earn an extra turn.</li>
95
- <li>Utilize the safe boxes: The safe boxes are the squares marked with a star on the board. They are located at the corners of each arm and at the centre of the board. These boxes are safe from being captured by any opponent, so use them wisely to protect your tokens or to plan your next move.</li>
96
- <li>Know your opponent: Another tip to win ludo is to observe and learn from your opponent’s behaviour and patterns. You can notice their strengths and weaknesses, their preferences and tendencies, their habits and mistakes. By knowing your opponent, you can anticipate their actions and counter them effectively.</li>
97
- <li>Dice value division: A clever trick to play ludo is to divide the value of the dice by two and use it for two different tokens. For example, if you roll a 6, you can move one token 3 squares and another token 3 squares. This way, you can optimize your moves and cover more ground.</li>
98
- <li>Rule of 7: A simple rule to remember when playing ludo is that the sum of the opposite sides of a die is always 7. For example, if you roll a 1, the opposite side will be a 6. If you roll a 2, the opposite side will be a 5. This rule can help you predict what number you might get next and plan accordingly.</li>
99
- <li>Remember your tokens’ positions: A common mistake that players make in ludo is forgetting where their tokens are on the board. This can lead to missing opportunities or making blunders. To avoid this, try to keep track of your tokens’ positions and movements in your mind or on a paper.</li>
100
- <li>Home > Kill: A golden rule to follow when playing ludo is to always prioritize killing an opponent’s token over moving your own token closer to home. This way, you can eliminate their chances of winning and also gain an extra turn.</li>
101
- <li>Hone your skills: The best way to improve your ludo game is to practice regularly and learn from your experiences. You can play with different opponents, try different strategies, and experiment with different settings. The more you play, the more you will learn and master the game.</li>
102
- <li>Choose your format based on risk: Zupee offers four different formats of ludo games: Ludo Turbo, Ludo Supreme, Ludo Ninja, and Ludo Classic. Each format has its own rules, time limit, scoring system, and prize pool. Depending on your risk appetite and skill level, you can choose the format that suits you best.</li>
103
- </ul>
104
- <h2>Conclusion</h2>
105
- <p>Ludo is a game that has been enjoyed by millions of people for centuries. It is a game that combines luck and skill, fun and challenge, joy and laughter. It is a game that can be played by anyone, anywhere, anytime.</p>
106
- <p>Naija ludo pro apk is a game that takes ludo to the next level. It is a game that offers more features, more options, more boards, more levels, more fun. It is a game that lets you play with your friends or family online or offline.</p>
107
- <p>If you are looking for a professional board game that is made for professionals, then naija ludo pro apk is the game for you. Download it today from <a href="(^1^)">APKCombo</a> or <a href=" ">Google Play Store</a> and enjoy the game of ludo like never before.</p>
108
- <h2>FAQs</h2>
109
- <p>Here are some of the frequently asked questions about naija ludo pro apk:</p>
110
- <ol>
111
- <li><strong>Where can I download naija ludo pro apk?</strong></li>
112
- <p>You can download naija ludo pro apk from <a href="">APKCombo</a> or <a href="">Google Play Store</a>. These are the official and trusted sources for downloading the game. You can also scan the QR code on the game's website to download it directly to your device.</p>
113
- <li><strong>Is naija ludo pro apk safe and secure?</strong></li>
114
- <p>Yes, naija ludo pro apk is safe and secure to use. It does not contain any viruses, malware, or spyware that can harm your device or data. It also does not require any unnecessary permissions or access to your personal information. It is a game that respects your privacy and security.</p>
115
- <li><strong>Can I play naija ludo pro apk online with other players?</strong></li>
116
- <p>Yes, you can play naija ludo pro apk online with other players from around the world. You can either join a random match or create a private room and invite your friends or family to join. You can also chat with your opponents and send them emojis during the game.</p>
117
- <li><strong>Can I customize the board and pieces in naija ludo pro apk?</strong></li>
118
- <p>Yes, you can customize the board and pieces in naija ludo pro apk according to your preference. You can choose among three different boards: classic, modern, and Nigerian. You can also choose among four different sets of pieces: standard, premium, deluxe, and royal. You can also change the colour of your pieces if you want.</p>
119
- <li><strong>What are the differences between naija ludo pro apk and other ludo games?</strong></li>
120
- <p>Naija ludo pro apk is a game that has many differences from other ludo games. Some of these differences are:</p>
121
- <ul>
122
- <li>Naija ludo pro apk has more features and options than other ludo games. You can control the speed, difficulty, dice, capture, play again, barrier, and safe-house options in the game.</li>
123
- <li>Naija ludo pro apk has more boards and pieces than other ludo games. You can choose among three colourful boards and four sets of pieces in the game.</li>
124
- <li>Naija ludo pro apk has a visual hand feature that lets you see your opponent's hand and plan your moves accordingly.</li>
125
- <li>Naija ludo pro apk has a Nigerian theme that gives it a unique flavour and style.</li>
126
- </ul></p> 197e85843d<br />
127
- <br />
128
- <br />
 
spaces/1phancelerku/anime-remove-background/Download Treasure of Montezuma 4 and Experience the Ultimate Match-3 Adventure.md DELETED
@@ -1,122 +0,0 @@
1
-
2
- <h1>How to Download Treasure of Montezuma 4 and Enjoy a Thrilling Puzzle Adventure</h1>
3
- <p>If you are looking for a new and exciting puzzle game to play, you should definitely check out Treasure of Montezuma 4. This is the fourth installment of the popular series that has captivated millions of players around the world. In this article, we will tell you what Treasure of Montezuma 4 is, why you should download it, and how to do it from different platforms. Read on and get ready to embark on an amazing journey through the ancient Aztec civilization.</p>
4
- <h2>download treasure of montezuma 4</h2><br /><p><b><b>Download</b> &mdash; <a href="https://jinyurl.com/2uNMkr">https://jinyurl.com/2uNMkr</a></b></p><br /><br />
5
- <h2>What is Treasure of Montezuma 4?</h2>
6
- <p>Treasure of Montezuma 4 is a tile-matching puzzle game that combines elements of adventure, mystery, and magic. You play as Anna, an archaeologist who travels to an Aztec ruin to uncover an ancient secret. Along the way, you will encounter various challenges and surprises that will keep you hooked for hours.</p>
7
- <h3>A brief introduction to the game and its features</h3>
8
- <p>The game has three modes: Story Mode, Quest Mode, and Puzzle Mode. In Story Mode, you will follow Anna's story as she explores the ruin and faces an epic boss battle. In Quest Mode, you will complete different tasks and earn rewards. In Puzzle Mode, you will solve tricky puzzles with limited moves.</p>
9
- <p>The game also has 98 levels in Story Mode and 69 levels in Quest Mode, each with different goals and obstacles. You will need to match three or more tiles of the same color to clear them from the board and create powerful combos. You will also collect crystals and coins that you can use to upgrade your character and build your own Ziggurat.</p>
10
- <p>download treasure of montezuma 4 full version<br />
11
- download treasure of montezuma 4 for pc<br />
12
- download treasure of montezuma 4 free online<br />
13
- download treasure of montezuma 4 apk<br />
14
- download treasure of montezuma 4 mod<br />
15
- download treasure of montezuma 4 crack<br />
16
- download treasure of montezuma 4 game<br />
17
- download treasure of montezuma 4 android<br />
18
- download treasure of montezuma 4 ios<br />
19
- download treasure of montezuma 4 mac<br />
20
- download treasure of montezuma 4 windows 10<br />
21
- download treasure of montezuma 4 steam<br />
22
- download treasure of montezuma 4 torrent<br />
23
- download treasure of montezuma 4 cheats<br />
24
- download treasure of montezuma 4 tips<br />
25
- download treasure of montezuma 4 walkthrough<br />
26
- download treasure of montezuma 4 guide<br />
27
- download treasure of montezuma 4 review<br />
28
- download treasure of montezuma 4 gameplay<br />
29
- download treasure of montezuma 4 trailer<br />
30
- download treasure of montezuma 4 latest version<br />
31
- download treasure of montezuma 4 update<br />
32
- download treasure of montezuma 4 patch<br />
33
- download treasure of montezuma 4 serial key<br />
34
- download treasure of montezuma 4 license key<br />
35
- download treasure of montezuma 4 activation key<br />
36
- download treasure of montezuma 4 registration key<br />
37
- download treasure of montezuma 4 product key<br />
38
- download treasure of montezuma 4 cd key<br />
39
- download treasure of montezuma 4 keygen<br />
40
- download treasure of montezuma 4 generator<br />
41
- download treasure of montezuma 4 hack<br />
42
- download treasure of montezuma 4 unlimited lives<br />
43
- download treasure of montezuma 4 unlimited coins<br />
44
- download treasure of montezuma 4 unlimited gems<br />
45
- download treasure of montezuma 4 unlimited boosters<br />
46
- download treasure of montezuma 4 unlimited time<br />
47
- download treasure of montezuma 4 no ads<br />
48
- download treasure of montezuma 4 premium<br />
49
- download treasure of montezuma 4 pro<br />
50
- download treasure of montezuma 4 deluxe<br />
51
- download treasure of montezuma 4 gold edition<br />
52
- download treasure of montezuma 4 collector's edition<br />
53
- download treasure of montezuma 4 special edition<br />
54
- download treasure of montezuma 4 ultimate edition<br />
55
- how to download treasure of montezuma 4 <br />
56
- where to download treasure of montezuma 4 <br />
57
- why to download treasure of montezuma 4</p>
58
- <p>Moreover, the game features seven powerful totems and eight unique bonuses that will help you in your quest. The totems are ancient gods that have special abilities, such as creating explosions, swapping tiles, or freezing time. The bonuses are items that you can activate during the game, such as hammers, bombs, or lightning bolts.</p>
59
- <p>The game also has stunning graphics and sound effects that create an immersive atmosphere. You will enjoy the colorful animations, the realistic backgrounds, and the authentic music. You will also learn interesting facts about the Aztec culture and history as you play.</p>
60
- <h2>Why should you download Treasure of Montezuma 4?</h2>
61
- <p>Treasure of Montezuma 4 is not just another puzzle game. It is a game that offers many benefits for players of all ages and preferences. Here are some of them:</p>
62
- <h3>The benefits of playing the game, such as fun, challenge, and learning</h3>
63
- <h4>Fun: How the game offers a variety of modes, levels, and special effects</h4>
64
- <p>One of the main reasons to download Treasure of Montezuma 4 is that it is fun. The game offers a variety of modes, levels, and special effects that make it entertaining and engaging. You will never get bored with this game because there is always something new and exciting to discover. Whether you want to follow Anna's story, complete quests, or solve puzzles , you will find something that suits your mood and taste.</p>
65
- <h4>Challenge: How the game tests your skills, strategy, and speed</h4>
66
- <p>Another reason to download Treasure of Montezuma 4 is that it is challenging. The game tests your skills, strategy, and speed in different ways. You will need to think fast and act faster to clear the board and achieve the goals. You will also need to plan ahead and use the totems and bonuses wisely to overcome the obstacles. The game has different difficulty levels, from easy to hard, so you can adjust the challenge according to your preference and ability.</p>
67
- <h4>Learning: How the game teaches you about the Aztec culture and history</h4>
68
- <p>A third reason to download Treasure of Montezuma 4 is that it is educational. The game teaches you about the Aztec culture and history in a fun and interactive way. You will learn about the Aztec gods, symbols, rituals, and architecture as you play. You will also discover the secrets of the Ziggurat, a massive pyramid that was built by the Aztecs to honor their gods. The game has a built-in encyclopedia that provides more information and facts about the topics covered in the game.</p>
69
- <h2>How to download Treasure of Montezuma 4?</h2>
70
- <p>Now that you know what Treasure of Montezuma 4 is and why you should download it, you might be wondering how to do it. Well, the good news is that downloading Treasure of Montezuma 4 is easy and convenient. You can download the game from different platforms, such as Steam, PlayStation, and GameHouse. Here are the steps to do it from each platform:</p>
71
- <h3>The steps to download the game from different platforms, such as Steam, PlayStation, and GameHouse</h3>
72
- <h4>Steam: How to buy the game for a discounted price and install it on your PC</h4>
73
- <p>If you want to download Treasure of Montezuma 4 from Steam, you will need to have a Steam account and a compatible PC. You can create a Steam account for free by visiting <a href="">https://store.steampowered.com/join/</a>. Once you have an account, you can buy the game for a discounted price of $2.99 (regular price $9.99) by visiting <a href="">https://store.steampowered.com/app/347400/The_Treasures_of_Montezuma_4/</a>. After you buy the game, you can install it on your PC by following these steps:</p>
74
- <ol>
75
- <li>Open Steam and log in with your account.</li>
76
- <li>Go to Library and find Treasure of Montezuma 4 in your list of games.</li>
77
- <li>Click on Install and choose a location for the game files.</li>
78
- <li>Wait for the installation to finish and click on Play.</li>
79
- </ol>
80
- <p>Congratulations! You have successfully downloaded Treasure of Montezuma 4 from Steam. Enjoy!</p>
81
- <h4>PlayStation: How to purchase the game from the PlayStation Store and play it on your PS4 or PS5</h4>
82
- <p>If you want to download Treasure of Montezuma 4 from PlayStation, you will need to have a PlayStation account and a PS4 or PS5 console. You can create a PlayStation account for free by visiting <a href="">https://www.playstation.com/en-us/support/account/create-account/</a>. Once you have an account, you can purchase the game for $9.99 by visiting <a href="">https://store.playstation.com/en-us/product/UP4151-CUSA01975_00-TREASUREMONTEZUM</a>. After you purchase the game, you can download it on your PS4 or PS5 by following these steps:</p>
83
- <ol>
84
- <li>Turn on your PS4 or PS5 and log in with your account.</li>
85
- <li>Go to Library and find Treasure of Montezuma 4 in your list of games.</li>
86
- <li>Select Download and wait for the download to finish.</li>
87
- <li>Select Start to launch the game.</li>
88
- </ol>
89
- <p>Congratulations! You have successfully downloaded Treasure of Montezuma 4 from PlayStation. Enjoy!</p>
90
- <h4>GameHouse: How to sign up for a free trial and access thousands of games, including Treasure of Montezuma 4</h4>
91
- <p>If you want to download Treasure of Montezuma 4 from GameHouse, you will need to sign up for a free trial and access thousands of games, including Treasure of Montezuma 4. GameHouse is a website that offers unlimited access to over 2,500 games for a monthly fee of $10.99. However, you can try it for free for 14 days by visiting <a href="">https://www.gamehouse.com/</a>. Once you sign up for a free trial, you can download Treasure of Montezuma 4 by following these steps:</p>
92
- <ol>
93
- <li>Open GameHouse and log in with your account.</li>
94
- <li>Go to Puzzle Games and find Treasure of Montezuma 4 in the list of games.</li>
95
- <li>Select Play Now and wait for the game to load.</li>
96
- <li>Select Full Screen to enjoy the game in full screen mode.</li>
97
- </ol>
98
- <p>Congratulations! You have successfully downloaded Treasure of Montezuma 4 from GameHouse. Enjoy!</p>
99
- <h2>Conclusion</h2>
100
- <p>Treasure of Montezuma 4 is a fantastic puzzle game that will keep you entertained and challenged for hours. It has three modes, 98 levels, seven totems, eight bonuses, and stunning graphics and sound effects. It also teaches you about the Aztec culture and history in a fun and interactive way. You can download the game from Steam, PlayStation, or GameHouse, depending on your preference and device. Don't miss this opportunity to experience a thrilling puzzle adventure. Download Treasure of Montezuma 4 today and discover the secrets of the Ziggurat!</p>
101
- <h2>FAQs</h2>
102
- <p>Here are some frequently asked questions about Treasure of Montezuma 4:</p>
103
- <ul>
104
- <li><b>What are the system requirements for Treasure of Montezuma 4?</b></li>
105
- <p>The system requirements for Treasure of Montezuma 4 are as follows:</p>
106
- <table>
107
- <tr><td>Platform</td><td>Minimum Requirements</td></tr>
108
- <tr><td>Steam</td><td>OS: Windows XP/Vista/7/8/10<br>CPU: 1.5 GHz<br>RAM: 256 MB<br>Disk Space: 500 MB<br>DirectX: 9.0 or higher</td></tr>
109
- <tr><td>PlayStation</td><td>PS4 or PS5 console<br>Internet connection<br>PlayStation account<br>Disk Space: 1 GB</td></tr>
110
- <tr><td>GameHouse</td><td>OS: Windows 7/8/10<br>CPU: 1.6 GHz<br>RAM: 512 MB<br>Disk Space: 500 MB<br>DirectX: 9.0 or higher<br>Internet connection<br>GameHouse account</td></tr>
111
- </table>
112
- <li><b>How long does it take to finish Treasure of Montezuma 4?</b></li>
113
- <p>The time it takes to finish Treasure of Montezuma 4 depends on your skill level and play style. However, on average, it takes about 10 hours to complete Story Mode, 5 hours to complete Quest Mode, and 2 hours to complete Puzzle Mode.</p>
114
- <li><b>Can I play Treasure of Montezuma 4 offline?</b></li>
115
- <p>You can play Treasure of Montezuma 4 offline if you download it from Steam or PlayStation. However, you will need an internet connection to activate the game and access some features, such as achievements and leaderboards. If you download it from GameHouse, you will need an internet connection to play the game.</p>
116
- <li><b>Is Treasure of Montezuma 4 suitable for children?</b></li>
117
- <p>Treasure of Montezuma 4 is suitable for children aged 7 and above. The game is rated E (Everyone) by the ESRB and PEGI 3 by PEGI, and it does not contain any violence, blood, gore, or sexual content. However, some parents may consider certain elements, such as the depiction of Aztec gods and rituals, inappropriate for younger children.</p>
118
- <li><b>Is there a sequel to Treasure of Montezuma 4?</b></li>
119
- <p>Treasure of Montezuma 4 is the latest installment of the series as of June 2023. There is no official announcement about a sequel yet. However, you can check out the previous games in the series, such as Treasure of Montezuma, Treasure of Montezuma 2, and Treasure of Montezuma 3.</p>
120
- </ul>
spaces/AI-Zero-to-Hero/02-H5-AR-VR-IOT/style.css DELETED
@@ -1,28 +0,0 @@
1
- body {
2
- padding: 2rem;
3
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
4
- }
5
-
6
- h1 {
7
- font-size: 16px;
8
- margin-top: 0;
9
- }
10
-
11
- p {
12
- color: rgb(107, 114, 128);
13
- font-size: 15px;
14
- margin-bottom: 10px;
15
- margin-top: 5px;
16
- }
17
-
18
- .card {
19
- max-width: 620px;
20
- margin: 0 auto;
21
- padding: 16px;
22
- border: 1px solid lightgray;
23
- border-radius: 16px;
24
- }
25
-
26
- .card p:last-child {
27
- margin-bottom: 0;
28
- }
spaces/AIFILMS/generate_human_motion/pyrender/pyrender/viewer.py DELETED
@@ -1,1160 +0,0 @@
1
- """A pyglet-based interactive 3D scene viewer.
2
- """
3
- import copy
4
- import os
5
- import sys
6
- from threading import Thread, RLock
7
- import time
8
-
9
- import imageio
10
- import numpy as np
11
- import OpenGL
12
- import trimesh
13
-
14
- try:
15
- from Tkinter import Tk, tkFileDialog as filedialog
16
- except Exception:
17
- try:
18
- from tkinter import Tk, filedialog as filedialog
19
- except Exception:
20
- pass
21
-
22
- from .constants import (TARGET_OPEN_GL_MAJOR, TARGET_OPEN_GL_MINOR,
23
- MIN_OPEN_GL_MAJOR, MIN_OPEN_GL_MINOR,
24
- TEXT_PADDING, DEFAULT_SCENE_SCALE,
25
- DEFAULT_Z_FAR, DEFAULT_Z_NEAR, RenderFlags, TextAlign)
26
- from .light import DirectionalLight
27
- from .node import Node
28
- from .camera import PerspectiveCamera, OrthographicCamera, IntrinsicsCamera
29
- from .trackball import Trackball
30
- from .renderer import Renderer
31
- from .mesh import Mesh
32
-
33
- import pyglet
34
- from pyglet import clock
35
- pyglet.options['shadow_window'] = False
36
-
37
-
38
- class Viewer(pyglet.window.Window):
39
- """An interactive viewer for 3D scenes.
40
-
41
- The viewer's camera is separate from the scene's, but will take on
42
- the parameters of the scene's main view camera and start in the same pose.
43
- If the scene does not have a camera, a suitable default will be provided.
44
-
45
- Parameters
46
- ----------
47
- scene : :class:`Scene`
48
- The scene to visualize.
49
- viewport_size : (2,) int
50
- The width and height of the initial viewing window.
51
- render_flags : dict
52
- A set of flags for rendering the scene. Described in the note below.
53
- viewer_flags : dict
54
- A set of flags for controlling the viewer's behavior.
55
- Described in the note below.
56
- registered_keys : dict
57
- A map from ASCII key characters to tuples containing:
58
-
59
- - A function to be called whenever the key is pressed,
60
- whose first argument will be the viewer itself.
61
- - (Optionally) A list of additional positional arguments
62
- to be passed to the function.
63
- - (Optionally) A dict of keyword arguments to be passed
64
- to the function.
65
-
66
- kwargs : dict
67
- Any keyword arguments left over will be interpreted as belonging to
68
- either the :attr:`.Viewer.render_flags` or :attr:`.Viewer.viewer_flags`
69
- dictionaries. Those flag sets will be updated appropriately.
70
-
71
- Note
72
- ----
73
- The basic commands for moving about the scene are given as follows:
74
-
75
- - **Rotating about the scene**: Hold the left mouse button and
76
- drag the cursor.
77
- - **Rotating about the view axis**: Hold ``CTRL`` and the left mouse
78
- button and drag the cursor.
79
- - **Panning**:
80
-
81
- - Hold SHIFT, then hold the left mouse button and drag the cursor, or
82
- - Hold the middle mouse button and drag the cursor.
83
-
84
- - **Zooming**:
85
-
86
- - Scroll the mouse wheel, or
87
- - Hold the right mouse button and drag the cursor.
88
-
89
- Other keyboard commands are as follows:
90
-
91
- - ``a``: Toggles rotational animation mode.
92
- - ``c``: Toggles backface culling.
93
- - ``f``: Toggles fullscreen mode.
94
- - ``h``: Toggles shadow rendering.
95
- - ``i``: Toggles axis display mode
96
- (no axes, world axis, mesh axes, all axes).
97
- - ``l``: Toggles lighting mode
98
- (scene lighting, Raymond lighting, or direct lighting).
99
- - ``m``: Toggles face normal visualization.
100
- - ``n``: Toggles vertex normal visualization.
101
- - ``o``: Toggles orthographic mode.
102
- - ``q``: Quits the viewer.
103
- - ``r``: Starts recording a GIF, and pressing again stops recording
104
- and opens a file dialog.
105
- - ``s``: Opens a file dialog to save the current view as an image.
106
- - ``w``: Toggles wireframe mode
107
- (scene default, flip wireframes, all wireframe, or all solid).
108
- - ``z``: Resets the camera to the initial view.
109
-
110
- Note
111
- ----
112
- The valid keys for ``render_flags`` are as follows:
113
-
114
- - ``flip_wireframe``: `bool`, If `True`, all objects will have their
115
- wireframe modes flipped from what their material indicates.
116
- Defaults to `False`.
117
- - ``all_wireframe``: `bool`, If `True`, all objects will be rendered
118
- in wireframe mode. Defaults to `False`.
119
- - ``all_solid``: `bool`, If `True`, all objects will be rendered in
120
- solid mode. Defaults to `False`.
121
- - ``shadows``: `bool`, If `True`, shadows will be rendered.
122
- Defaults to `False`.
123
- - ``vertex_normals``: `bool`, If `True`, vertex normals will be
124
- rendered as blue lines. Defaults to `False`.
125
- - ``face_normals``: `bool`, If `True`, face normals will be rendered as
126
- blue lines. Defaults to `False`.
127
- - ``cull_faces``: `bool`, If `True`, backfaces will be culled.
128
- Defaults to `True`.
129
- - ``point_size`` : float, The point size in pixels. Defaults to 1px.
130
-
131
- Note
132
- ----
133
- The valid keys for ``viewer_flags`` are as follows:
134
-
135
- - ``rotate``: `bool`, If `True`, the scene's camera will rotate
136
- about an axis. Defaults to `False`.
137
- - ``rotate_rate``: `float`, The rate of rotation in radians per second.
138
- Defaults to `PI / 3.0`.
139
- - ``rotate_axis``: `(3,) float`, The axis in world coordinates to rotate
140
- about. Defaults to ``[0,0,1]``.
141
- - ``view_center``: `(3,) float`, The position to rotate the scene about.
142
- Defaults to the scene's centroid.
143
- - ``use_raymond_lighting``: `bool`, If `True`, an additional set of three
144
- directional lights that move with the camera will be added to the scene.
145
- Defaults to `False`.
146
- - ``use_direct_lighting``: `bool`, If `True`, an additional directional
147
- light that moves with the camera and points out of it will be added to
148
- the scene. Defaults to `False`.
149
- - ``lighting_intensity``: `float`, The overall intensity of the
150
- viewer's additional lights (when they're in use). Defaults to 3.0.
151
- - ``use_perspective_cam``: `bool`, If `True`, a perspective camera will
152
- be used. Otherwise, an orthographic camera is used. Defaults to `True`.
153
- - ``save_directory``: `str`, A directory to open the file dialogs in.
154
- Defaults to `None`.
155
- - ``window_title``: `str`, A title for the viewer's application window.
156
- Defaults to `"Scene Viewer"`.
157
- - ``refresh_rate``: `float`, A refresh rate for rendering, in Hertz.
158
- Defaults to `30.0`.
159
- - ``fullscreen``: `bool`, Whether to make viewer fullscreen.
160
- Defaults to `False`.
161
- - ``show_world_axis``: `bool`, Whether to show the world axis.
162
- Defaults to `False`.
163
- - ``show_mesh_axes``: `bool`, Whether to show the individual mesh axes.
164
- Defaults to `False`.
165
- - ``caption``: `list of dict`, Text caption(s) to display on the viewer.
166
- Defaults to `None`.
167
-
168
- Note
169
- ----
170
- Animation can be accomplished by running the viewer with ``run_in_thread``
171
- enabled. Then, just run a loop in your main thread, updating the scene as
172
- needed. Before updating the scene, be sure to acquire the
173
- :attr:`.Viewer.render_lock`, and release it when your update is done.
174
- """
175
-
176
- def __init__(self, scene, viewport_size=None,
177
- render_flags=None, viewer_flags=None,
178
- registered_keys=None, run_in_thread=False,
179
- auto_start=True,
180
- **kwargs):
181
-
182
- #######################################################################
183
- # Save attributes and flags
184
- #######################################################################
185
- if viewport_size is None:
186
- viewport_size = (640, 480)
187
- self._scene = scene
188
- self._viewport_size = viewport_size
189
- self._render_lock = RLock()
190
- self._is_active = False
191
- self._should_close = False
192
- self._run_in_thread = run_in_thread
193
- self._auto_start = auto_start
194
-
195
- self._default_render_flags = {
196
- 'flip_wireframe': False,
197
- 'all_wireframe': False,
198
- 'all_solid': False,
199
- 'shadows': False,
200
- 'vertex_normals': False,
201
- 'face_normals': False,
202
- 'cull_faces': True,
203
- 'point_size': 1.0,
204
- }
205
- self._default_viewer_flags = {
206
- 'mouse_pressed': False,
207
- 'rotate': False,
208
- 'rotate_rate': np.pi / 3.0,
209
- 'rotate_axis': np.array([0.0, 0.0, 1.0]),
210
- 'view_center': None,
211
- 'record': False,
212
- 'use_raymond_lighting': False,
213
- 'use_direct_lighting': False,
214
- 'lighting_intensity': 3.0,
215
- 'use_perspective_cam': True,
216
- 'save_directory': None,
217
- 'window_title': 'Scene Viewer',
218
- 'refresh_rate': 30.0,
219
- 'fullscreen': False,
220
- 'show_world_axis': False,
221
- 'show_mesh_axes': False,
222
- 'caption': None
223
- }
224
- self._render_flags = self._default_render_flags.copy()
225
- self._viewer_flags = self._default_viewer_flags.copy()
226
- self._viewer_flags['rotate_axis'] = (
227
- self._default_viewer_flags['rotate_axis'].copy()
228
- )
229
-
230
- if render_flags is not None:
231
- self._render_flags.update(render_flags)
232
- if viewer_flags is not None:
233
- self._viewer_flags.update(viewer_flags)
234
-
235
- for key in kwargs:
236
- if key in self.render_flags:
237
- self._render_flags[key] = kwargs[key]
238
- elif key in self.viewer_flags:
239
- self._viewer_flags[key] = kwargs[key]
240
-
241
- # TODO MAC OS BUG FOR SHADOWS
242
- if sys.platform == 'darwin':
243
- self._render_flags['shadows'] = False
244
-
245
- self._registered_keys = {}
246
- if registered_keys is not None:
247
- self._registered_keys = {
248
- ord(k.lower()): registered_keys[k] for k in registered_keys
249
- }
250
-
251
- #######################################################################
252
- # Save internal settings
253
- #######################################################################
254
-
255
- # Set up caption stuff
256
- self._message_text = None
257
- self._ticks_till_fade = 2.0 / 3.0 * self.viewer_flags['refresh_rate']
258
- self._message_opac = 1.0 + self._ticks_till_fade
259
-
260
- # Set up raymond lights and direct lights
261
- self._raymond_lights = self._create_raymond_lights()
262
- self._direct_light = self._create_direct_light()
263
-
264
- # Set up axes
265
- self._axes = {}
266
- self._axis_mesh = Mesh.from_trimesh(
267
- trimesh.creation.axis(origin_size=0.1, axis_radius=0.05,
268
- axis_length=1.0), smooth=False)
269
- if self.viewer_flags['show_world_axis']:
270
- self._set_axes(world=self.viewer_flags['show_world_axis'],
271
- mesh=self.viewer_flags['show_mesh_axes'])
272
-
273
- #######################################################################
274
- # Set up camera node
275
- #######################################################################
276
- self._camera_node = None
277
- self._prior_main_camera_node = None
278
- self._default_camera_pose = None
279
- self._default_persp_cam = None
280
- self._default_orth_cam = None
281
- self._trackball = None
282
- self._saved_frames = []
283
-
284
- # Extract main camera from scene and set up our mirrored copy
285
- znear = None
286
- zfar = None
287
- if scene.main_camera_node is not None:
288
- n = scene.main_camera_node
289
- camera = copy.copy(n.camera)
290
- if isinstance(camera, (PerspectiveCamera, IntrinsicsCamera)):
291
- self._default_persp_cam = camera
292
- znear = camera.znear
293
- zfar = camera.zfar
294
- elif isinstance(camera, OrthographicCamera):
295
- self._default_orth_cam = camera
296
- znear = camera.znear
297
- zfar = camera.zfar
298
- self._default_camera_pose = scene.get_pose(scene.main_camera_node)
299
- self._prior_main_camera_node = n
300
-
301
- # Set defaults as needed
302
- if zfar is None:
303
- zfar = max(scene.scale * 10.0, DEFAULT_Z_FAR)
304
- if znear is None or znear == 0:
305
- if scene.scale == 0:
306
- znear = DEFAULT_Z_NEAR
307
- else:
308
- znear = min(scene.scale / 10.0, DEFAULT_Z_NEAR)
309
-
310
- if self._default_persp_cam is None:
311
- self._default_persp_cam = PerspectiveCamera(
312
- yfov=np.pi / 3.0, znear=znear, zfar=zfar
313
- )
314
- if self._default_orth_cam is None:
315
- xmag = ymag = scene.scale
316
- if scene.scale == 0:
317
- xmag = ymag = 1.0
318
- self._default_orth_cam = OrthographicCamera(
319
- xmag=xmag, ymag=ymag,
320
- znear=znear,
321
- zfar=zfar
322
- )
323
- if self._default_camera_pose is None:
324
- self._default_camera_pose = self._compute_initial_camera_pose()
325
-
326
- # Pick camera
327
- if self.viewer_flags['use_perspective_cam']:
328
- camera = self._default_persp_cam
329
- else:
330
- camera = self._default_orth_cam
331
-
332
- self._camera_node = Node(
333
- matrix=self._default_camera_pose, camera=camera
334
- )
335
- scene.add_node(self._camera_node)
336
- scene.main_camera_node = self._camera_node
337
- self._reset_view()
338
-
339
- #######################################################################
340
- # Initialize OpenGL context and renderer
341
- #######################################################################
342
- self._renderer = Renderer(
343
- self._viewport_size[0], self._viewport_size[1],
344
- self.render_flags['point_size']
345
- )
346
- self._is_active = True
347
-
348
- if self.run_in_thread:
349
- self._thread = Thread(target=self._init_and_start_app)
350
- self._thread.start()
351
- else:
352
- if auto_start:
353
- self._init_and_start_app()
354
-
355
- def start(self):
356
- self._init_and_start_app()
357
-
358
- @property
359
- def scene(self):
360
- """:class:`.Scene` : The scene being visualized.
361
- """
362
- return self._scene
363
-
364
- @property
365
- def viewport_size(self):
366
- """(2,) int : The width and height of the viewing window.
367
- """
368
- return self._viewport_size
369
-
370
- @property
371
- def render_lock(self):
372
- """:class:`threading.RLock` : If acquired, prevents the viewer from
373
- rendering until released.
374
-
375
- Run :meth:`.Viewer.render_lock.acquire` before making updates to
376
- the scene in a different thread, and run
377
- :meth:`.Viewer.render_lock.release` once you're done to let the viewer
378
- continue.
379
- """
380
- return self._render_lock
381
-
382
- @property
383
- def is_active(self):
384
- """bool : `True` if the viewer is active, or `False` if it has
385
- been closed.
386
- """
387
- return self._is_active
388
-
389
- @property
390
- def run_in_thread(self):
391
- """bool : Whether the viewer was run in a separate thread.
392
- """
393
- return self._run_in_thread
394
-
395
- @property
396
- def render_flags(self):
397
- """dict : Flags for controlling the renderer's behavior.
398
-
399
- - ``flip_wireframe``: `bool`, If `True`, all objects will have their
400
- wireframe modes flipped from what their material indicates.
401
- Defaults to `False`.
402
- - ``all_wireframe``: `bool`, If `True`, all objects will be rendered
403
- in wireframe mode. Defaults to `False`.
404
- - ``all_solid``: `bool`, If `True`, all objects will be rendered in
405
- solid mode. Defaults to `False`.
406
- - ``shadows``: `bool`, If `True`, shadows will be rendered.
407
- Defaults to `False`.
408
- - ``vertex_normals``: `bool`, If `True`, vertex normals will be
409
- rendered as blue lines. Defaults to `False`.
410
- - ``face_normals``: `bool`, If `True`, face normals will be rendered as
411
- blue lines. Defaults to `False`.
412
- - ``cull_faces``: `bool`, If `True`, backfaces will be culled.
413
- Defaults to `True`.
414
- - ``point_size`` : float, The point size in pixels. Defaults to 1px.
415
-
416
- """
417
- return self._render_flags
418
-
419
- @render_flags.setter
420
- def render_flags(self, value):
421
- self._render_flags = value
422
-
423
- @property
424
- def viewer_flags(self):
425
- """dict : Flags for controlling the viewer's behavior.
426
-
427
- The valid keys for ``viewer_flags`` are as follows:
428
-
429
- - ``rotate``: `bool`, If `True`, the scene's camera will rotate
430
- about an axis. Defaults to `False`.
431
- - ``rotate_rate``: `float`, The rate of rotation in radians per second.
432
- Defaults to `PI / 3.0`.
433
- - ``rotate_axis``: `(3,) float`, The axis in world coordinates to
434
- rotate about. Defaults to ``[0,0,1]``.
435
- - ``view_center``: `(3,) float`, The position to rotate the scene
436
- about. Defaults to the scene's centroid.
437
- - ``use_raymond_lighting``: `bool`, If `True`, an additional set of
438
- three directional lights that move with the camera will be added to
439
- the scene. Defaults to `False`.
440
- - ``use_direct_lighting``: `bool`, If `True`, an additional directional
441
- light that moves with the camera and points out of it will be
442
- added to the scene. Defaults to `False`.
443
- - ``lighting_intensity``: `float`, The overall intensity of the
444
- viewer's additional lights (when they're in use). Defaults to 3.0.
445
- - ``use_perspective_cam``: `bool`, If `True`, a perspective camera will
446
- be used. Otherwise, an orthographic camera is used. Defaults to
447
- `True`.
448
- - ``save_directory``: `str`, A directory to open the file dialogs in.
449
- Defaults to `None`.
450
- - ``window_title``: `str`, A title for the viewer's application window.
451
- Defaults to `"Scene Viewer"`.
452
- - ``refresh_rate``: `float`, A refresh rate for rendering, in Hertz.
453
- Defaults to `30.0`.
454
- - ``fullscreen``: `bool`, Whether to make viewer fullscreen.
455
- Defaults to `False`.
456
- - ``show_world_axis``: `bool`, Whether to show the world axis.
457
- Defaults to `False`.
458
- - ``show_mesh_axes``: `bool`, Whether to show the individual mesh axes.
459
- Defaults to `False`.
460
- - ``caption``: `list of dict`, Text caption(s) to display on
461
- the viewer. Defaults to `None`.
462
-
463
- """
464
- return self._viewer_flags
465
-
466
- @viewer_flags.setter
467
- def viewer_flags(self, value):
468
- self._viewer_flags = value
469
-
470
- @property
471
- def registered_keys(self):
472
- """dict : Map from ASCII key character to a handler function.
473
-
474
- This is a map from ASCII key characters to tuples containing:
475
-
476
- - A function to be called whenever the key is pressed,
477
- whose first argument will be the viewer itself.
478
- - (Optionally) A list of additional positional arguments
479
- to be passed to the function.
480
- - (Optionally) A dict of keyword arguments to be passed
481
- to the function.
482
-
483
- """
484
- return self._registered_keys
485
-
486
- @registered_keys.setter
487
- def registered_keys(self, value):
488
- self._registered_keys = value
489
-
490
- def close_external(self):
491
- """Close the viewer from another thread.
492
-
493
- This function will wait for the actual close, so you can immediately
494
- manipulate the scene afterwards.
495
- """
496
- self._should_close = True
497
- while self.is_active:
498
- time.sleep(1.0 / self.viewer_flags['refresh_rate'])
499
-
500
- def save_gif(self, filename=None):
501
- """Save the stored GIF frames to a file.
502
-
503
- To use this asynchronously, run the viewer with the ``record``
504
- flag and the ``run_in_thread`` flags set.
505
- Kill the viewer after your desired time with
506
- :meth:`.Viewer.close_external`, and then call :meth:`.Viewer.save_gif`.
507
-
508
- Parameters
509
- ----------
510
- filename : str
511
- The file to save the GIF to. If not specified,
512
- a file dialog will be opened to ask the user where
513
- to save the GIF file.
514
- """
515
- if filename is None:
516
- filename = self._get_save_filename(['gif', 'all'])
517
- if filename is not None:
518
- self.viewer_flags['save_directory'] = os.path.dirname(filename)
519
- imageio.mimwrite(filename, self._saved_frames,
520
- fps=self.viewer_flags['refresh_rate'],
521
- palettesize=128, subrectangles=True)
522
- self._saved_frames = []
523
-
524
- def on_close(self):
525
- """Exit the event loop when the window is closed.
526
- """
527
- # Remove our camera and restore the prior one
528
- if self._camera_node is not None:
529
- self.scene.remove_node(self._camera_node)
530
- if self._prior_main_camera_node is not None:
531
- self.scene.main_camera_node = self._prior_main_camera_node
532
-
533
- # Delete any lighting nodes that we've attached
534
- if self.viewer_flags['use_raymond_lighting']:
535
- for n in self._raymond_lights:
536
- if self.scene.has_node(n):
537
- self.scene.remove_node(n)
538
- if self.viewer_flags['use_direct_lighting']:
539
- if self.scene.has_node(self._direct_light):
540
- self.scene.remove_node(self._direct_light)
541
-
542
- # Delete any axis nodes that we've attached
543
- self._remove_axes()
544
-
545
- # Delete renderer
546
- if self._renderer is not None:
547
- self._renderer.delete()
548
- self._renderer = None
549
-
550
- # Force clean-up of OpenGL context data
551
- try:
552
- OpenGL.contextdata.cleanupContext()
553
- self.close()
554
- except Exception:
555
- pass
556
- finally:
557
- self._is_active = False
558
- super(Viewer, self).on_close()
559
- pyglet.app.exit()
560
-
561
- def on_draw(self):
562
- """Redraw the scene into the viewing window.
563
- """
564
- if self._renderer is None:
565
- return
566
-
567
- if self.run_in_thread or not self._auto_start:
568
- self.render_lock.acquire()
569
-
570
- # Make OpenGL context current
571
- self.switch_to()
572
-
573
- # Render the scene
574
- self.clear()
575
- self._render()
576
-
577
- if self._message_text is not None:
578
- self._renderer.render_text(
579
- self._message_text,
580
- self.viewport_size[0] - TEXT_PADDING,
581
- TEXT_PADDING,
582
- font_pt=20,
583
- color=np.array([0.1, 0.7, 0.2,
584
- np.clip(self._message_opac, 0.0, 1.0)]),
585
- align=TextAlign.BOTTOM_RIGHT
586
- )
587
-
588
- if self.viewer_flags['caption'] is not None:
589
- for caption in self.viewer_flags['caption']:
590
- xpos, ypos = self._location_to_x_y(caption['location'])
591
- self._renderer.render_text(
592
- caption['text'],
593
- xpos,
594
- ypos,
595
- font_name=caption['font_name'],
596
- font_pt=caption['font_pt'],
597
- color=caption['color'],
598
- scale=caption['scale'],
599
- align=caption['location']
600
- )
601
-
602
- if self.run_in_thread or not self._auto_start:
603
- self.render_lock.release()
604
-
605
- def on_resize(self, width, height):
606
- """Resize the camera and trackball when the window is resized.
607
- """
608
- if self._renderer is None:
609
- return
610
-
611
- self._viewport_size = (width, height)
612
- self._trackball.resize(self._viewport_size)
613
- self._renderer.viewport_width = self._viewport_size[0]
614
- self._renderer.viewport_height = self._viewport_size[1]
615
- self.on_draw()
616
-
617
- def on_mouse_press(self, x, y, buttons, modifiers):
618
- """Record an initial mouse press.
619
- """
620
- self._trackball.set_state(Trackball.STATE_ROTATE)
621
- if (buttons == pyglet.window.mouse.LEFT):
622
- ctrl = (modifiers & pyglet.window.key.MOD_CTRL)
623
- shift = (modifiers & pyglet.window.key.MOD_SHIFT)
624
- if (ctrl and shift):
625
- self._trackball.set_state(Trackball.STATE_ZOOM)
626
- elif ctrl:
627
- self._trackball.set_state(Trackball.STATE_ROLL)
628
- elif shift:
629
- self._trackball.set_state(Trackball.STATE_PAN)
630
- elif (buttons == pyglet.window.mouse.MIDDLE):
631
- self._trackball.set_state(Trackball.STATE_PAN)
632
- elif (buttons == pyglet.window.mouse.RIGHT):
633
- self._trackball.set_state(Trackball.STATE_ZOOM)
634
-
635
- self._trackball.down(np.array([x, y]))
636
-
637
- # Stop animating while using the mouse
638
- self.viewer_flags['mouse_pressed'] = True
639
-
640
- def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
641
- """Record a mouse drag.
642
- """
643
- self._trackball.drag(np.array([x, y]))
644
-
645
- def on_mouse_release(self, x, y, button, modifiers):
646
- """Record a mouse release.
647
- """
648
- self.viewer_flags['mouse_pressed'] = False
649
-
650
- def on_mouse_scroll(self, x, y, dx, dy):
651
- """Record a mouse scroll.
652
- """
653
- if self.viewer_flags['use_perspective_cam']:
654
- self._trackball.scroll(dy)
655
- else:
656
- spfc = 0.95
657
- spbc = 1.0 / 0.95
658
- sf = 1.0
659
- if dy > 0:
660
- sf = spfc * dy
661
- elif dy < 0:
662
- sf = - spbc * dy
663
-
664
- c = self._camera_node.camera
665
- xmag = max(c.xmag * sf, 1e-8)
666
- ymag = max(c.ymag * sf, 1e-8 * c.ymag / c.xmag)
667
- c.xmag = xmag
668
- c.ymag = ymag
669
-
670
- def on_key_press(self, symbol, modifiers):
671
- """Record a key press.
672
- """
673
- # First, check for registered key callbacks
674
- if symbol in self.registered_keys:
675
- tup = self.registered_keys[symbol]
676
- callback = None
677
- args = []
678
- kwargs = {}
679
- if not isinstance(tup, (list, tuple, np.ndarray)):
680
- callback = tup
681
- else:
682
- callback = tup[0]
683
- if len(tup) == 2:
684
- args = tup[1]
685
- if len(tup) == 3:
686
- kwargs = tup[2]
687
- callback(self, *args, **kwargs)
688
- return
689
-
690
- # Otherwise, use default key functions
691
-
692
- # A causes the frame to rotate
693
- self._message_text = None
694
- if symbol == pyglet.window.key.A:
695
- self.viewer_flags['rotate'] = not self.viewer_flags['rotate']
696
- if self.viewer_flags['rotate']:
697
- self._message_text = 'Rotation On'
698
- else:
699
- self._message_text = 'Rotation Off'
700
-
701
- # C toggles backface culling
702
- elif symbol == pyglet.window.key.C:
703
- self.render_flags['cull_faces'] = (
704
- not self.render_flags['cull_faces']
705
- )
706
- if self.render_flags['cull_faces']:
707
- self._message_text = 'Cull Faces On'
708
- else:
709
- self._message_text = 'Cull Faces Off'
710
-
711
- # F toggles face normals
712
- elif symbol == pyglet.window.key.F:
713
- self.viewer_flags['fullscreen'] = (
714
- not self.viewer_flags['fullscreen']
715
- )
716
- self.set_fullscreen(self.viewer_flags['fullscreen'])
717
- self.activate()
718
- if self.viewer_flags['fullscreen']:
719
- self._message_text = 'Fullscreen On'
720
- else:
721
- self._message_text = 'Fullscreen Off'
722
-
723
- # S toggles shadows
724
- elif symbol == pyglet.window.key.H and sys.platform != 'darwin':
725
- self.render_flags['shadows'] = not self.render_flags['shadows']
726
- if self.render_flags['shadows']:
727
- self._message_text = 'Shadows On'
728
- else:
729
- self._message_text = 'Shadows Off'
730
-
731
- elif symbol == pyglet.window.key.I:
732
- if (self.viewer_flags['show_world_axis'] and not
733
- self.viewer_flags['show_mesh_axes']):
734
- self.viewer_flags['show_world_axis'] = False
735
- self.viewer_flags['show_mesh_axes'] = True
736
- self._set_axes(False, True)
737
- self._message_text = 'Mesh Axes On'
738
- elif (not self.viewer_flags['show_world_axis'] and
739
- self.viewer_flags['show_mesh_axes']):
740
- self.viewer_flags['show_world_axis'] = True
741
- self.viewer_flags['show_mesh_axes'] = True
742
- self._set_axes(True, True)
743
- self._message_text = 'All Axes On'
744
- elif (self.viewer_flags['show_world_axis'] and
745
- self.viewer_flags['show_mesh_axes']):
746
- self.viewer_flags['show_world_axis'] = False
747
- self.viewer_flags['show_mesh_axes'] = False
748
- self._set_axes(False, False)
749
- self._message_text = 'All Axes Off'
750
- else:
751
- self.viewer_flags['show_world_axis'] = True
752
- self.viewer_flags['show_mesh_axes'] = False
753
- self._set_axes(True, False)
754
- self._message_text = 'World Axis On'
755
-
756
- # L toggles the lighting mode
757
- elif symbol == pyglet.window.key.L:
758
- if self.viewer_flags['use_raymond_lighting']:
759
- self.viewer_flags['use_raymond_lighting'] = False
760
- self.viewer_flags['use_direct_lighting'] = True
761
- self._message_text = 'Direct Lighting'
762
- elif self.viewer_flags['use_direct_lighting']:
763
- self.viewer_flags['use_raymond_lighting'] = False
764
- self.viewer_flags['use_direct_lighting'] = False
765
- self._message_text = 'Default Lighting'
766
- else:
767
- self.viewer_flags['use_raymond_lighting'] = True
768
- self.viewer_flags['use_direct_lighting'] = False
769
- self._message_text = 'Raymond Lighting'
770
-
771
- # M toggles face normals
772
- elif symbol == pyglet.window.key.M:
773
- self.render_flags['face_normals'] = (
774
- not self.render_flags['face_normals']
775
- )
776
- if self.render_flags['face_normals']:
777
- self._message_text = 'Face Normals On'
778
- else:
779
- self._message_text = 'Face Normals Off'
780
-
781
- # N toggles vertex normals
782
- elif symbol == pyglet.window.key.N:
783
- self.render_flags['vertex_normals'] = (
784
- not self.render_flags['vertex_normals']
785
- )
786
- if self.render_flags['vertex_normals']:
787
- self._message_text = 'Vert Normals On'
788
- else:
789
- self._message_text = 'Vert Normals Off'
790
-
791
- # O toggles orthographic camera mode
792
- elif symbol == pyglet.window.key.O:
793
- self.viewer_flags['use_perspective_cam'] = (
794
- not self.viewer_flags['use_perspective_cam']
795
- )
796
- if self.viewer_flags['use_perspective_cam']:
797
- camera = self._default_persp_cam
798
- self._message_text = 'Perspective View'
799
- else:
800
- camera = self._default_orth_cam
801
- self._message_text = 'Orthographic View'
802
-
803
- cam_pose = self._camera_node.matrix.copy()
804
- cam_node = Node(matrix=cam_pose, camera=camera)
805
- self.scene.remove_node(self._camera_node)
806
- self.scene.add_node(cam_node)
807
- self.scene.main_camera_node = cam_node
808
- self._camera_node = cam_node
809
-
810
- # Q quits the viewer
811
- elif symbol == pyglet.window.key.Q:
812
- self.on_close()
813
-
814
- # R starts recording frames
815
- elif symbol == pyglet.window.key.R:
816
- if self.viewer_flags['record']:
817
- self.save_gif()
818
- self.set_caption(self.viewer_flags['window_title'])
819
- else:
820
- self.set_caption(
821
- '{} (RECORDING)'.format(self.viewer_flags['window_title'])
822
- )
823
- self.viewer_flags['record'] = not self.viewer_flags['record']
824
-
825
- # S saves the current frame as an image
826
- elif symbol == pyglet.window.key.S:
827
- self._save_image()
828
-
829
- # W toggles through wireframe modes
830
- elif symbol == pyglet.window.key.W:
831
- if self.render_flags['flip_wireframe']:
832
- self.render_flags['flip_wireframe'] = False
833
- self.render_flags['all_wireframe'] = True
834
- self.render_flags['all_solid'] = False
835
- self._message_text = 'All Wireframe'
836
- elif self.render_flags['all_wireframe']:
837
- self.render_flags['flip_wireframe'] = False
838
- self.render_flags['all_wireframe'] = False
839
- self.render_flags['all_solid'] = True
840
- self._message_text = 'All Solid'
841
- elif self.render_flags['all_solid']:
842
- self.render_flags['flip_wireframe'] = False
843
- self.render_flags['all_wireframe'] = False
844
- self.render_flags['all_solid'] = False
845
- self._message_text = 'Default Wireframe'
846
- else:
847
- self.render_flags['flip_wireframe'] = True
848
- self.render_flags['all_wireframe'] = False
849
- self.render_flags['all_solid'] = False
850
- self._message_text = 'Flip Wireframe'
851
-
852
- # Z resets the camera viewpoint
853
- elif symbol == pyglet.window.key.Z:
854
- self._reset_view()
855
-
856
- if self._message_text is not None:
857
- self._message_opac = 1.0 + self._ticks_till_fade
858
-
859
- @staticmethod
860
- def _time_event(dt, self):
861
- """The timer callback.
862
- """
863
- # Don't run old dead events after we've already closed
864
- if not self._is_active:
865
- return
866
-
867
- if self.viewer_flags['record']:
868
- self._record()
869
- if (self.viewer_flags['rotate'] and not
870
- self.viewer_flags['mouse_pressed']):
871
- self._rotate()
872
-
873
- # Manage message opacity
874
- if self._message_text is not None:
875
- if self._message_opac > 1.0:
876
- self._message_opac -= 1.0
877
- else:
878
- self._message_opac *= 0.90
879
- if self._message_opac < 0.05:
880
- self._message_opac = 1.0 + self._ticks_till_fade
881
- self._message_text = None
882
-
883
- if self._should_close:
884
- self.on_close()
885
- else:
886
- self.on_draw()
887
-
888
- def _reset_view(self):
889
- """Reset the view to a good initial state.
890
-
891
- The view is initially along the positive x-axis at a
892
- sufficient distance from the scene.
893
- """
894
- scale = self.scene.scale
895
- if scale == 0.0:
896
- scale = DEFAULT_SCENE_SCALE
897
- centroid = self.scene.centroid
898
-
899
- if self.viewer_flags['view_center'] is not None:
900
- centroid = self.viewer_flags['view_center']
901
-
902
- self._camera_node.matrix = self._default_camera_pose
903
- self._trackball = Trackball(
904
- self._default_camera_pose, self.viewport_size, scale, centroid
905
- )
906
-
907
- def _get_save_filename(self, file_exts):
908
- file_types = {
909
- 'png': ('png files', '*.png'),
910
- 'jpg': ('jpeg files', '*.jpg'),
911
- 'gif': ('gif files', '*.gif'),
912
- 'all': ('all files', '*'),
913
- }
914
- filetypes = [file_types[x] for x in file_exts]
915
- try:
916
- root = Tk()
917
- save_dir = self.viewer_flags['save_directory']
918
- if save_dir is None:
919
- save_dir = os.getcwd()
920
- filename = filedialog.asksaveasfilename(
921
- initialdir=save_dir, title='Select file save location',
922
- filetypes=filetypes
923
- )
924
- except Exception:
925
- return None
926
-
927
- root.destroy()
928
- if filename == ():
929
- return None
930
- return filename
931
-
932
- def _save_image(self):
933
- filename = self._get_save_filename(['png', 'jpg', 'gif', 'all'])
934
- if filename is not None:
935
- self.viewer_flags['save_directory'] = os.path.dirname(filename)
936
- imageio.imwrite(filename, self._renderer.read_color_buf())
937
-
938
- def _record(self):
939
- """Save another frame for the GIF.
940
- """
941
- data = self._renderer.read_color_buf()
942
- if not np.all(data == 0.0):
943
- self._saved_frames.append(data)
944
-
945
- def _rotate(self):
946
- """Animate the scene by rotating the camera.
947
- """
948
- az = (self.viewer_flags['rotate_rate'] /
949
- self.viewer_flags['refresh_rate'])
950
- self._trackball.rotate(az, self.viewer_flags['rotate_axis'])
951
-
952
- def _render(self):
953
- """Render the scene into the framebuffer and flip.
954
- """
955
- scene = self.scene
956
- self._camera_node.matrix = self._trackball.pose.copy()
957
-
958
- # Set lighting
959
- vli = self.viewer_flags['lighting_intensity']
960
- if self.viewer_flags['use_raymond_lighting']:
961
- for n in self._raymond_lights:
962
- n.light.intensity = vli / 3.0
963
- if not self.scene.has_node(n):
964
- scene.add_node(n, parent_node=self._camera_node)
965
- else:
966
- self._direct_light.light.intensity = vli
967
- for n in self._raymond_lights:
968
- if self.scene.has_node(n):
969
- self.scene.remove_node(n)
970
-
971
- if self.viewer_flags['use_direct_lighting']:
972
- if not self.scene.has_node(self._direct_light):
973
- scene.add_node(
974
- self._direct_light, parent_node=self._camera_node
975
- )
976
- elif self.scene.has_node(self._direct_light):
977
- self.scene.remove_node(self._direct_light)
978
-
979
- flags = RenderFlags.NONE
980
- if self.render_flags['flip_wireframe']:
981
- flags |= RenderFlags.FLIP_WIREFRAME
982
- elif self.render_flags['all_wireframe']:
983
- flags |= RenderFlags.ALL_WIREFRAME
984
- elif self.render_flags['all_solid']:
985
- flags |= RenderFlags.ALL_SOLID
986
-
987
- if self.render_flags['shadows']:
988
- flags |= RenderFlags.SHADOWS_DIRECTIONAL | RenderFlags.SHADOWS_SPOT
989
- if self.render_flags['vertex_normals']:
990
- flags |= RenderFlags.VERTEX_NORMALS
991
- if self.render_flags['face_normals']:
992
- flags |= RenderFlags.FACE_NORMALS
993
- if not self.render_flags['cull_faces']:
994
- flags |= RenderFlags.SKIP_CULL_FACES
995
-
996
- self._renderer.render(self.scene, flags)
997
-
998
- def _init_and_start_app(self):
999
- # Try multiple configs starting with target OpenGL version
1000
- # and multisampling and removing these options if exception
1001
- # Note: multisampling not available on all hardware
1002
- from pyglet.gl import Config
1003
- confs = [Config(sample_buffers=1, samples=4,
1004
- depth_size=24,
1005
- double_buffer=True,
1006
- major_version=TARGET_OPEN_GL_MAJOR,
1007
- minor_version=TARGET_OPEN_GL_MINOR),
1008
- Config(depth_size=24,
1009
- double_buffer=True,
1010
- major_version=TARGET_OPEN_GL_MAJOR,
1011
- minor_version=TARGET_OPEN_GL_MINOR),
1012
- Config(sample_buffers=1, samples=4,
1013
- depth_size=24,
1014
- double_buffer=True,
1015
- major_version=MIN_OPEN_GL_MAJOR,
1016
- minor_version=MIN_OPEN_GL_MINOR),
1017
- Config(depth_size=24,
1018
- double_buffer=True,
1019
- major_version=MIN_OPEN_GL_MAJOR,
1020
- minor_version=MIN_OPEN_GL_MINOR)]
1021
- for conf in confs:
1022
- try:
1023
- super(Viewer, self).__init__(config=conf, resizable=True,
1024
- width=self._viewport_size[0],
1025
- height=self._viewport_size[1])
1026
- break
1027
- except pyglet.window.NoSuchConfigException:
1028
- pass
1029
-
1030
- if not self.context:
1031
- raise ValueError('Unable to initialize an OpenGL 3+ context')
1032
- clock.schedule_interval(
1033
- Viewer._time_event, 1.0 / self.viewer_flags['refresh_rate'], self
1034
- )
1035
- self.switch_to()
1036
- self.set_caption(self.viewer_flags['window_title'])
1037
- pyglet.app.run()
1038
-
1039
- def _compute_initial_camera_pose(self):
1040
- centroid = self.scene.centroid
1041
- if self.viewer_flags['view_center'] is not None:
1042
- centroid = self.viewer_flags['view_center']
1043
- scale = self.scene.scale
1044
- if scale == 0.0:
1045
- scale = DEFAULT_SCENE_SCALE
1046
-
1047
- s2 = 1.0 / np.sqrt(2.0)
1048
- cp = np.eye(4)
1049
- cp[:3,:3] = np.array([
1050
- [0.0, -s2, s2],
1051
- [1.0, 0.0, 0.0],
1052
- [0.0, s2, s2]
1053
- ])
1054
- hfov = np.pi / 6.0
1055
- dist = scale / (2.0 * np.tan(hfov))
1056
- cp[:3,3] = dist * np.array([1.0, 0.0, 1.0]) + centroid
1057
-
1058
- return cp
1059
-
1060
- def _create_raymond_lights(self):
1061
- thetas = np.pi * np.array([1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0])
1062
- phis = np.pi * np.array([0.0, 2.0 / 3.0, 4.0 / 3.0])
1063
-
1064
- nodes = []
1065
-
1066
- for phi, theta in zip(phis, thetas):
1067
- xp = np.sin(theta) * np.cos(phi)
1068
- yp = np.sin(theta) * np.sin(phi)
1069
- zp = np.cos(theta)
1070
-
1071
- z = np.array([xp, yp, zp])
1072
- z = z / np.linalg.norm(z)
1073
- x = np.array([-z[1], z[0], 0.0])
1074
- if np.linalg.norm(x) == 0:
1075
- x = np.array([1.0, 0.0, 0.0])
1076
- x = x / np.linalg.norm(x)
1077
- y = np.cross(z, x)
1078
-
1079
- matrix = np.eye(4)
1080
- matrix[:3,:3] = np.c_[x,y,z]
1081
- nodes.append(Node(
1082
- light=DirectionalLight(color=np.ones(3), intensity=1.0),
1083
- matrix=matrix
1084
- ))
1085
-
1086
- return nodes
1087
-
1088
- def _create_direct_light(self):
1089
- light = DirectionalLight(color=np.ones(3), intensity=1.0)
1090
- n = Node(light=light, matrix=np.eye(4))
1091
- return n
1092
-
1093
- def _set_axes(self, world, mesh):
1094
- scale = self.scene.scale
1095
- if world:
1096
- if 'scene' not in self._axes:
1097
- n = Node(mesh=self._axis_mesh, scale=np.ones(3) * scale * 0.3)
1098
- self.scene.add_node(n)
1099
- self._axes['scene'] = n
1100
- else:
1101
- if 'scene' in self._axes:
1102
- self.scene.remove_node(self._axes['scene'])
1103
- self._axes.pop('scene')
1104
-
1105
- if mesh:
1106
- old_nodes = []
1107
- existing_axes = set([self._axes[k] for k in self._axes])
1108
- for node in self.scene.mesh_nodes:
1109
- if node not in existing_axes:
1110
- old_nodes.append(node)
1111
-
1112
- for node in old_nodes:
1113
- if node in self._axes:
1114
- continue
1115
- n = Node(
1116
- mesh=self._axis_mesh,
1117
- scale=np.ones(3) * node.mesh.scale * 0.5
1118
- )
1119
- self.scene.add_node(n, parent_node=node)
1120
- self._axes[node] = n
1121
- else:
1122
- to_remove = set()
1123
- for main_node in self._axes:
1124
- if main_node in self.scene.mesh_nodes:
1125
- self.scene.remove_node(self._axes[main_node])
1126
- to_remove.add(main_node)
1127
- for main_node in to_remove:
1128
- self._axes.pop(main_node)
1129
-
1130
- def _remove_axes(self):
1131
- for main_node in self._axes:
1132
- axis_node = self._axes[main_node]
1133
- self.scene.remove_node(axis_node)
1134
- self._axes = {}
1135
-
1136
- def _location_to_x_y(self, location):
1137
- if location == TextAlign.CENTER:
1138
- return (self.viewport_size[0] / 2.0, self.viewport_size[1] / 2.0)
1139
- elif location == TextAlign.CENTER_LEFT:
1140
- return (TEXT_PADDING, self.viewport_size[1] / 2.0)
1141
- elif location == TextAlign.CENTER_RIGHT:
1142
- return (self.viewport_size[0] - TEXT_PADDING,
1143
- self.viewport_size[1] / 2.0)
1144
- elif location == TextAlign.BOTTOM_LEFT:
1145
- return (TEXT_PADDING, TEXT_PADDING)
1146
- elif location == TextAlign.BOTTOM_RIGHT:
1147
- return (self.viewport_size[0] - TEXT_PADDING, TEXT_PADDING)
1148
- elif location == TextAlign.BOTTOM_CENTER:
1149
- return (self.viewport_size[0] / 2.0, TEXT_PADDING)
1150
- elif location == TextAlign.TOP_LEFT:
1151
- return (TEXT_PADDING, self.viewport_size[1] - TEXT_PADDING)
1152
- elif location == TextAlign.TOP_RIGHT:
1153
- return (self.viewport_size[0] - TEXT_PADDING,
1154
- self.viewport_size[1] - TEXT_PADDING)
1155
- elif location == TextAlign.TOP_CENTER:
1156
- return (self.viewport_size[0] / 2.0,
1157
- self.viewport_size[1] - TEXT_PADDING)
1158
-
1159
-
1160
- __all__ = ['Viewer']
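
As the module docstring above notes, animation with this viewer is done by constructing it with run_in_thread=True and then mutating the scene from the main thread while holding render_lock. A minimal sketch of that pattern follows; it assumes the standard pyrender package layout (Scene and set_pose come from sibling modules not shown in this diff) and uses trimesh for the geometry:

import time
import numpy as np
import trimesh
from pyrender import Scene, Mesh, Node, Viewer  # assumed package layout

scene = Scene()
box = Mesh.from_trimesh(trimesh.creation.box(), smooth=False)
box_node = Node(mesh=box, matrix=np.eye(4))
scene.add_node(box_node)

viewer = Viewer(scene, run_in_thread=True)  # rendering runs in a background thread

angle = 0.0
while viewer.is_active:
    angle += 0.01
    pose = trimesh.transformations.rotation_matrix(angle, [0.0, 0.0, 1.0])
    viewer.render_lock.acquire()   # pause the render loop while the scene graph is mutated
    scene.set_pose(box_node, pose)
    viewer.render_lock.release()
    time.sleep(1.0 / 30.0)

The lock is what keeps the background render loop from reading the scene graph mid-update.
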
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/hifigan/stft_loss.py DELETED
@@ -1,136 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
-
3
- # Copyright 2019 Tomoki Hayashi
4
- # MIT License (https://opensource.org/licenses/MIT)
5
-
6
- """STFT-based Loss modules."""
7
-
8
- import torch
9
- import torch.nn.functional as F
10
-
11
-
12
- def stft(x, fft_size, hop_size, win_length, window):
13
- """Perform STFT and convert to magnitude spectrogram.
14
- Args:
15
- x (Tensor): Input signal tensor (B, T).
16
- fft_size (int): FFT size.
17
- hop_size (int): Hop size.
18
- win_length (int): Window length.
19
- window (str): Window function type.
20
- Returns:
21
- Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1).
22
- """
23
- x_stft = torch.stft(x, fft_size, hop_size, win_length, window)
24
- real = x_stft[..., 0]
25
- imag = x_stft[..., 1]
26
-
27
- # NOTE(kan-bayashi): clamp is needed to avoid nan or inf
28
- return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1)
29
-
30
-
31
- class SpectralConvergengeLoss(torch.nn.Module):
32
- """Spectral convergence loss module."""
33
-
34
- def __init__(self):
35
- """Initialize spectral convergence loss module."""
36
- super(SpectralConvergengeLoss, self).__init__()
37
-
38
- def forward(self, x_mag, y_mag):
39
- """Calculate forward propagation.
40
- Args:
41
- x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
42
- y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
43
- Returns:
44
- Tensor: Spectral convergence loss value.
45
- """
46
- return torch.norm(y_mag - x_mag, p="fro") / torch.norm(y_mag, p="fro")
47
-
48
-
49
- class LogSTFTMagnitudeLoss(torch.nn.Module):
50
- """Log STFT magnitude loss module."""
51
-
52
- def __init__(self):
53
- """Initialize log STFT magnitude loss module."""
54
- super(LogSTFTMagnitudeLoss, self).__init__()
55
-
56
- def forward(self, x_mag, y_mag):
57
- """Calculate forward propagation.
58
- Args:
59
- x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
60
- y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
61
- Returns:
62
- Tensor: Log STFT magnitude loss value.
63
- """
64
- return F.l1_loss(torch.log(y_mag), torch.log(x_mag))
65
-
66
-
67
- class STFTLoss(torch.nn.Module):
68
- """STFT loss module."""
69
-
70
- def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window"):
71
- """Initialize STFT loss module."""
72
- super(STFTLoss, self).__init__()
73
- self.fft_size = fft_size
74
- self.shift_size = shift_size
75
- self.win_length = win_length
76
- self.window = getattr(torch, window)(win_length)
77
- self.spectral_convergenge_loss = SpectralConvergengeLoss()
78
- self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss()
79
-
80
- def forward(self, x, y):
81
- """Calculate forward propagation.
82
- Args:
83
- x (Tensor): Predicted signal (B, T).
84
- y (Tensor): Groundtruth signal (B, T).
85
- Returns:
86
- Tensor: Spectral convergence loss value.
87
- Tensor: Log STFT magnitude loss value.
88
- """
89
- x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window.to(x.get_device()))
90
- y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window.to(x.get_device()))
91
- sc_loss = self.spectral_convergenge_loss(x_mag, y_mag)
92
- mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag)
93
-
94
- return sc_loss, mag_loss
95
-
96
-
97
- class MultiResolutionSTFTLoss(torch.nn.Module):
98
- """Multi resolution STFT loss module."""
99
-
100
- def __init__(self,
101
- fft_sizes=[1024, 2048, 512],
102
- hop_sizes=[120, 240, 50],
103
- win_lengths=[600, 1200, 240],
104
- window="hann_window"):
105
- """Initialize Multi resolution STFT loss module.
106
- Args:
107
- fft_sizes (list): List of FFT sizes.
108
- hop_sizes (list): List of hop sizes.
109
- win_lengths (list): List of window lengths.
110
- window (str): Window function type.
111
- """
112
- super(MultiResolutionSTFTLoss, self).__init__()
113
- assert len(fft_sizes) == len(hop_sizes) == len(win_lengths)
114
- self.stft_losses = torch.nn.ModuleList()
115
- for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths):
116
- self.stft_losses += [STFTLoss(fs, ss, wl, window)]
117
-
118
- def forward(self, x, y):
119
- """Calculate forward propagation.
120
- Args:
121
- x (Tensor): Predicted signal (B, T).
122
- y (Tensor): Groundtruth signal (B, T).
123
- Returns:
124
- Tensor: Multi resolution spectral convergence loss value.
125
- Tensor: Multi resolution log STFT magnitude loss value.
126
- """
127
- sc_loss = 0.0
128
- mag_loss = 0.0
129
- for f in self.stft_losses:
130
- sc_l, mag_l = f(x, y)
131
- sc_loss += sc_l
132
- mag_loss += mag_l
133
- sc_loss /= len(self.stft_losses)
134
- mag_loss /= len(self.stft_losses)
135
-
136
- return sc_loss, mag_loss
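
For reference, MultiResolutionSTFTLoss above is applied directly to predicted and ground-truth waveforms of shape (B, T). A minimal usage sketch with made-up tensor shapes follows; since forward moves each window via x.get_device(), CUDA tensors are assumed here, and the import path is inferred from the file location above:

import torch
from text_to_speech.modules.vocoder.hifigan.stft_loss import MultiResolutionSTFTLoss  # path as laid out in this repo

criterion = MultiResolutionSTFTLoss()             # default 1024/2048/512-point FFT resolutions
y_hat = torch.randn(4, 16000, device="cuda")      # predicted waveform batch, (B, T)
y = torch.randn(4, 16000, device="cuda")          # ground-truth waveform batch, (B, T)

sc_loss, mag_loss = criterion(y_hat, y)           # spectral convergence + log-magnitude terms
total_stft_loss = sc_loss + mag_loss              # typically added (optionally weighted) to the generator loss
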
spaces/AIWaves/Debate/src/agents/__init__.py DELETED
@@ -1,4 +0,0 @@
1
- from .evolve import *
2
- from .SOP import *
3
- from .State import *
4
- from .utils import *
spaces/Abhaykoul/HelpingAI-t2/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: HelpingAI T2
3
- emoji: ⚡
4
- colorFrom: green
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.50.2
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AchyuthGamer/OpenGPT/g4f/Provider/helper.py DELETED
@@ -1,77 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import asyncio
4
- import sys
5
- from asyncio import AbstractEventLoop
6
- from os import path
7
- from typing import Dict, List
8
- import browser_cookie3
9
-
10
- # Change event loop policy on windows
11
- if sys.platform == 'win32':
12
- if isinstance(
13
- asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy
14
- ):
15
- asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
16
-
17
- # Local Cookie Storage
18
- _cookies: Dict[str, Dict[str, str]] = {}
19
-
20
- # If event loop is already running, handle nested event loops
21
- # If "nest_asyncio" is installed, patch the event loop.
22
- def get_event_loop() -> AbstractEventLoop:
23
- try:
24
- asyncio.get_running_loop()
25
- except RuntimeError:
26
- try:
27
- return asyncio.get_event_loop()
28
- except RuntimeError:
29
- asyncio.set_event_loop(asyncio.new_event_loop())
30
- return asyncio.get_event_loop()
31
- try:
32
- event_loop = asyncio.get_event_loop()
33
- if not hasattr(event_loop.__class__, "_nest_patched"):
34
- import nest_asyncio
35
- nest_asyncio.apply(event_loop)
36
- return event_loop
37
- except ImportError:
38
- raise RuntimeError(
39
- 'Use "create_async" instead of "create" function in a running event loop. Or install the "nest_asyncio" package.'
40
- )
41
-
42
-
43
- # Load cookies for a domain from all supported browsers.
44
- # Cache the results in the "_cookies" variable.
45
- def get_cookies(cookie_domain: str) -> Dict[str, str]:
46
- if cookie_domain not in _cookies:
47
- _cookies[cookie_domain] = {}
48
- try:
49
- for cookie in browser_cookie3.load(cookie_domain):
50
- _cookies[cookie_domain][cookie.name] = cookie.value
51
- except:
52
- pass
53
- return _cookies[cookie_domain]
54
-
55
-
56
- def format_prompt(messages: List[Dict[str, str]], add_special_tokens=False) -> str:
57
- if add_special_tokens or len(messages) > 1:
58
- formatted = "\n".join(
59
- [
60
- "%s: %s" % ((message["role"]).capitalize(), message["content"])
61
- for message in messages
62
- ]
63
- )
64
- return f"{formatted}\nAssistant:"
65
- else:
66
- return messages[0]["content"]
67
-
68
-
69
- def get_browser(user_data_dir: str = None):
70
- from undetected_chromedriver import Chrome
71
- from platformdirs import user_config_dir
72
-
73
- if not user_data_dir:
74
- user_data_dir = user_config_dir("g4f")
75
- user_data_dir = path.join(user_data_dir, "Default")
76
-
77
- return Chrome(user_data_dir=user_data_dir)
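
For reference, these helpers are consumed by the provider implementations elsewhere in g4f. A rough sketch of the two most common calls follows; the domain string is illustrative only, and the import path is inferred from the file location above:

from g4f.Provider.helper import format_prompt, get_cookies  # path as laid out in this repo

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# Multi-message conversations are flattened into one prompt string:
# "System: You are a helpful assistant.\nUser: Hello!\nAssistant:"
prompt = format_prompt(messages)

# Cookies for a domain are loaded once from the local browsers and cached in _cookies
cookies = get_cookies(".example.com")
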
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/rotate-plugin.d.ts DELETED
@@ -1,9 +0,0 @@
1
- import Rotate from './rotate';
2
-
3
- export default class RotatePlugin extends Phaser.Plugins.BasePlugin {
4
- add(
5
- gameObject: Phaser.GameObjects.GameObject,
6
- config?: Rotate.IConfig
7
- ): Rotate;
8
-
9
- }
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/Bejeweled.js DELETED
@@ -1,82 +0,0 @@
- import ComponentBase from '../../plugins/utils/componentbase/ComponentBase.js';
- import MainState from './states/MainState.js';
- import Board from './board/Board.js';
- import Input from './input/Input.js';
- import WaitEvents from '../../plugins/waitevents.js';
- import InputMethods from './methods/InputMethods.js';
- import BoardMethods from './methods/BoardMethods.js';
- import WaitEventMethods from './methods/WaitEventMethods.js';
- import DataManagerMethods from '../../plugins/utils/data/DataManagerMethods.js';
-
-
- const GetValue = Phaser.Utils.Objects.GetValue;
-
- class Bejeweled extends ComponentBase {
-     constructor(scene, config) {
-         super(scene, config);
-         // this.scene
-
-         var rexBoardKey = GetValue(config, 'rexBoard', 'rexBoard');
-         this.rexBoard = scene[rexBoardKey];
-
-         this.board = new Board(this, config);
-
-         var defaultInput = GetValue(config, 'input', true);
-         if (defaultInput) {
-             this.input = new Input(this, config);
-         } else {
-             this.input = undefined;
-         }
-
-         this.waitEvents = new WaitEvents();
-
-         this.mainState = new MainState(this, config);
-
-         this.boot();
-     }
-
-     boot() {
-         this.scene.events.once('shutdown', this.destroy, this);
-     }
-
-     shutdown(fromScene) {
-         super.shutdown(fromScene);
-
-         if (this.input) {
-             this.input.destroy();
-         }
-         this.board.destroy();
-         this.mainState.destroy();
-         this.waitEvents.destroy();
-
-         this.destroyDataManager();
-
-         this.board = undefined;
-         this.mainState = undefined;
-         this.input = undefined;
-         this.waitEvents = undefined;
-
-         return this;
-     }
-
-     destroy(fromScene) {
-         this.emit('destroy');
-         super.destroy(fromScene);
-         return this;
-     }
-
-     start() {
-         this.mainState.goto('START');
-         return this;
-     }
- }
-
- Object.assign(
-     Bejeweled.prototype,
-     InputMethods,
-     BoardMethods,
-     WaitEventMethods,
-     DataManagerMethods
- );
-
- export default Bejeweled;
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/inputtext/InputText.d.ts DELETED
@@ -1,2 +0,0 @@
- import InputText from '../../../plugins/inputtext';
- export default InputText;
 
 
 
spaces/AkitoP/umamusume_bert_vits2/text/symbols.py DELETED
@@ -1,188 +0,0 @@
- punctuation = ["!", "?", "…", ",", ".", "'", "-"]
- pu_symbols = punctuation + ["SP", "UNK"]
- pad = "_"
-
- # chinese
- zh_symbols = [
-     "E",
-     "En",
-     "a",
-     "ai",
-     "an",
-     "ang",
-     "ao",
-     "b",
-     "c",
-     "ch",
-     "d",
-     "e",
-     "ei",
-     "en",
-     "eng",
-     "er",
-     "f",
-     "g",
-     "h",
-     "i",
-     "i0",
-     "ia",
-     "ian",
-     "iang",
-     "iao",
-     "ie",
-     "in",
-     "ing",
-     "iong",
-     "ir",
-     "iu",
-     "j",
-     "k",
-     "l",
-     "m",
-     "n",
-     "o",
-     "ong",
-     "ou",
-     "p",
-     "q",
-     "r",
-     "s",
-     "sh",
-     "t",
-     "u",
-     "ua",
-     "uai",
-     "uan",
-     "uang",
-     "ui",
-     "un",
-     "uo",
-     "v",
-     "van",
-     "ve",
-     "vn",
-     "w",
-     "x",
-     "y",
-     "z",
-     "zh",
-     "AA",
-     "EE",
-     "OO",
- ]
- num_zh_tones = 6
-
- # japanese
- ja_symbols = [
-     "N",
-     "a",
-     "a:",
-     "b",
-     "by",
-     "ch",
-     "d",
-     "dy",
-     "e",
-     "e:",
-     "f",
-     "g",
-     "gy",
-     "h",
-     "hy",
-     "i",
-     "i:",
-     "j",
-     "k",
-     "ky",
-     "m",
-     "my",
-     "n",
-     "ny",
-     "o",
-     "o:",
-     "p",
-     "py",
-     "q",
-     "r",
-     "ry",
-     "s",
-     "sh",
-     "t",
-     "ts",
-     "ty",
-     "u",
-     "u:",
-     "w",
-     "y",
-     "z",
-     "zy",
-     # ":"
- ]
- num_ja_tones = 1
-
- # English
- en_symbols = [
-     "aa",
-     "ae",
-     "ah",
-     "ao",
-     "aw",
-     "ay",
-     "b",
-     "ch",
-     "d",
-     "dh",
-     "eh",
-     "er",
-     "ey",
-     "f",
-     "g",
-     "hh",
-     "ih",
-     "iy",
-     "jh",
-     "k",
-     "l",
-     "m",
-     "n",
-     "ng",
-     "ow",
-     "oy",
-     "p",
-     "r",
-     "s",
-     "sh",
-     "t",
-     "th",
-     "uh",
-     "uw",
-     "V",
-     "w",
-     "y",
-     "z",
-     "zh",
- ]
- num_en_tones = 4
-
- # combine all symbols
- normal_symbols = sorted(set(zh_symbols + ja_symbols + en_symbols))
- symbols = [pad] + normal_symbols + pu_symbols
- sil_phonemes_ids = [symbols.index(i) for i in pu_symbols]
-
- # combine all tones
- num_tones = num_zh_tones + num_ja_tones + num_en_tones
-
- # language maps
- language_id_map = {"ZH": 0, "JP": 1, "EN": 2}
- num_languages = len(language_id_map.keys())
-
- language_tone_start_map = {
-     "ZH": 0,
-     "JP": num_zh_tones,
-     "EN": num_zh_tones + num_ja_tones,
- }
-
- if __name__ == "__main__":
-     a = set(zh_symbols)
-     b = set(en_symbols)
-     print(sorted(a & b))
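For context, a minimal sketch of how the symbol tables in the deleted symbols.py are typically consumed downstream is shown below. It is not part of the diff: the helper name `phones_to_ids` and the phone sequence are made up for illustration, and the import path `text.symbols` is assumed from the file location.

```python
# Illustrative sketch only: mapping phones to symbol IDs with the tables defined above.
from text.symbols import symbols, language_id_map, language_tone_start_map

_symbol_to_id = {s: i for i, s in enumerate(symbols)}

def phones_to_ids(phones, tones, language):
    """Map phone strings to symbol IDs and shift tones into the global tone range."""
    ids = [_symbol_to_id[p] for p in phones]
    tone_offset = language_tone_start_map[language]   # ZH: 0, JP: 6, EN: 7
    shifted_tones = [t + tone_offset for t in tones]
    lang_ids = [language_id_map[language]] * len(ids)
    return ids, shifted_tones, lang_ids

# Example with made-up Japanese phones and neutral tones.
print(phones_to_ids(["k", "o", "N", "n", "i", "ch", "i", "w", "a"], [0] * 9, "JP"))
```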
 
 
spaces/Akmyradov/TurkmenTTSweSTT/uroman/lib/NLP/UTF8.pm DELETED
@@ -1,1404 +0,0 @@
1
- ################################################################
2
- # #
3
- # UTF8 #
4
- # #
5
- ################################################################
6
-
7
- package NLP::UTF8;
8
-
9
- use NLP::utilities;
10
- $util = NLP::utilities;
11
-
12
- %empty_ht = ();
13
-
14
- sub new {
15
- local($caller) = @_;
16
-
17
- my $object = {};
18
- my $class = ref( $caller ) || $caller;
19
- bless($object, $class);
20
- return $object;
21
- }
22
-
23
- sub unicode_string2string {
24
- # input: string that might contain unicode sequences such as "U+0627"
25
- # output: string in pure utf-8
26
- local($caller,$s) = @_;
27
-
28
- my $pre;
29
- my $unicode;
30
- my $post;
31
- my $r1;
32
- my $r2;
33
- my $r3;
34
-
35
- ($pre,$unicode,$post) = ($s =~ /^(.*)(?:U\+|\\u)([0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f])(.*)$/);
36
- return $s unless defined($post);
37
- $r1 = $caller->unicode_string2string($pre);
38
- $r2 = $caller->unicode_hex_string2string($unicode);
39
- $r3 = $caller->unicode_string2string($post);
40
- $result = $r1 . $r2 . $r3;
41
- return $result;
42
- }
43
-
44
- sub unicode_hex_string2string {
45
- # input: "0627" (interpreted as hex code)
46
- # output: utf-8 string for Arabic letter alef
47
- local($caller,$unicode) = @_;
48
- return "" unless defined($unicode);
49
- my $d = hex($unicode);
50
- return $caller->unicode2string($d);
51
- }
52
-
53
- sub unicode2string {
54
- # input: non-neg integer, e.g. 0x627
55
- # output: utf-8 string for Arabic letter alef
56
- local($caller,$d) = @_;
57
- return "" unless defined($d) && $d >= 0;
58
- return sprintf("%c",$d) if $d <= 0x7F;
59
-
60
- my $lastbyte1 = ($d & 0x3F) | 0x80;
61
- $d >>= 6;
62
- return sprintf("%c%c",$d | 0xC0, $lastbyte1) if $d <= 0x1F;
63
-
64
- my $lastbyte2 = ($d & 0x3F) | 0x80;
65
- $d >>= 6;
66
- return sprintf("%c%c%c",$d | 0xE0, $lastbyte2, $lastbyte1) if $d <= 0xF;
67
-
68
- my $lastbyte3 = ($d & 0x3F) | 0x80;
69
- $d >>= 6;
70
- return sprintf("%c%c%c%c",$d | 0xF0, $lastbyte3, $lastbyte2, $lastbyte1) if $d <= 0x7;
71
-
72
- my $lastbyte4 = ($d & 0x3F) | 0x80;
73
- $d >>= 6;
74
- return sprintf("%c%c%c%c%c",$d | 0xF8, $lastbyte4, $lastbyte3, $lastbyte2, $lastbyte1) if $d <= 0x3;
75
-
76
- my $lastbyte5 = ($d & 0x3F) | 0x80;
77
- $d >>= 6;
78
- return sprintf("%c%c%c%c%c%c",$d | 0xFC, $lastbyte5, $lastbyte4, $lastbyte3, $lastbyte2, $lastbyte1) if $d <= 0x1;
79
- return ""; # bad input
80
- }
81
-
82
- sub html2utf8 {
83
- local($caller, $string) = @_;
84
-
85
- return $string unless $string =~ /\&\#\d{3,5};/;
86
-
87
- my $prev = "";
88
- my $s = $string;
89
- while ($s ne $prev) {
90
- $prev = $s;
91
- ($pre,$d,$post) = ($s =~ /^(.*)\&\#(\d+);(.*)$/);
92
- if (defined($d) && ((($d >= 160) && ($d <= 255))
93
- || (($d >= 1500) && ($d <= 1699))
94
- || (($d >= 19968) && ($d <= 40879)))) {
95
- $html_code = "\&\#" . $d . ";";
96
- $utf8_code = $caller->unicode2string($d);
97
- $s =~ s/$html_code/$utf8_code/;
98
- }
99
- }
100
- return $s;
101
- }
102
-
103
- sub xhtml2utf8 {
104
- local($caller, $string) = @_;
105
-
106
- return $string unless $string =~ /\&\#x[0-9a-fA-F]{2,5};/;
107
-
108
- my $prev = "";
109
- my $s = $string;
110
- while ($s ne $prev) {
111
- $prev = $s;
112
- if (($pre, $html_code, $x, $post) = ($s =~ /^(.*)(\&\#x([0-9a-fA-F]{2,5});)(.*)$/)) {
113
- $utf8_code = $caller->unicode_hex_string2string($x);
114
- $s =~ s/$html_code/$utf8_code/;
115
- }
116
- }
117
- return $s;
118
- }
119
-
120
- sub utf8_marker {
121
- return sprintf("%c%c%c\n", 0xEF, 0xBB, 0xBF);
122
- }
123
-
124
- sub enforcer {
125
- # input: string that might not conform to utf-8
126
- # output: string in pure utf-8, with a few "smart replacements" and possibly "?"
127
- local($caller,$s,$no_repair) = @_;
128
-
129
- my $ascii;
130
- my $utf8;
131
- my $rest;
132
-
133
- return $s if $s =~ /^[\x00-\x7F]*$/;
134
-
135
- $no_repair = 0 unless defined($no_repair);
136
- $orig = $s;
137
- $result = "";
138
-
139
- while ($s ne "") {
140
- ($ascii,$rest) = ($s =~ /^([\x00-\x7F]+)(.*)$/);
141
- if (defined($ascii)) {
142
- $result .= $ascii;
143
- $s = $rest;
144
- next;
145
- }
146
- ($utf8,$rest) = ($s =~ /^([\xC0-\xDF][\x80-\xBF])(.*)$/);
147
- ($utf8,$rest) = ($s =~ /^([\xE0-\xEF][\x80-\xBF][\x80-\xBF])(.*)$/)
148
- unless defined($rest);
149
- ($utf8,$rest) = ($s =~ /^([\xF0-\xF7][\x80-\xBF][\x80-\xBF][\x80-\xBF])(.*)$/)
150
- unless defined($rest);
151
- ($utf8,$rest) = ($s =~ /^([\xF8-\xFB][\x80-\xBF][\x80-\xBF][\x80-\xBF][\x80-\xBF])(.*)$/)
152
- unless defined($rest);
153
- if (defined($utf8)) {
154
- $result .= $utf8;
155
- $s = $rest;
156
- next;
157
- }
158
- ($c,$rest) = ($s =~ /^(.)(.*)$/);
159
- if (defined($c)) {
160
- if ($no_repair) { $result .= "?"; }
161
- elsif ($c =~ /\x85/) { $result .= "..."; }
162
- elsif ($c =~ /\x91/) { $result .= "'"; }
163
- elsif ($c =~ /\x92/) { $result .= "'"; }
164
- elsif ($c =~ /\x93/) { $result .= $caller->unicode2string(0x201C); }
165
- elsif ($c =~ /\x94/) { $result .= $caller->unicode2string(0x201D); }
166
- elsif ($c =~ /[\xC0-\xFF]/) {
167
- $c2 = $c;
168
- $c2 =~ tr/[\xC0-\xFF]/[\x80-\xBF]/;
169
- $result .= "\xC3$c2";
170
- } else {
171
- $result .= "?";
172
- }
173
- $s = $rest;
174
- next;
175
- }
176
- $s = "";
177
- }
178
- $result .= "\n" if ($orig =~ /\n$/) && ! ($result =~ /\n$/);
179
- return $result;
180
- }
181
-
182
- sub split_into_utf8_characters {
183
- # input: utf8 string
184
- # output: list of sub-strings, each representing a utf8 character
185
- local($caller,$string,$group_control, *ht) = @_;
186
-
187
- @characters = ();
188
- $end_of_token_p_string = "";
189
- $skipped_bytes = "";
190
- $group_control = "" unless defined($group_control);
191
- $group_ascii_numbers = ($group_control =~ /ASCII numbers/);
192
- $group_ascii_spaces = ($group_control =~ /ASCII spaces/);
193
- $group_ascii_punct = ($group_control =~ /ASCII punct/);
194
- $group_ascii_chars = ($group_control =~ /ASCII chars/);
195
- $group_xml_chars = ($group_control =~ /XML chars/);
196
- $group_xml_tags = ($group_control =~ /XML tags/);
197
- $return_only_chars = ($group_control =~ /return only chars/);
198
- $return_trailing_whitespaces = ($group_control =~ /return trailing whitespaces/);
199
- if ($group_control =~ /ASCII all/) {
200
- $group_ascii_numbers = 1;
201
- $group_ascii_spaces = 1;
202
- $group_ascii_chars = 1;
203
- $group_ascii_punct = 1;
204
- }
205
- if ($group_control =~ /(XML chars and tags|XML tags and chars)/) {
206
- $group_xml_chars = 1;
207
- $group_xml_tags = 1;
208
- }
209
- $orig_string = $string;
210
- $string .= " ";
211
- while ($string =~ /\S/) {
212
- # one-character UTF-8 = ASCII
213
- if ($string =~ /^[\x00-\x7F]/) {
214
- if ($group_xml_chars
215
- && (($dec_unicode, $rest) = ($string =~ /^&#(\d+);(.*)$/s))
216
- && ($utf8_char = $caller->unicode2string($dec_unicode))) {
217
- push(@characters, $utf8_char);
218
- $string = $rest;
219
- } elsif ($group_xml_chars
220
- && (($hex_unicode, $rest) = ($string =~ /^&#x([0-9a-f]{1,6});(.*)$/is))
221
- && ($utf8_char = $caller->unicode_hex_string2string($hex_unicode))) {
222
- push(@characters, $utf8_char);
223
- $string = $rest;
224
- } elsif ($group_xml_chars
225
- && (($html_entity_name, $rest) = ($string =~ /^&([a-z]{1,6});(.*)$/is))
226
- && ($dec_unicode = $ht{HTML_ENTITY_NAME_TO_DECUNICODE}->{$html_entity_name})
227
- && ($utf8_char = $caller->unicode2string($dec_unicode))
228
- ) {
229
- push(@characters, $utf8_char);
230
- $string = $rest;
231
- } elsif ($group_xml_tags
232
- && (($tag, $rest) = ($string =~ /^(<\/?[a-zA-Z][-_:a-zA-Z0-9]*(\s+[a-zA-Z][-_:a-zA-Z0-9]*=\"[^"]*\")*\s*\/?>)(.*)$/s))) {
233
- push(@characters, $tag);
234
- $string = $rest;
235
- } elsif ($group_ascii_numbers && ($string =~ /^[12]\d\d\d\.[01]?\d.[0-3]?\d([^0-9].*)?$/)) {
236
- ($date) = ($string =~ /^(\d\d\d\d\.\d?\d.\d?\d)([^0-9].*)?$/);
237
- push(@characters,$date);
238
- $string = substr($string, length($date));
239
- } elsif ($group_ascii_numbers && ($string =~ /^\d/)) {
240
- ($number) = ($string =~ /^(\d+(,\d\d\d)*(\.\d+)?)/);
241
- push(@characters,$number);
242
- $string = substr($string, length($number));
243
- } elsif ($group_ascii_spaces && ($string =~ /^(\s+)/)) {
244
- ($space) = ($string =~ /^(\s+)/);
245
- $string = substr($string, length($space));
246
- } elsif ($group_ascii_punct && (($punct_seq) = ($string =~ /^(-+|\.+|[:,%()"])/))) {
247
- push(@characters,$punct_seq);
248
- $string = substr($string, length($punct_seq));
249
- } elsif ($group_ascii_chars && (($word) = ($string =~ /^(\$[A-Z]*|[A-Z]{1,3}\$)/))) {
250
- push(@characters,$word);
251
- $string = substr($string, length($word));
252
- } elsif ($group_ascii_chars && (($abbrev) = ($string =~ /^((?:Jan|Feb|Febr|Mar|Apr|Jun|Jul|Aug|Sep|Sept|Oct|Nov|Dec|Mr|Mrs|Dr|a.m|p.m)\.)/))) {
253
- push(@characters,$abbrev);
254
- $string = substr($string, length($abbrev));
255
- } elsif ($group_ascii_chars && (($word) = ($string =~ /^(second|minute|hour|day|week|month|year|inch|foot|yard|meter|kilometer|mile)-(?:long|old)/i))) {
256
- push(@characters,$word);
257
- $string = substr($string, length($word));
258
- } elsif ($group_ascii_chars && (($word) = ($string =~ /^(zero|one|two|three|four|five|six|seven|eight|nine|ten|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|hundred|thousand|million|billion|trillion)-/i))) {
259
- push(@characters,$word);
260
- $string = substr($string, length($word));
261
- } elsif ($group_ascii_chars && (($word) = ($string =~ /^([a-zA-Z]+)(?:[ ,;%?|()"]|'s |' |\. |\d+[:hms][0-9 ])/))) {
262
- push(@characters,$word);
263
- $string = substr($string, length($word));
264
- } elsif ($group_ascii_chars && ($string =~ /^([\x21-\x27\x2A-\x7E]+)/)) { # exclude ()
265
- ($ascii) = ($string =~ /^([\x21-\x27\x2A-\x7E]+)/); # ASCII black-characters
266
- push(@characters,$ascii);
267
- $string = substr($string, length($ascii));
268
- } elsif ($group_ascii_chars && ($string =~ /^([\x21-\x7E]+)/)) {
269
- ($ascii) = ($string =~ /^([\x21-\x7E]+)/); # ASCII black-characters
270
- push(@characters,$ascii);
271
- $string = substr($string, length($ascii));
272
- } elsif ($group_ascii_chars && ($string =~ /^([\x00-\x7F]+)/)) {
273
- ($ascii) = ($string =~ /^([\x00-\x7F]+)/);
274
- push(@characters,$ascii);
275
- $string = substr($string, length($ascii));
276
- } else {
277
- push(@characters,substr($string, 0, 1));
278
- $string = substr($string, 1);
279
- }
280
-
281
- # two-character UTF-8
282
- } elsif ($string =~ /^[\xC0-\xDF][\x80-\xBF]/) {
283
- push(@characters,substr($string, 0, 2));
284
- $string = substr($string, 2);
285
-
286
- # three-character UTF-8
287
- } elsif ($string =~ /^[\xE0-\xEF][\x80-\xBF][\x80-\xBF]/) {
288
- push(@characters,substr($string, 0, 3));
289
- $string = substr($string, 3);
290
-
291
- # four-character UTF-8
292
- } elsif ($string =~ /^[\xF0-\xF7][\x80-\xBF][\x80-\xBF][\x80-\xBF]/) {
293
- push(@characters,substr($string, 0, 4));
294
- $string = substr($string, 4);
295
-
296
- # five-character UTF-8
297
- } elsif ($string =~ /^[\xF8-\xFB][\x80-\xBF][\x80-\xBF][\x80-\xBF][\x80-\xBF]/) {
298
- push(@characters,substr($string, 0, 5));
299
- $string = substr($string, 5);
300
-
301
- # six-character UTF-8
302
- } elsif ($string =~ /^[\xFC-\xFD][\x80-\xBF][\x80-\xBF][\x80-\xBF][\x80-\xBF][\x80-\xBF]/) {
303
- push(@characters,substr($string, 0, 6));
304
- $string = substr($string, 6);
305
-
306
- # not a UTF-8 character
307
- } else {
308
- $skipped_bytes .= substr($string, 0, 1);
309
- $string = substr($string, 1);
310
- }
311
-
312
- $end_of_token_p_string .= ($string =~ /^\S/) ? "0" : "1"
313
- if $#characters >= length($end_of_token_p_string);
314
- }
315
- $string =~ s/ $//; # remove previously added space, but keep original spaces
316
- if ($return_trailing_whitespaces) {
317
- while ($string =~ /^[ \t]/) {
318
- push(@characters,substr($string, 0, 1));
319
- $string = substr($string, 1);
320
- }
321
- push(@characters, "\n") if $orig_string =~ /\n$/;
322
- }
323
- return ($return_only_chars) ? @characters : ($skipped_bytes, $end_of_token_p_string, @characters);
324
- }
325
-
326
- sub max_substring_info {
327
- local($caller,$s1,$s2,$info_type) = @_;
328
-
329
- ($skipped_bytes1, $end_of_token_p_string1, @char_list1) = $caller->split_into_utf8_characters($s1, "", *empty_ht);
330
- ($skipped_bytes2, $end_of_token_p_string2, @char_list2) = $caller->split_into_utf8_characters($s2, "", *empty_ht);
331
- return 0 if $skipped_bytes1 || $skipped_bytes2;
332
-
333
- $best_substring_start1 = 0;
334
- $best_substring_start2 = 0;
335
- $best_substring_length = 0;
336
-
337
- foreach $start_pos2 ((0 .. $#char_list2)) {
338
- last if $start_pos2 + $best_substring_length > $#char_list2;
339
- foreach $start_pos1 ((0 .. $#char_list1)) {
340
- last if $start_pos1 + $best_substring_length > $#char_list1;
341
- $matching_length = 0;
342
- while (($start_pos1 + $matching_length <= $#char_list1)
343
- && ($start_pos2 + $matching_length <= $#char_list2)
344
- && ($char_list1[$start_pos1+$matching_length] eq $char_list2[$start_pos2+$matching_length])) {
345
- $matching_length++;
346
- }
347
- if ($matching_length > $best_substring_length) {
348
- $best_substring_length = $matching_length;
349
- $best_substring_start1 = $start_pos1;
350
- $best_substring_start2 = $start_pos2;
351
- }
352
- }
353
- }
354
- if ($info_type =~ /^max-ratio1$/) {
355
- $length1 = $#char_list1 + 1;
356
- return ($length1 > 0) ? ($best_substring_length / $length1) : 0;
357
- } elsif ($info_type =~ /^max-ratio2$/) {
358
- $length2 = $#char_list2 + 1;
359
- return ($length2 > 0) ? ($best_substring_length / $length2) : 0;
360
- } elsif ($info_type =~ /^substring$/) {
361
- return join("", @char_list1[$best_substring_start1 .. $best_substring_start1+$best_substring_length-1]);
362
- } else {
363
- $length1 = $#char_list1 + 1;
364
- $length2 = $#char_list2 + 1;
365
- $info = "s1=$s1;s2=$s2";
366
- $info .= ";best_substring_length=$best_substring_length";
367
- $info .= ";best_substring_start1=$best_substring_start1";
368
- $info .= ";best_substring_start2=$best_substring_start2";
369
- $info .= ";length1=$length1";
370
- $info .= ";length2=$length2";
371
- return $info;
372
- }
373
- }
374
-
375
- sub n_shared_chars_at_start {
376
- local($caller,$s1,$s2) = @_;
377
-
378
- my $n = 0;
379
- while (($s1 ne "") && ($s2 ne "")) {
380
- ($c1, $rest1) = ($s1 =~ /^(.[\x80-\xBF]*)(.*)$/);
381
- ($c2, $rest2) = ($s2 =~ /^(.[\x80-\xBF]*)(.*)$/);
382
- if ($c1 eq $c2) {
383
- $n++;
384
- $s1 = $rest1;
385
- $s2 = $rest2;
386
- } else {
387
- last;
388
- }
389
- }
390
- return $n;
391
- }
392
-
393
- sub char_length {
394
- local($caller,$string,$byte_offset) = @_;
395
-
396
- my $char = ($byte_offset) ? substr($string, $byte_offset) : $string;
397
- return 1 if $char =~ /^[\x00-\x7F]/;
398
- return 2 if $char =~ /^[\xC0-\xDF]/;
399
- return 3 if $char =~ /^[\xE0-\xEF]/;
400
- return 4 if $char =~ /^[\xF0-\xF7]/;
401
- return 5 if $char =~ /^[\xF8-\xFB]/;
402
- return 6 if $char =~ /^[\xFC-\xFD]/;
403
- return 0;
404
- }
405
-
406
- sub length_in_utf8_chars {
407
- local($caller,$s) = @_;
408
-
409
- $s =~ s/[\x80-\xBF]//g;
410
- $s =~ s/[\x00-\x7F\xC0-\xFF]/c/g;
411
- return length($s);
412
- }
413
-
414
- sub byte_length_of_n_chars {
415
- local($caller,$char_length,$string,$byte_offset,$undef_return_value) = @_;
416
-
417
- $byte_offset = 0 unless defined($byte_offset);
418
- $undef_return_value = -1 unless defined($undef_return_value);
419
- my $result = 0;
420
- my $len;
421
- foreach $i ((1 .. $char_length)) {
422
- $len = $caller->char_length($string,($byte_offset+$result));
423
- return $undef_return_value unless $len;
424
- $result += $len;
425
- }
426
- return $result;
427
- }
428
-
429
- sub replace_non_ASCII_bytes {
430
- local($caller,$string,$replacement) = @_;
431
-
432
- $replacement = "HEX" unless defined($replacement);
433
- if ($replacement =~ /^(Unicode|U\+4|\\u|HEX)$/) {
434
- $new_string = "";
435
- while (($pre,$utf8_char, $post) = ($string =~ /^([\x09\x0A\x20-\x7E]*)([\x00-\x08\x0B-\x1F\x7F]|[\xC0-\xDF][\x80-\xBF]|[\xE0-\xEF][\x80-\xBF][\x80-\xBF]|[\xF0-\xF7][\x80-\xBF][\x80-\xBF][\x80-\xBF]|[\xF8-\xFF][\x80-\xBF]+|[\x80-\xBF])(.*)$/s)) {
436
- if ($replacement =~ /Unicode/) {
437
- $new_string .= $pre . "<U" . (uc $caller->utf8_to_unicode($utf8_char)) . ">";
438
- } elsif ($replacement =~ /\\u/) {
439
- $new_string .= $pre . "\\u" . (uc sprintf("%04x", $caller->utf8_to_unicode($utf8_char)));
440
- } elsif ($replacement =~ /U\+4/) {
441
- $new_string .= $pre . "<U+" . (uc $caller->utf8_to_4hex_unicode($utf8_char)) . ">";
442
- } else {
443
- $new_string .= $pre . "<HEX-" . $caller->utf8_to_hex($utf8_char) . ">";
444
- }
445
- $string = $post;
446
- }
447
- $new_string .= $string;
448
- } else {
449
- $new_string = $string;
450
- $new_string =~ s/[\x80-\xFF]/$replacement/g;
451
- }
452
- return $new_string;
453
- }
454
-
455
- sub valid_utf8_string_p {
456
- local($caller,$string) = @_;
457
-
458
- return $string =~ /^(?:[\x09\x0A\x20-\x7E]|[\xC0-\xDF][\x80-\xBF]|[\xE0-\xEF][\x80-\xBF][\x80-\xBF]|[\xF0-\xF7][\x80-\xBF][\x80-\xBF][\x80-\xBF])*$/;
459
- }
460
-
461
- sub valid_utf8_string_incl_ascii_control_p {
462
- local($caller,$string) = @_;
463
-
464
- return $string =~ /^(?:[\x00-\x7F]|[\xC0-\xDF][\x80-\xBF]|[\xE0-\xEF][\x80-\xBF][\x80-\xBF]|[\xF0-\xF7][\x80-\xBF][\x80-\xBF][\x80-\xBF])*$/;
465
- }
466
-
467
- sub utf8_to_hex {
468
- local($caller,$s) = @_;
469
-
470
- $hex = "";
471
- foreach $i ((0 .. length($s)-1)) {
472
- $hex .= uc sprintf("%2.2x",ord(substr($s, $i, 1)));
473
- }
474
- return $hex;
475
- }
476
-
477
- sub hex_to_utf8 {
478
- local($caller,$s) = @_;
479
- # surface string \xE2\x80\xBA to UTF8
480
-
481
- my $utf8 = "";
482
- while (($hex, $rest) = ($s =~ /^(?:\\x)?([0-9A-Fa-f]{2,2})(.*)$/)) {
483
- $utf8 .= sprintf("%c", hex($hex));
484
- $s = $rest;
485
- }
486
- return $utf8;
487
- }
488
-
489
- sub utf8_to_4hex_unicode {
490
- local($caller,$s) = @_;
491
-
492
- return sprintf("%4.4x", $caller->utf8_to_unicode($s));
493
- }
494
-
495
- sub utf8_to_unicode {
496
- local($caller,$s) = @_;
497
-
498
- $unicode = 0;
499
- foreach $i ((0 .. length($s)-1)) {
500
- $c = substr($s, $i, 1);
501
- if ($c =~ /^[\x80-\xBF]$/) {
502
- $unicode = $unicode * 64 + (ord($c) & 0x3F);
503
- } elsif ($c =~ /^[\xC0-\xDF]$/) {
504
- $unicode = $unicode * 32 + (ord($c) & 0x1F);
505
- } elsif ($c =~ /^[\xE0-\xEF]$/) {
506
- $unicode = $unicode * 16 + (ord($c) & 0x0F);
507
- } elsif ($c =~ /^[\xF0-\xF7]$/) {
508
- $unicode = $unicode * 8 + (ord($c) & 0x07);
509
- } elsif ($c =~ /^[\xF8-\xFB]$/) {
510
- $unicode = $unicode * 4 + (ord($c) & 0x03);
511
- } elsif ($c =~ /^[\xFC-\xFD]$/) {
512
- $unicode = $unicode * 2 + (ord($c) & 0x01);
513
- }
514
- }
515
- return $unicode;
516
- }
517
-
518
- sub charhex {
519
- local($caller,$string) = @_;
520
-
521
- my $result = "";
522
- while ($string ne "") {
523
- $char = substr($string, 0, 1);
524
- $string = substr($string, 1);
525
- if ($char =~ /^[ -~]$/) {
526
- $result .= $char;
527
- } else {
528
- $hex = sprintf("%2.2x",ord($char));
529
- $hex =~ tr/a-f/A-F/;
530
- $result .= "<HEX-$hex>";
531
- }
532
- }
533
- return $result;
534
- }
535
-
536
- sub windows1252_to_utf8 {
537
- local($caller,$s, $norm_to_ascii_p, $preserve_potential_utf8s_p) = @_;
538
-
539
- return $s if $s =~ /^[\x00-\x7F]*$/; # all ASCII
540
-
541
- $norm_to_ascii_p = 1 unless defined($norm_to_ascii_p);
542
- $preserve_potential_utf8s_p = 1 unless defined($preserve_potential_utf8s_p);
543
- my $result = "";
544
- my $c = "";
545
- while ($s ne "") {
546
- $n_bytes = 1;
547
- if ($s =~ /^[\x00-\x7F]/) {
548
- $result .= substr($s, 0, 1); # ASCII
549
- } elsif ($preserve_potential_utf8s_p && ($s =~ /^[\xC0-\xDF][\x80-\xBF]/)) {
550
- $result .= substr($s, 0, 2); # valid 2-byte UTF8
551
- $n_bytes = 2;
552
- } elsif ($preserve_potential_utf8s_p && ($s =~ /^[\xE0-\xEF][\x80-\xBF][\x80-\xBF]/)) {
553
- $result .= substr($s, 0, 3); # valid 3-byte UTF8
554
- $n_bytes = 3;
555
- } elsif ($preserve_potential_utf8s_p && ($s =~ /^[\xF0-\xF7][\x80-\xBF][\x80-\xBF][\x80-\xBF]/)) {
556
- $result .= substr($s, 0, 4); # valid 4-byte UTF8
557
- $n_bytes = 4;
558
- } elsif ($preserve_potential_utf8s_p && ($s =~ /^[\xF8-\xFB][\x80-\xBF][\x80-\xBF][\x80-\xBF][\x80-\xBF]/)) {
559
- $result .= substr($s, 0, 5); # valid 5-byte UTF8
560
- $n_bytes = 5;
561
- } elsif ($s =~ /^[\xA0-\xBF]/) {
562
- $c = substr($s, 0, 1);
563
- $result .= "\xC2$c";
564
- } elsif ($s =~ /^[\xC0-\xFF]/) {
565
- $c = substr($s, 0, 1);
566
- $c =~ tr/[\xC0-\xFF]/[\x80-\xBF]/;
567
- $result .= "\xC3$c";
568
- } elsif ($s =~ /^\x80/) {
569
- $result .= "\xE2\x82\xAC"; # Euro sign
570
- } elsif ($s =~ /^\x82/) {
571
- $result .= "\xE2\x80\x9A"; # single low quotation mark
572
- } elsif ($s =~ /^\x83/) {
573
- $result .= "\xC6\x92"; # Latin small letter f with hook
574
- } elsif ($s =~ /^\x84/) {
575
- $result .= "\xE2\x80\x9E"; # double low quotation mark
576
- } elsif ($s =~ /^\x85/) {
577
- $result .= ($norm_to_ascii_p) ? "..." : "\xE2\x80\xA6"; # horizontal ellipsis (three dots)
578
- } elsif ($s =~ /^\x86/) {
579
- $result .= "\xE2\x80\xA0"; # dagger
580
- } elsif ($s =~ /^\x87/) {
581
- $result .= "\xE2\x80\xA1"; # double dagger
582
- } elsif ($s =~ /^\x88/) {
583
- $result .= "\xCB\x86"; # circumflex
584
- } elsif ($s =~ /^\x89/) {
585
- $result .= "\xE2\x80\xB0"; # per mille sign
586
- } elsif ($s =~ /^\x8A/) {
587
- $result .= "\xC5\xA0"; # Latin capital letter S with caron
588
- } elsif ($s =~ /^\x8B/) {
589
- $result .= "\xE2\x80\xB9"; # single left-pointing angle quotation mark
590
- } elsif ($s =~ /^\x8C/) {
591
- $result .= "\xC5\x92"; # OE ligature
592
- } elsif ($s =~ /^\x8E/) {
593
- $result .= "\xC5\xBD"; # Latin capital letter Z with caron
594
- } elsif ($s =~ /^\x91/) {
595
- $result .= ($norm_to_ascii_p) ? "`" : "\xE2\x80\x98"; # left single quotation mark
596
- } elsif ($s =~ /^\x92/) {
597
- $result .= ($norm_to_ascii_p) ? "'" : "\xE2\x80\x99"; # right single quotation mark
598
- } elsif ($s =~ /^\x93/) {
599
- $result .= "\xE2\x80\x9C"; # left double quotation mark
600
- } elsif ($s =~ /^\x94/) {
601
- $result .= "\xE2\x80\x9D"; # right double quotation mark
602
- } elsif ($s =~ /^\x95/) {
603
- $result .= "\xE2\x80\xA2"; # bullet
604
- } elsif ($s =~ /^\x96/) {
605
- $result .= ($norm_to_ascii_p) ? "-" : "\xE2\x80\x93"; # n dash
606
- } elsif ($s =~ /^\x97/) {
607
- $result .= ($norm_to_ascii_p) ? "-" : "\xE2\x80\x94"; # m dash
608
- } elsif ($s =~ /^\x98/) {
609
- $result .= ($norm_to_ascii_p) ? "~" : "\xCB\x9C"; # small tilde
610
- } elsif ($s =~ /^\x99/) {
611
- $result .= "\xE2\x84\xA2"; # trade mark sign
612
- } elsif ($s =~ /^\x9A/) {
613
- $result .= "\xC5\xA1"; # Latin small letter s with caron
614
- } elsif ($s =~ /^\x9B/) {
615
- $result .= "\xE2\x80\xBA"; # single right-pointing angle quotation mark
616
- } elsif ($s =~ /^\x9C/) {
617
- $result .= "\xC5\x93"; # oe ligature
618
- } elsif ($s =~ /^\x9E/) {
619
- $result .= "\xC5\xBE"; # Latin small letter z with caron
620
- } elsif ($s =~ /^\x9F/) {
621
- $result .= "\xC5\xB8"; # Latin capital letter Y with diaeresis
622
- } else {
623
- $result .= "?";
624
- }
625
- $s = substr($s, $n_bytes);
626
- }
627
- return $result;
628
- }
629
-
630
- sub delete_weird_stuff {
631
- local($caller, $s) = @_;
632
-
633
- # delete control characters (except tab and linefeed), zero-width characters, byte order mark,
634
- # directional marks, join marks, variation selectors, Arabic tatweel
635
- $s =~ s/([\x00-\x08\x0B-\x1F\x7F]|\xC2[\x80-\x9F]|\xD9\x80|\xE2\x80[\x8B-\x8F]|\xEF\xB8[\x80-\x8F]|\xEF\xBB\xBF|\xF3\xA0[\x84-\x87][\x80-\xBF])//g;
636
- return $s;
637
- }
638
-
639
- sub number_of_utf8_character {
640
- local($caller, $s) = @_;
641
-
642
- $s2 = $s;
643
- $s2 =~ s/[\x80-\xBF]//g;
644
- return length($s2);
645
- }
646
-
647
- sub cap_letter_reg_exp {
648
- # includes A-Z and other Latin-based capital letters with accents, umlauts and other decorations etc.
649
- return "[A-Z]|\xC3[\x80-\x96\x98-\x9E]|\xC4[\x80\x82\x84\x86\x88\x8A\x8C\x8E\x90\x94\x964\x98\x9A\x9C\x9E\xA0\xA2\xA4\xA6\xA8\xAA\xAC\xAE\xB0\xB2\xB4\xB6\xB9\xBB\xBD\xBF]|\xC5[\x81\x83\x85\x87\x8A\x8C\x8E\x90\x92\x96\x98\x9A\x9C\x9E\xA0\xA2\xA4\xA6\xA8\xAA\xAC\xB0\xB2\xB4\xB6\xB8\xB9\xBB\xBD]";
650
- }
651
-
652
- sub regex_extended_case_expansion {
653
- local($caller, $s) = @_;
654
-
655
- if ($s =~ /\xC3/) {
656
- $s =~ s/\xC3\xA0/\xC3\[\x80\xA0\]/g;
657
- $s =~ s/\xC3\xA1/\xC3\[\x81\xA1\]/g;
658
- $s =~ s/\xC3\xA2/\xC3\[\x82\xA2\]/g;
659
- $s =~ s/\xC3\xA3/\xC3\[\x83\xA3\]/g;
660
- $s =~ s/\xC3\xA4/\xC3\[\x84\xA4\]/g;
661
- $s =~ s/\xC3\xA5/\xC3\[\x85\xA5\]/g;
662
- $s =~ s/\xC3\xA6/\xC3\[\x86\xA6\]/g;
663
- $s =~ s/\xC3\xA7/\xC3\[\x87\xA7\]/g;
664
- $s =~ s/\xC3\xA8/\xC3\[\x88\xA8\]/g;
665
- $s =~ s/\xC3\xA9/\xC3\[\x89\xA9\]/g;
666
- $s =~ s/\xC3\xAA/\xC3\[\x8A\xAA\]/g;
667
- $s =~ s/\xC3\xAB/\xC3\[\x8B\xAB\]/g;
668
- $s =~ s/\xC3\xAC/\xC3\[\x8C\xAC\]/g;
669
- $s =~ s/\xC3\xAD/\xC3\[\x8D\xAD\]/g;
670
- $s =~ s/\xC3\xAE/\xC3\[\x8E\xAE\]/g;
671
- $s =~ s/\xC3\xAF/\xC3\[\x8F\xAF\]/g;
672
- $s =~ s/\xC3\xB0/\xC3\[\x90\xB0\]/g;
673
- $s =~ s/\xC3\xB1/\xC3\[\x91\xB1\]/g;
674
- $s =~ s/\xC3\xB2/\xC3\[\x92\xB2\]/g;
675
- $s =~ s/\xC3\xB3/\xC3\[\x93\xB3\]/g;
676
- $s =~ s/\xC3\xB4/\xC3\[\x94\xB4\]/g;
677
- $s =~ s/\xC3\xB5/\xC3\[\x95\xB5\]/g;
678
- $s =~ s/\xC3\xB6/\xC3\[\x96\xB6\]/g;
679
- $s =~ s/\xC3\xB8/\xC3\[\x98\xB8\]/g;
680
- $s =~ s/\xC3\xB9/\xC3\[\x99\xB9\]/g;
681
- $s =~ s/\xC3\xBA/\xC3\[\x9A\xBA\]/g;
682
- $s =~ s/\xC3\xBB/\xC3\[\x9B\xBB\]/g;
683
- $s =~ s/\xC3\xBC/\xC3\[\x9C\xBC\]/g;
684
- $s =~ s/\xC3\xBD/\xC3\[\x9D\xBD\]/g;
685
- $s =~ s/\xC3\xBE/\xC3\[\x9E\xBE\]/g;
686
- }
687
- if ($s =~ /\xC5/) {
688
- $s =~ s/\xC5\x91/\xC5\[\x90\x91\]/g;
689
- $s =~ s/\xC5\xA1/\xC5\[\xA0\xA1\]/g;
690
- $s =~ s/\xC5\xB1/\xC5\[\xB0\xB1\]/g;
691
- }
692
-
693
- return $s;
694
- }
695
-
696
- sub extended_lower_case {
697
- local($caller, $s) = @_;
698
-
699
- $s =~ tr/A-Z/a-z/;
700
-
701
- # Latin-1
702
- if ($s =~ /\xC3[\x80-\x9F]/) {
703
- $s =~ s/À/à/g;
704
- $s =~ s/Á/á/g;
705
- $s =~ s/Â/â/g;
706
- $s =~ s/Ã/ã/g;
707
- $s =~ s/Ä/ä/g;
708
- $s =~ s/Å/å/g;
709
- $s =~ s/Æ/æ/g;
710
- $s =~ s/Ç/ç/g;
711
- $s =~ s/È/è/g;
712
- $s =~ s/É/é/g;
713
- $s =~ s/Ê/ê/g;
714
- $s =~ s/Ë/ë/g;
715
- $s =~ s/Ì/ì/g;
716
- $s =~ s/Í/í/g;
717
- $s =~ s/Î/î/g;
718
- $s =~ s/Ï/ï/g;
719
- $s =~ s/Ð/ð/g;
720
- $s =~ s/Ñ/ñ/g;
721
- $s =~ s/Ò/ò/g;
722
- $s =~ s/Ó/ó/g;
723
- $s =~ s/Ô/ô/g;
724
- $s =~ s/Õ/õ/g;
725
- $s =~ s/Ö/ö/g;
726
- $s =~ s/Ø/ø/g;
727
- $s =~ s/Ù/ù/g;
728
- $s =~ s/Ú/ú/g;
729
- $s =~ s/Û/û/g;
730
- $s =~ s/Ü/ü/g;
731
- $s =~ s/Ý/ý/g;
732
- $s =~ s/Þ/þ/g;
733
- }
734
- # Latin Extended-A
735
- if ($s =~ /[\xC4-\xC5][\x80-\xBF]/) {
736
- $s =~ s/Ā/ā/g;
737
- $s =~ s/Ă/ă/g;
738
- $s =~ s/Ą/ą/g;
739
- $s =~ s/Ć/ć/g;
740
- $s =~ s/Ĉ/ĉ/g;
741
- $s =~ s/Ċ/ċ/g;
742
- $s =~ s/Č/č/g;
743
- $s =~ s/Ď/ď/g;
744
- $s =~ s/Đ/đ/g;
745
- $s =~ s/Ē/ē/g;
746
- $s =~ s/Ĕ/ĕ/g;
747
- $s =~ s/Ė/ė/g;
748
- $s =~ s/Ę/ę/g;
749
- $s =~ s/Ě/ě/g;
750
- $s =~ s/Ĝ/ĝ/g;
751
- $s =~ s/Ğ/ğ/g;
752
- $s =~ s/Ġ/ġ/g;
753
- $s =~ s/Ģ/ģ/g;
754
- $s =~ s/Ĥ/ĥ/g;
755
- $s =~ s/Ħ/ħ/g;
756
- $s =~ s/Ĩ/ĩ/g;
757
- $s =~ s/Ī/ī/g;
758
- $s =~ s/Ĭ/ĭ/g;
759
- $s =~ s/Į/į/g;
760
- $s =~ s/İ/ı/g;
761
- $s =~ s/IJ/ij/g;
762
- $s =~ s/Ĵ/ĵ/g;
763
- $s =~ s/Ķ/ķ/g;
764
- $s =~ s/Ĺ/ĺ/g;
765
- $s =~ s/Ļ/ļ/g;
766
- $s =~ s/Ľ/ľ/g;
767
- $s =~ s/Ŀ/ŀ/g;
768
- $s =~ s/Ł/ł/g;
769
- $s =~ s/Ń/ń/g;
770
- $s =~ s/Ņ/ņ/g;
771
- $s =~ s/Ň/ň/g;
772
- $s =~ s/Ŋ/ŋ/g;
773
- $s =~ s/Ō/ō/g;
774
- $s =~ s/Ŏ/ŏ/g;
775
- $s =~ s/Ő/ő/g;
776
- $s =~ s/Œ/œ/g;
777
- $s =~ s/Ŕ/ŕ/g;
778
- $s =~ s/Ŗ/ŗ/g;
779
- $s =~ s/Ř/ř/g;
780
- $s =~ s/Ś/ś/g;
781
- $s =~ s/Ŝ/ŝ/g;
782
- $s =~ s/Ş/ş/g;
783
- $s =~ s/Š/š/g;
784
- $s =~ s/Ţ/ţ/g;
785
- $s =~ s/Ť/ť/g;
786
- $s =~ s/Ŧ/ŧ/g;
787
- $s =~ s/Ũ/ũ/g;
788
- $s =~ s/Ū/ū/g;
789
- $s =~ s/Ŭ/ŭ/g;
790
- $s =~ s/Ů/ů/g;
791
- $s =~ s/Ű/ű/g;
792
- $s =~ s/Ų/ų/g;
793
- $s =~ s/Ŵ/ŵ/g;
794
- $s =~ s/Ŷ/ŷ/g;
795
- $s =~ s/Ź/ź/g;
796
- $s =~ s/Ż/ż/g;
797
- $s =~ s/Ž/ž/g;
798
- }
799
- # Greek letters
800
- if ($s =~ /\xCE[\x86-\xAB]/) {
801
- $s =~ s/Α/α/g;
802
- $s =~ s/Β/β/g;
803
- $s =~ s/Γ/γ/g;
804
- $s =~ s/Δ/δ/g;
805
- $s =~ s/Ε/ε/g;
806
- $s =~ s/Ζ/ζ/g;
807
- $s =~ s/Η/η/g;
808
- $s =~ s/Θ/θ/g;
809
- $s =~ s/Ι/ι/g;
810
- $s =~ s/Κ/κ/g;
811
- $s =~ s/Λ/λ/g;
812
- $s =~ s/Μ/μ/g;
813
- $s =~ s/Ν/ν/g;
814
- $s =~ s/Ξ/ξ/g;
815
- $s =~ s/Ο/ο/g;
816
- $s =~ s/Π/π/g;
817
- $s =~ s/Ρ/ρ/g;
818
- $s =~ s/Σ/σ/g;
819
- $s =~ s/Τ/τ/g;
820
- $s =~ s/Υ/υ/g;
821
- $s =~ s/Φ/φ/g;
822
- $s =~ s/Χ/χ/g;
823
- $s =~ s/Ψ/ψ/g;
824
- $s =~ s/Ω/ω/g;
825
- $s =~ s/Ϊ/ϊ/g;
826
- $s =~ s/Ϋ/ϋ/g;
827
- $s =~ s/Ά/ά/g;
828
- $s =~ s/Έ/έ/g;
829
- $s =~ s/Ή/ή/g;
830
- $s =~ s/Ί/ί/g;
831
- $s =~ s/Ό/ό/g;
832
- $s =~ s/Ύ/ύ/g;
833
- $s =~ s/Ώ/ώ/g;
834
- }
835
- # Cyrillic letters
836
- if ($s =~ /\xD0[\x80-\xAF]/) {
837
- $s =~ s/А/а/g;
838
- $s =~ s/Б/б/g;
839
- $s =~ s/В/в/g;
840
- $s =~ s/Г/г/g;
841
- $s =~ s/Д/д/g;
842
- $s =~ s/Е/е/g;
843
- $s =~ s/Ж/ж/g;
844
- $s =~ s/З/з/g;
845
- $s =~ s/И/и/g;
846
- $s =~ s/Й/й/g;
847
- $s =~ s/К/к/g;
848
- $s =~ s/Л/л/g;
849
- $s =~ s/М/м/g;
850
- $s =~ s/Н/н/g;
851
- $s =~ s/О/о/g;
852
- $s =~ s/П/п/g;
853
- $s =~ s/Р/р/g;
854
- $s =~ s/С/с/g;
855
- $s =~ s/Т/т/g;
856
- $s =~ s/У/у/g;
857
- $s =~ s/Ф/ф/g;
858
- $s =~ s/Х/х/g;
859
- $s =~ s/Ц/ц/g;
860
- $s =~ s/Ч/ч/g;
861
- $s =~ s/Ш/ш/g;
862
- $s =~ s/Щ/щ/g;
863
- $s =~ s/Ъ/ъ/g;
864
- $s =~ s/Ы/ы/g;
865
- $s =~ s/Ь/ь/g;
866
- $s =~ s/Э/э/g;
867
- $s =~ s/Ю/ю/g;
868
- $s =~ s/Я/я/g;
869
- $s =~ s/Ѐ/ѐ/g;
870
- $s =~ s/Ё/ё/g;
871
- $s =~ s/Ђ/ђ/g;
872
- $s =~ s/Ѓ/ѓ/g;
873
- $s =~ s/Є/є/g;
874
- $s =~ s/Ѕ/ѕ/g;
875
- $s =~ s/І/і/g;
876
- $s =~ s/Ї/ї/g;
877
- $s =~ s/Ј/ј/g;
878
- $s =~ s/Љ/љ/g;
879
- $s =~ s/Њ/њ/g;
880
- $s =~ s/Ћ/ћ/g;
881
- $s =~ s/Ќ/ќ/g;
882
- $s =~ s/Ѝ/ѝ/g;
883
- $s =~ s/Ў/ў/g;
884
- $s =~ s/Џ/џ/g;
885
- }
886
- # Fullwidth A-Z
887
- if ($s =~ /\xEF\xBC[\xA1-\xBA]/) {
888
- $s =~ s/A/a/g;
889
- $s =~ s/B/b/g;
890
- $s =~ s/C/c/g;
891
- $s =~ s/D/d/g;
892
- $s =~ s/E/e/g;
893
- $s =~ s/F/f/g;
894
- $s =~ s/G/g/g;
895
- $s =~ s/H/h/g;
896
- $s =~ s/I/i/g;
897
- $s =~ s/J/j/g;
898
- $s =~ s/K/k/g;
899
- $s =~ s/L/l/g;
900
- $s =~ s/M/m/g;
901
- $s =~ s/N/n/g;
902
- $s =~ s/O/o/g;
903
- $s =~ s/P/p/g;
904
- $s =~ s/Q/q/g;
905
- $s =~ s/R/r/g;
906
- $s =~ s/S/s/g;
907
- $s =~ s/T/t/g;
908
- $s =~ s/U/u/g;
909
- $s =~ s/V/v/g;
910
- $s =~ s/W/w/g;
911
- $s =~ s/X/x/g;
912
- $s =~ s/Y/y/g;
913
- $s =~ s/Z/z/g;
914
- }
915
-
916
- return $s;
917
- }
918
-
919
- sub extended_upper_case {
920
- local($caller, $s) = @_;
921
-
922
- $s =~ tr/a-z/A-Z/;
923
- return $s unless $s =~ /[\xC3-\xC5][\x80-\xBF]/;
924
-
925
- $s =~ s/\xC3\xA0/\xC3\x80/g;
926
- $s =~ s/\xC3\xA1/\xC3\x81/g;
927
- $s =~ s/\xC3\xA2/\xC3\x82/g;
928
- $s =~ s/\xC3\xA3/\xC3\x83/g;
929
- $s =~ s/\xC3\xA4/\xC3\x84/g;
930
- $s =~ s/\xC3\xA5/\xC3\x85/g;
931
- $s =~ s/\xC3\xA6/\xC3\x86/g;
932
- $s =~ s/\xC3\xA7/\xC3\x87/g;
933
- $s =~ s/\xC3\xA8/\xC3\x88/g;
934
- $s =~ s/\xC3\xA9/\xC3\x89/g;
935
- $s =~ s/\xC3\xAA/\xC3\x8A/g;
936
- $s =~ s/\xC3\xAB/\xC3\x8B/g;
937
- $s =~ s/\xC3\xAC/\xC3\x8C/g;
938
- $s =~ s/\xC3\xAD/\xC3\x8D/g;
939
- $s =~ s/\xC3\xAE/\xC3\x8E/g;
940
- $s =~ s/\xC3\xAF/\xC3\x8F/g;
941
- $s =~ s/\xC3\xB0/\xC3\x90/g;
942
- $s =~ s/\xC3\xB1/\xC3\x91/g;
943
- $s =~ s/\xC3\xB2/\xC3\x92/g;
944
- $s =~ s/\xC3\xB3/\xC3\x93/g;
945
- $s =~ s/\xC3\xB4/\xC3\x94/g;
946
- $s =~ s/\xC3\xB5/\xC3\x95/g;
947
- $s =~ s/\xC3\xB6/\xC3\x96/g;
948
- $s =~ s/\xC3\xB8/\xC3\x98/g;
949
- $s =~ s/\xC3\xB9/\xC3\x99/g;
950
- $s =~ s/\xC3\xBA/\xC3\x9A/g;
951
- $s =~ s/\xC3\xBB/\xC3\x9B/g;
952
- $s =~ s/\xC3\xBC/\xC3\x9C/g;
953
- $s =~ s/\xC3\xBD/\xC3\x9D/g;
954
- $s =~ s/\xC3\xBE/\xC3\x9E/g;
955
-
956
- $s =~ s/\xC5\x91/\xC5\x90/g;
957
- $s =~ s/\xC5\xA1/\xC5\xA0/g;
958
- $s =~ s/\xC5\xB1/\xC5\xB0/g;
959
- return $s unless $s =~ /[\xC3-\xC5][\x80-\xBF]/;
960
-
961
- return $s;
962
- }
963
-
964
- sub extended_first_upper_case {
965
- local($caller, $s) = @_;
966
-
967
- if (($first_char, $rest) = ($s =~ /^([\x00-\x7F]|[\xC0-\xDF][\x80-\xBF]|[\xE0-\xEF][\x80-\xBF][\x80-\xBF])(.*)$/)) {
968
- return $caller->extended_upper_case($first_char) . $rest;
969
- } else {
970
- return $s;
971
- }
972
- }
973
-
974
- sub repair_doubly_converted_utf8_strings {
975
- local($caller, $s) = @_;
976
-
977
- if ($s =~ /\xC3[\x82-\x85]\xC2[\x80-\xBF]/) {
978
- $s =~ s/\xC3\x82\xC2([\x80-\xBF])/\xC2$1/g;
979
- $s =~ s/\xC3\x83\xC2([\x80-\xBF])/\xC3$1/g;
980
- $s =~ s/\xC3\x84\xC2([\x80-\xBF])/\xC4$1/g;
981
- $s =~ s/\xC3\x85\xC2([\x80-\xBF])/\xC5$1/g;
982
- }
983
- return $s;
984
- }
985
-
986
- sub repair_misconverted_windows_to_utf8_strings {
987
- local($caller, $s) = @_;
988
-
989
- # correcting conversions of UTF8 using Latin1-to-UTF converter
990
- if ($s =~ /\xC3\xA2\xC2\x80\xC2[\x90-\xEF]/) {
991
- my $result = "";
992
- while (($pre,$last_c,$post) = ($s =~ /^(.*?)\xC3\xA2\xC2\x80\xC2([\x90-\xEF])(.*)$/s)) {
993
- $result .= "$pre\xE2\x80$last_c";
994
- $s = $post;
995
- }
996
- $result .= $s;
997
- $s = $result;
998
- }
999
- # correcting conversions of Windows1252-to-UTF8 using Latin1-to-UTF converter
1000
- if ($s =~ /\xC2[\x80-\x9F]/) {
1001
- my $result = "";
1002
- while (($pre,$c_windows,$post) = ($s =~ /^(.*?)\xC2([\x80-\x9F])(.*)$/s)) {
1003
- $c_utf8 = $caller->windows1252_to_utf8($c_windows, 0);
1004
- $result .= ($c_utf8 eq "?") ? ($pre . "\xC2" . $c_windows) : "$pre$c_utf8";
1005
- $s = $post;
1006
- }
1007
- $result .= $s;
1008
- $s = $result;
1009
- }
1010
- if ($s =~ /\xC3/) {
1011
- $s =~ s/\xC3\xA2\xE2\x80\x9A\xC2\xAC/\xE2\x82\xAC/g; # x80 -> Euro sign
1012
- # x81 codepoint undefined in Windows 1252
1013
- $s =~ s/\xC3\xA2\xE2\x82\xAC\xC5\xA1/\xE2\x80\x9A/g; # x82 -> single low-9 quotation mark
1014
- $s =~ s/\xC3\x86\xE2\x80\x99/\xC6\x92/g; # x83 -> Latin small letter f with hook
1015
- $s =~ s/\xC3\xA2\xE2\x82\xAC\xC5\xBE/\xE2\x80\x9E/g; # x84 -> double low-9 quotation mark
1016
- $s =~ s/\xC3\xA2\xE2\x82\xAC\xC2\xA6/\xE2\x80\xA6/g; # x85 -> horizontal ellipsis
1017
- $s =~ s/\xC3\xA2\xE2\x82\xAC\xC2\xA0/\xE2\x80\xA0/g; # x86 -> dagger
1018
- $s =~ s/\xC3\xA2\xE2\x82\xAC\xC2\xA1/\xE2\x80\xA1/g; # x87 -> double dagger
1019
- $s =~ s/\xC3\x8B\xE2\x80\xA0/\xCB\x86/g; # x88 -> modifier letter circumflex accent
1020
- $s =~ s/\xC3\xA2\xE2\x82\xAC\xC2\xB0/\xE2\x80\xB0/g; # x89 -> per mille sign
1021
- $s =~ s/\xC3\x85\xC2\xA0/\xC5\xA0/g; # x8A -> Latin capital letter S with caron
1022
- $s =~ s/\xC3\xA2\xE2\x82\xAC\xC2\xB9/\xE2\x80\xB9/g; # x8B -> single left-pointing angle quotation mark
1023
- $s =~ s/\xC3\x85\xE2\x80\x99/\xC5\x92/g; # x8C -> Latin capital ligature OE
1024
- # x8D codepoint undefined in Windows 1252
1025
- $s =~ s/\xC3\x85\xC2\xBD/\xC5\xBD/g; # x8E -> Latin capital letter Z with caron
1026
- # x8F codepoint undefined in Windows 1252
1027
- # x90 codepoint undefined in Windows 1252
1028
- $s =~ s/\xC3\xA2\xE2\x82\xAC\xCB\x9C/\xE2\x80\x98/g; # x91 a-circumflex+euro+small tilde -> left single quotation mark
1029
- $s =~ s/\xC3\xA2\xE2\x82\xAC\xE2\x84\xA2/\xE2\x80\x99/g; # x92 a-circumflex+euro+trademark -> right single quotation mark
1030
- $s =~ s/\xC3\xA2\xE2\x82\xAC\xC5\x93/\xE2\x80\x9C/g; # x93 a-circumflex+euro+Latin small ligature oe -> left double quotation mark
1031
- # x94 maps through undefined intermediate code point
1032
- $s =~ s/\xC3\xA2\xE2\x82\xAC\xC2\xA2/\xE2\x80\xA2/g; # x95 a-circumflex+euro+cent sign -> bullet
1033
- $s =~ s/\xC3\xA2\xE2\x82\xAC\xE2\x80\x9C/\xE2\x80\x93/g; # x96 a-circumflex+euro+left double quotation mark -> en dash
1034
- $s =~ s/\xC3\xA2\xE2\x82\xAC\xE2\x80\x9D/\xE2\x80\x94/g; # x97 a-circumflex+euro+right double quotation mark -> em dash
1035
- $s =~ s/\xC3\x8B\xC5\x93/\xCB\x9C/g; # x98 Latin capital e diaeresis+Latin small ligature oe -> small tilde
1036
- $s =~ s/\xC3\xA2\xE2\x80\x9E\xC2\xA2/\xE2\x84\xA2/g; # x99 -> trade mark sign
1037
- $s =~ s/\xC3\x85\xC2\xA1/\xC5\xA1/g; # x9A -> Latin small letter s with caron
1038
- $s =~ s/\xC3\xA2\xE2\x82\xAC\xC2\xBA/\xE2\x80\xBA/g; # x9B -> single right-pointing angle quotation mark
1039
- $s =~ s/\xC3\x85\xE2\x80\x9C/\xC5\x93/g; # x9C -> Latin small ligature oe
1040
- # x9D codepoint undefined in Windows 1252
1041
- $s =~ s/\xC3\x85\xC2\xBE/\xC5\xBE/g; # x9E -> Latin small letter z with caron
1042
- $s =~ s/\xC3\x85\xC2\xB8/\xC5\xB8/g; # x9F -> Latin capital letter Y with diaeresis
1043
- $s =~ s/\xC3\xAF\xC2\xBF\xC2\xBD/\xEF\xBF\xBD/g; # replacement character
1044
- }
1045
-
1046
- return $s;
1047
- }
1048
-
1049
- sub latin1_to_utf {
1050
- local($caller, $s) = @_;
1051
-
1052
- my $result = "";
1053
- while (($pre,$c,$post) = ($s =~ /^(.*?)([\x80-\xFF])(.*)$/s)) {
1054
- $result .= $pre;
1055
- if ($c =~ /^[\x80-\xBF]$/) {
1056
- $result .= "\xC2$c";
1057
- } elsif ($c =~ /^[\xC0-\xFF]$/) {
1058
- $c =~ tr/[\xC0-\xFF]/[\x80-\xBF]/;
1059
- $result .= "\xC3$c";
1060
- }
1061
- $s = $post;
1062
- }
1063
- $result .= $s;
1064
- return $result;
1065
- }
1066
-
1067
- sub character_type_is_letter_type {
1068
- local($caller, $char_type) = @_;
1069
-
1070
- return ($char_type =~ /\b((CJK|hiragana|kana|katakana)\s+character|diacritic|letter|syllable)\b/);
1071
- }
1072
-
1073
- sub character_type {
1074
- local($caller, $c) = @_;
1075
-
1076
- if ($c =~ /^[\x00-\x7F]/) {
1077
- return "XML tag" if $c =~ /^<.*>$/;
1078
- return "ASCII Latin letter" if $c =~ /^[a-z]$/i;
1079
- return "ASCII digit" if $c =~ /^[0-9]$/i;
1080
- return "ASCII whitespace" if $c =~ /^[\x09-\x0D\x20]$/;
1081
- return "ASCII control-character" if $c =~ /^[\x00-\x1F\x7F]$/;
1082
- return "ASCII currency" if $c eq "\$";
1083
- return "ASCII punctuation";
1084
- } elsif ($c =~ /^[\xC0-\xDF]/) {
1085
- return "non-UTF8 (invalid)" unless $c =~ /^[\xC0-\xDF][\x80-\xBF]$/;
1086
- return "non-shortest-UTF8 (invalid)" if $c =~ /[\xC0-\xC1]/;
1087
- return "non-ASCII control-character" if $c =~ /\xC2[\x80-\x9F]/;
1088
- return "non-ASCII whitespace" if $c =~ /\xC2\xA0/;
1089
- return "non-ASCII currency" if $c =~ /\xC2[\xA2-\xA5]/;
1090
- return "fraction" if $c =~ /\xC2[\xBC-\xBE]/; # NEW
1091
- return "superscript digit" if $c =~ /\xC2[\xB2\xB3\xB9]/;
1092
- return "non-ASCII Latin letter" if $c =~ /\xC2\xB5/; # micro sign
1093
- return "non-ASCII punctuation" if $c =~ /\xC2[\xA0-\xBF]/;
1094
- return "non-ASCII punctuation" if $c =~ /\xC3[\x97\xB7]/;
1095
- return "non-ASCII Latin letter" if $c =~ /\xC3[\x80-\xBF]/;
1096
- return "Latin ligature letter" if $c =~ /\xC4[\xB2\xB3]/;
1097
- return "Latin ligature letter" if $c =~ /\xC5[\x92\x93]/;
1098
- return "non-ASCII Latin letter" if $c =~ /[\xC4-\xC8]/;
1099
- return "non-ASCII Latin letter" if $c =~ /\xC9[\x80-\x8F]/;
1100
- return "IPA" if $c =~ /\xC9[\x90-\xBF]/;
1101
- return "IPA" if $c =~ /\xCA[\x80-\xBF]/;
1102
- return "IPA" if $c =~ /\xCB[\x80-\xBF]/;
1103
- return "combining-diacritic" if $c =~ /\xCC[\x80-\xBF]/;
1104
- return "combining-diacritic" if $c =~ /\xCD[\x80-\xAF]/;
1105
- return "Greek punctuation" if $c =~ /\xCD[\xBE]/; # Greek question mark
1106
- return "Greek punctuation" if $c =~ /\xCE[\x87]/; # Greek semicolon
1107
- return "Greek letter" if $c =~ /\xCD[\xB0-\xBF]/;
1108
- return "Greek letter" if $c =~ /\xCE/;
1109
- return "Greek letter" if $c =~ /\xCF[\x80-\xA1\xB3\xB7\xB8\xBA\xBB]/;
1110
- return "Coptic letter" if $c =~ /\xCF[\xA2-\xAF]/;
1111
- return "Cyrillic letter" if $c =~ /[\xD0-\xD3]/;
1112
- return "Cyrillic letter" if $c =~ /\xD4[\x80-\xAF]/;
1113
- return "Armenian punctuation" if $c =~ /\xD5[\x9A-\x9F]/;
1114
- return "Armenian punctuation" if $c =~ /\xD6[\x89-\x8F]/;
1115
- return "Armenian letter" if $c =~ /\xD4[\xB0-\xBF]/;
1116
- return "Armenian letter" if $c =~ /\xD5/;
1117
- return "Armenian letter" if $c =~ /\xD6[\x80-\x8F]/;
1118
- return "Hebrew accent" if $c =~ /\xD6[\x91-\xAE]/;
1119
- return "Hebrew punctuation" if $c =~ /\xD6\xBE/;
1120
- return "Hebrew punctuation" if $c =~ /\xD7[\x80\x83\x86\xB3\xB4]/;
1121
- return "Hebrew point" if $c =~ /\xD6[\xB0-\xBF]/;
1122
- return "Hebrew point" if $c =~ /\xD7[\x81\x82\x87]/;
1123
- return "Hebrew letter" if $c =~ /\xD7[\x90-\xB2]/;
1124
- return "other Hebrew" if $c =~ /\xD6[\x90-\xBF]/;
1125
- return "other Hebrew" if $c =~ /\xD7/;
1126
- return "Arabic currency" if $c =~ /\xD8\x8B/; # Afghani sign
1127
- return "Arabic punctuation" if $c =~ /\xD8[\x89-\x8D\x9B\x9E\x9F]/;
1128
- return "Arabic punctuation" if $c =~ /\xD9[\xAA-\xAD]/;
1129
- return "Arabic punctuation" if $c =~ /\xDB[\x94]/;
1130
- return "Arabic tatweel" if $c =~ /\xD9\x80/;
1131
- return "Arabic letter" if $c =~ /\xD8[\xA0-\xBF]/;
1132
- return "Arabic letter" if $c =~ /\xD9[\x81-\x9F]/;
1133
- return "Arabic letter" if $c =~ /\xD9[\xAE-\xBF]/;
1134
- return "Arabic letter" if $c =~ /\xDA[\x80-\xBF]/;
1135
- return "Arabic letter" if $c =~ /\xDB[\x80-\x95]/;
1136
- return "Arabic Indic digit" if $c =~ /\xD9[\xA0-\xA9]/;
1137
- return "Arabic Indic digit" if $c =~ /\xDB[\xB0-\xB9]/;
1138
- return "other Arabic" if $c =~ /[\xD8-\xDB]/;
1139
- return "Syriac punctuation" if $c =~ /\xDC[\x80-\x8F]/;
1140
- return "Syriac letter" if $c =~ /\xDC[\x90-\xAF]/;
1141
- return "Syriac diacritic" if $c =~ /\xDC[\xB0-\xBF]/;
1142
- return "Syriac diacritic" if $c =~ /\xDD[\x80-\x8A]/;
1143
- return "Thaana letter" if $c =~ /\xDE/;
1144
- } elsif ($c =~ /^[\xE0-\xEF]/) {
1145
- return "non-UTF8 (invalid)" unless $c =~ /^[\xE0-\xEF][\x80-\xBF]{2,2}$/;
1146
- return "non-shortest-UTF8 (invalid)" if $c =~ /\xE0[\x80-\x9F]/;
1147
- return "Arabic letter" if $c =~ /\xE0\xA2[\xA0-\xBF]/; # extended letters
1148
- return "other Arabic" if $c =~ /\xE0\xA3/; # extended characters
1149
- return "Devanagari punctuation" if $c =~ /\xE0\xA5[\xA4\xA5]/; # danda, double danda
1150
- return "Devanagari digit" if $c =~ /\xE0\xA5[\xA6-\xAF]/;
1151
- return "Devanagari letter" if $c =~ /\xE0[\xA4-\xA5]/;
1152
- return "Bengali digit" if $c =~ /\xE0\xA7[\xA6-\xAF]/;
1153
- return "Bengali currency" if $c =~ /\xE0\xA7[\xB2-\xB9]/;
1154
- return "Bengali letter" if $c =~ /\xE0[\xA6-\xA7]/;
1155
- return "Gurmukhi digit" if $c =~ /\xE0\xA9[\xA6-\xAF]/;
1156
- return "Gurmukhi letter" if $c =~ /\xE0[\xA8-\xA9]/;
1157
- return "Gujarati digit" if $c =~ /\xE0\xAB[\xA6-\xAF]/;
1158
- return "Gujarati letter" if $c =~ /\xE0[\xAA-\xAB]/;
1159
- return "Oriya digit" if $c =~ /\xE0\xAD[\xA6-\xAF]/;
1160
- return "Oriya fraction" if $c =~ /\xE0\xAD[\xB2-\xB7]/;
1161
- return "Oriya letter" if $c =~ /\xE0[\xAC-\xAD]/;
1162
- return "Tamil digit" if $c =~ /\xE0\xAF[\xA6-\xAF]/;
1163
- return "Tamil number" if $c =~ /\xE0\xAF[\xB0-\xB2]/; # number (10, 100, 1000)
1164
- return "Tamil letter" if $c =~ /\xE0[\xAE-\xAF]/;
1165
- return "Telegu digit" if $c =~ /\xE0\xB1[\xA6-\xAF]/;
1166
- return "Telegu fraction" if $c =~ /\xE0\xB1[\xB8-\xBE]/;
1167
- return "Telegu letter" if $c =~ /\xE0[\xB0-\xB1]/;
1168
- return "Kannada digit" if $c =~ /\xE0\xB3[\xA6-\xAF]/;
1169
- return "Kannada letter" if $c =~ /\xE0[\xB2-\xB3]/;
1170
- return "Malayalam digit" if $c =~ /\xE0\xB5[\x98-\x9E\xA6-\xB8]/;
1171
- return "Malayalam punctuation" if $c =~ /\xE0\xB5\xB9/; # date mark
1172
- return "Malayalam letter" if $c =~ /\xE0[\xB4-\xB5]/;
1173
- return "Sinhala digit" if $c =~ /\xE0\xB7[\xA6-\xAF]/;
1174
- return "Sinhala punctuation" if $c =~ /\xE0\xB7\xB4/;
1175
- return "Sinhala letter" if $c =~ /\xE0[\xB6-\xB7]/;
1176
- return "Thai currency" if $c =~ /\xE0\xB8\xBF/;
1177
- return "Thai digit" if $c =~ /\xE0\xB9[\x90-\x99]/;
1178
- return "Thai character" if $c =~ /\xE0[\xB8-\xB9]/;
1179
- return "Lao punctuation" if $c =~ /\xE0\xBA\xAF/; # Lao ellipsis
1180
- return "Lao digit" if $c =~ /\xE0\xBB[\x90-\x99]/;
1181
- return "Lao character" if $c =~ /\xE0[\xBA-\xBB]/;
1182
- return "Tibetan punctuation" if $c =~ /\xE0\xBC[\x81-\x94]/;
1183
- return "Tibetan sign" if $c =~ /\xE0\xBC[\x95-\x9F]/;
1184
- return "Tibetan digit" if $c =~ /\xE0\xBC[\xA0-\xB3]/;
1185
- return "Tibetan punctuation" if $c =~ /\xE0\xBC[\xB4-\xBD]/;
1186
- return "Tibetan letter" if $c =~ /\xE0[\xBC-\xBF]/;
1187
- return "Myanmar digit" if $c =~ /\xE1\x81[\x80-\x89]/;
1188
- return "Myanmar digit" if $c =~ /\xE1\x82[\x90-\x99]/; # Myanmar Shan digits
1189
- return "Myanmar punctuation" if $c =~ /\xE1\x81[\x8A-\x8B]/;
1190
- return "Myanmar letter" if $c =~ /\xE1[\x80-\x81]/;
1191
- return "Myanmar letter" if $c =~ /\xE1\x82[\x80-\x9F]/;
1192
- return "Georgian punctuation" if $c =~ /\xE1\x83\xBB/;
1193
- return "Georgian letter" if $c =~ /\xE1\x82[\xA0-\xBF]/;
1194
- return "Georgian letter" if $c =~ /\xE1\x83/;
1195
- return "Georgian letter" if $c =~ /\xE1\xB2[\x90-\xBF]/; # Georgian Mtavruli capital letters
1196
- return "Georgian letter" if $c =~ /\xE2\xB4[\x80-\xAF]/; # Georgian small letters (Khutsuri)
1197
- return "Korean Hangul letter" if $c =~ /\xE1[\x84-\x87]/;
1198
- return "Ethiopic punctuation" if $c =~ /\xE1\x8D[\xA0-\xA8]/;
1199
- return "Ethiopic digit" if $c =~ /\xE1\x8D[\xA9-\xB1]/;
1200
- return "Ethiopic number" if $c =~ /\xE1\x8D[\xB2-\xBC]/;
1201
- return "Ethiopic syllable" if $c =~ /\xE1[\x88-\x8D]/;
1202
- return "Cherokee letter" if $c =~ /\xE1\x8E[\xA0-\xBF]/;
1203
- return "Cherokee letter" if $c =~ /\xE1\x8F/;
1204
- return "Canadian punctuation" if $c =~ /\xE1\x90\x80/; # Canadian Syllabics hyphen
1205
- return "Canadian punctuation" if $c =~ /\xE1\x99\xAE/; # Canadian Syllabics full stop
1206
- return "Canadian syllable" if $c =~ /\xE1[\x90-\x99]/;
1207
- return "Canadian syllable" if $c =~ /\xE1\xA2[\xB0-\xBF]/;
1208
- return "Canadian syllable" if $c =~ /\xE1\xA3/;
1209
- return "Ogham whitespace" if $c =~ /\xE1\x9A\x80/;
1210
- return "Ogham letter" if $c =~ /\xE1\x9A[\x81-\x9A]/;
1211
- return "Ogham punctuation" if $c =~ /\xE1\x9A[\x9B-\x9C]/;
1212
- return "Runic punctuation" if $c =~ /\xE1\x9B[\xAB-\xAD]/;
1213
- return "Runic letter" if $c =~ /\xE1\x9A[\xA0-\xBF]/;
1214
- return "Runic letter" if $c =~ /\xE1\x9B/;
1215
- return "Khmer currency" if $c =~ /\xE1\x9F\x9B/;
1216
- return "Khmer digit" if $c =~ /\xE1\x9F[\xA0-\xA9]/;
1217
- return "Khmer letter" if $c =~ /\xE1[\x9E-\x9F]/;
1218
- return "Mongolian punctuation" if $c =~ /\xE1\xA0[\x80-\x8A]/;
1219
- return "Mongolian digit" if $c =~ /\xE1\xA0[\x90-\x99]/;
1220
- return "Mongolian letter" if $c =~ /\xE1[\xA0-\xA1]/;
1221
- return "Mongolian letter" if $c =~ /\xE1\xA2[\x80-\xAF]/;
1222
- return "Buginese letter" if $c =~ /\xE1\xA8[\x80-\x9B]/;
1223
- return "Buginese punctuation" if $c =~ /\xE1\xA8[\x9E-\x9F]/;
1224
- return "Balinese letter" if $c =~ /\xE1\xAC/;
1225
- return "Balinese letter" if $c =~ /\xE1\xAD[\x80-\x8F]/;
1226
- return "Balinese digit" if $c =~ /\xE1\xAD[\x90-\x99]/;
1227
- return "Balinese puncutation" if $c =~ /\xE1\xAD[\x9A-\xA0]/;
1228
- return "Balinese symbol" if $c =~ /\xE1\xAD[\xA1-\xBF]/;
1229
- return "Sundanese digit" if $c =~ /\xE1\xAE[\xB0-\xB9]/;
1230
- return "Sundanese letter" if $c =~ /\xE1\xAE/;
1231
- return "Cyrillic letter" if $c =~ /\xE1\xB2[\x80-\x8F]/;
1232
- return "Sundanese punctuation" if $c =~ /\xE1\xB3[\x80-\x8F]/;
1233
- return "IPA" if $c =~ /\xE1[\xB4-\xB6]/;
1234
- return "non-ASCII Latin letter" if $c =~ /\xE1[\xB8-\xBB]/;
1235
- return "Greek letter" if $c =~ /\xE1[\xBC-\xBF]/;
1236
- return "non-ASCII whitespace" if $c =~ /\xE2\x80[\x80-\x8A\xAF]/;
1237
- return "zero-width space" if $c =~ /\xE2\x80\x8B/;
1238
- return "zero-width non-space" if $c =~ /\xE2\x80\x8C/;
1239
- return "zero-width joiner" if $c =~ /\xE2\x80\x8D/;
1240
- return "directional mark" if $c =~ /\xE2\x80[\x8E-\x8F\xAA-\xAE]/;
1241
- return "non-ASCII punctuation" if $c =~ /\xE2\x80[\x90-\xBF]/;
1242
- return "non-ASCII punctuation" if $c =~ /\xE2\x81[\x80-\x9E]/;
1243
- return "superscript letter" if $c =~ /\xE2\x81[\xB1\xBF]/;
1244
- return "superscript digit" if $c =~ /\xE2\x81[\xB0-\xB9]/;
1245
- return "superscript punctuation" if $c =~ /\xE2\x81[\xBA-\xBE]/;
1246
- return "subscript digit" if $c =~ /\xE2\x82[\x80-\x89]/;
1247
- return "subscript punctuation" if $c =~ /\xE2\x82[\x8A-\x8E]/;
1248
- return "non-ASCII currency" if $c =~ /\xE2\x82[\xA0-\xBF]/;
1249
- return "letterlike symbol" if $c =~ /\xE2\x84/;
1250
- return "letterlike symbol" if $c =~ /\xE2\x85[\x80-\x8F]/;
1251
- return "fraction" if $c =~ /\xE2\x85[\x90-\x9E]/; # NEW
1252
- return "Roman number" if $c =~ /\xE2\x85[\xA0-\xBF]/; # NEW
1253
- return "arrow symbol" if $c =~ /\xE2\x86[\x90-\xBF]/;
1254
- return "arrow symbol" if $c =~ /\xE2\x87/;
1255
- return "mathematical operator" if $c =~ /\xE2[\x88-\x8B]/;
1256
- return "technical symbol" if $c =~ /\xE2[\x8C-\x8F]/;
1257
- return "enclosed alphanumeric" if $c =~ /\xE2\x91[\xA0-\xBF]/;
1258
- return "enclosed alphanumeric" if $c =~ /\xE2[\x92-\x93]/;
1259
- return "box drawing" if $c =~ /\xE2[\x94-\x95]/;
1260
- return "geometric shape" if $c =~ /\xE2\x96[\xA0-\xBF]/;
1261
- return "geometric shape" if $c =~ /\xE2\x97/;
1262
- return "pictograph" if $c =~ /\xE2[\x98-\x9E]/;
1263
- return "arrow symbol" if $c =~ /\xE2\xAC[\x80-\x91\xB0-\xBF]/;
1264
- return "geometric shape" if $c =~ /\xE2\xAC[\x92-\xAF]/;
1265
- return "arrow symbol" if $c =~ /\xE2\xAD[\x80-\x8F\x9A-\xBF]/;
1266
- return "geometric shape" if $c =~ /\xE2\xAD[\x90-\x99]/;
1267
- return "arrow symbol" if $c =~ /\xE2\xAE[\x80-\xB9]/;
1268
- return "geometric shape" if $c =~ /\xE2\xAE[\xBA-\xBF]/;
1269
- return "geometric shape" if $c =~ /\xE2\xAF[\x80-\x88\x8A-\x8F]/;
1270
- return "symbol" if $c =~ /\xE2[\xAC-\xAF]/;
1271
- return "Coptic fraction" if $c =~ /\xE2\xB3\xBD/;
1272
- return "Coptic punctuation" if $c =~ /\xE2\xB3[\xB9-\xBF]/;
1273
- return "Coptic letter" if $c =~ /\xE2[\xB2-\xB3]/;
1274
- return "Georgian letter" if $c =~ /\xE2\xB4[\x80-\xAF]/;
1275
- return "Tifinagh punctuation" if $c =~ /\xE2\xB5\xB0/;
1276
- return "Tifinagh letter" if $c =~ /\xE2\xB4[\xB0-\xBF]/;
1277
- return "Tifinagh letter" if $c =~ /\xE2\xB5/;
1278
- return "Ethiopic syllable" if $c =~ /\xE2\xB6/;
1279
- return "Ethiopic syllable" if $c =~ /\xE2\xB7[\x80-\x9F]/;
1280
- return "non-ASCII punctuation" if $c =~ /\xE3\x80[\x80-\x91\x94-\x9F\xB0\xBB-\xBD]/;
1281
- return "symbol" if $c =~ /\xE3\x80[\x91\x92\xA0\xB6\xB7]/;
1282
- return "Japanese hiragana character" if $c =~ /\xE3\x81/;
1283
- return "Japanese hiragana character" if $c =~ /\xE3\x82[\x80-\x9F]/;
1284
- return "Japanese katakana character" if $c =~ /\xE3\x82[\xA0-\xBF]/;
1285
- return "Japanese katakana character" if $c =~ /\xE3\x83/;
1286
- return "Bopomofo letter" if $c =~ /\xE3\x84[\x80-\xAF]/;
1287
- return "Korean Hangul letter" if $c =~ /\xE3\x84[\xB0-\xBF]/;
1288
- return "Korean Hangul letter" if $c =~ /\xE3\x85/;
1289
- return "Korean Hangul letter" if $c =~ /\xE3\x86[\x80-\x8F]/;
1290
- return "Bopomofo letter" if $c =~ /\xE3\x86[\xA0-\xBF]/;
1291
- return "CJK stroke" if $c =~ /\xE3\x87[\x80-\xAF]/;
1292
- return "Japanese kana character" if $c =~ /\xE3\x87[\xB0-\xBF]/;
1293
- return "CJK symbol" if $c =~ /\xE3[\x88-\x8B]/;
1294
- return "CJK square Latin abbreviation" if $c =~ /\xE3\x8D[\xB1-\xBA]/;
1295
- return "CJK square Latin abbreviation" if $c =~ /\xE3\x8E/;
1296
- return "CJK square Latin abbreviation" if $c =~ /\xE3\x8F[\x80-\x9F\xBF]/;
1297
- return "CJK character" if $c =~ /\xE4[\xB8-\xBF]/;
1298
- return "CJK character" if $c =~ /[\xE5-\xE9]/;
1299
- return "Yi syllable" if $c =~ /\xEA[\x80-\x92]/;
1300
- return "Lisu letter" if $c =~ /\xEA\x93[\x90-\xBD]/;
1301
- return "Lisu punctuation" if $c =~ /\xEA\x93[\xBE-\xBF]/;
1302
- return "Cyrillic letter" if $c =~ /\xEA\x99/;
1303
- return "Cyrillic letter" if $c =~ /\xEA\x9A[\x80-\x9F]/;
1304
- return "modifier tone" if $c =~ /\xEA\x9C[\x80-\xA1]/;
1305
- return "Javanese punctuation" if $c =~ /\xEA\xA7[\x81-\x8D\x9E-\x9F]/;
1306
- return "Javanese digit" if $c =~ /\xEA\xA7[\x90-\x99]/;
1307
- return "Javanese letter" if $c =~ /\xEA\xA6/;
1308
- return "Javanese letter" if $c =~ /\xEA\xA7[\x80-\x9F]/;
1309
- return "Ethiopic syllable" if $c =~ /\xEA\xAC[\x80-\xAF]/;
1310
- return "Cherokee letter" if $c =~ /\xEA\xAD[\xB0-\xBF]/;
1311
- return "Cherokee letter" if $c =~ /\xEA\xAE/;
1312
- return "Meetai Mayek digit" if $c =~ /\xEA\xAF[\xB0-\xB9]/;
1313
- return "Meetai Mayek letter" if $c =~ /\xEA\xAF/;
1314
- return "Korean Hangul syllable" if $c =~ /\xEA[\xB0-\xBF]/;
1315
- return "Korean Hangul syllable" if $c =~ /[\xEB-\xEC]/;
1316
- return "Korean Hangul syllable" if $c =~ /\xED[\x80-\x9E]/;
1317
- return "Klingon letter" if $c =~ /\xEF\xA3[\x90-\xA9]/;
1318
- return "Klingon digit" if $c =~ /\xEF\xA3[\xB0-\xB9]/;
1319
- return "Klingon punctuation" if $c =~ /\xEF\xA3[\xBD-\xBE]/;
1320
- return "Klingon symbol" if $c =~ /\xEF\xA3\xBF/;
1321
- return "private use character" if $c =~ /\xEE/;
1322
- return "Latin typographic ligature" if $c =~ /\xEF\xAC[\x80-\x86]/;
1323
- return "Hebrew presentation letter" if $c =~ /\xEF\xAC[\x9D-\xBF]/;
1324
- return "Hebrew presentation letter" if $c =~ /\xEF\xAD[\x80-\x8F]/;
1325
- return "Arabic presentation letter" if $c =~ /\xEF\xAD[\x90-\xBF]/;
1326
- return "Arabic presentation letter" if $c =~ /\xEF[\xAE-\xB7]/;
1327
- return "non-ASCII punctuation" if $c =~ /\xEF\xB8[\x90-\x99]/;
1328
- return "non-ASCII punctuation" if $c =~ /\xEF\xB8[\xB0-\xBF]/;
1329
- return "non-ASCII punctuation" if $c =~ /\xEF\xB9[\x80-\xAB]/;
1330
- return "Arabic presentation letter" if $c =~ /\xEF\xB9[\xB0-\xBF]/;
1331
- return "Arabic presentation letter" if $c =~ /\xEF\xBA/;
1332
- return "Arabic presentation letter" if $c =~ /\xEF\xBB[\x80-\xBC]/;
1333
- return "byte-order mark/zero-width no-break space" if $c eq "\xEF\xBB\xBF";
1334
- return "fullwidth currency" if $c =~ /\xEF\xBC\x84/;
1335
- return "fullwidth digit" if $c =~ /\xEF\xBC[\x90-\x99]/;
1336
- return "fullwidth Latin letter" if $c =~ /\xEF\xBC[\xA1-\xBA]/;
1337
- return "fullwidth Latin letter" if $c =~ /\xEF\xBD[\x81-\x9A]/;
1338
- return "fullwidth punctuation" if $c =~ /\xEF\xBC/;
1339
- return "fullwidth punctuation" if $c =~ /\xEF\xBD[\x9B-\xA4]/;
1340
- return "halfwidth Japanese punctuation" if $c =~ /\xEF\xBD[\xA1-\xA4]/;
1341
- return "halfwidth Japanese katakana character" if $c =~ /\xEF\xBD[\xA5-\xBF]/;
1342
- return "halfwidth Japanese katakana character" if $c =~ /\xEF\xBE[\x80-\x9F]/;
1343
- return "fullwidth currency" if $c =~ /\xEF\xBF[\xA0-\xA6]/;
1344
- return "replacement character" if $c eq "\xEF\xBF\xBD";
1345
- } elsif ($c =~ /[\xF0-\xF7]/) {
1346
- return "non-UTF8 (invalid)" unless $c =~ /[\xF0-\xF7][\x80-\xBF]{3,3}$/;
1347
- return "non-shortest-UTF8 (invalid)" if $c =~ /\xF0[\x80-\x8F]/;
1348
- return "Linear B syllable" if $c =~ /\xF0\x90\x80/;
1349
- return "Linear B syllable" if $c =~ /\xF0\x90\x81[\x80-\x8F]/;
1350
- return "Linear B symbol" if $c =~ /\xF0\x90\x81[\x90-\x9F]/;
1351
- return "Linear B ideogram" if $c =~ /\xF0\x90[\x82-\x83]/;
1352
- return "Gothic letter" if $c =~ /\xF0\x90\x8C[\xB0-\xBF]/;
1353
- return "Gothic letter" if $c =~ /\xF0\x90\x8D[\x80-\x8F]/;
1354
- return "Phoenician letter" if $c =~ /\xF0\x90\xA4[\x80-\x95]/;
1355
- return "Phoenician number" if $c =~ /\xF0\x90\xA4[\x96-\x9B]/;
1356
- return "Phoenician punctuation" if $c =~ /\xF0\x90\xA4\x9F/; # word separator
1357
- return "Old Hungarian number" if $c =~ /\xF0\x90\xB3[\xBA-\xBF]/;
1358
- return "Old Hungarian letter" if $c =~ /\xF0\x90[\xB2-\xB3]/;
1359
- return "Cuneiform digit" if $c =~ /\xF0\x92\x90/; # numberic sign
1360
- return "Cuneiform digit" if $c =~ /\xF0\x92\x91[\x80-\xAF]/; # numberic sign
1361
- return "Cuneiform punctuation" if $c =~ /\xF0\x92\x91[\xB0-\xBF]/;
1362
- return "Cuneiform sign" if $c =~ /\xF0\x92[\x80-\x95]/;
1363
- return "Egyptian hieroglyph number" if $c =~ /\xF0\x93\x81\xA8/;
1364
- return "Egyptian hieroglyph number" if $c =~ /\xF0\x93\x82[\xAD-\xB6]/;
1365
- return "Egyptian hieroglyph number" if $c =~ /\xF0\x93\x86[\x90\xBC-\xBF]/;
1366
- return "Egyptian hieroglyph number" if $c =~ /\xF0\x93\x87[\x80-\x84]/;
1367
- return "Egyptian hieroglyph number" if $c =~ /\xF0\x93\x8D[\xA2-\xAB]/;
1368
- return "Egyptian hieroglyph number" if $c =~ /\xF0\x93\x8E[\x86-\x92]/;
1369
- return "Egyptian hieroglyph number" if $c =~ /\xF0\x93\x8F[\xBA-\xBF]/;
1370
- return "Egyptian hieroglyph number" if $c =~ /\xF0\x93\x90[\x80-\x83]/;
1371
- return "Egyptian hieroglyph" if $c =~ /\xF0\x93[\x80-\x90]/;
1372
- return "enclosed alphanumeric" if $c =~ /\xF0\x9F[\x84-\x87]/;
1373
- return "Mahjong symbol" if $c =~ /\xF0\x9F\x80[\x80-\xAF]/;
1374
- return "Domino symbol" if $c =~ /\xF0\x9F\x80[\xB0-\xBF]/;
1375
- return "Domino symbol" if $c =~ /\xF0\x9F\x81/;
1376
- return "Domino symbol" if $c =~ /\xF0\x9F\x82[\x80-\x9F]/;
1377
- return "Playing card symbol" if $c =~ /\xF0\x9F\x82[\xA0-\xBF]/;
1378
- return "Playing card symbol" if $c =~ /\xF0\x9F\x83/;
1379
- return "CJK symbol" if $c =~ /\xF0\x9F[\x88-\x8B]/;
1380
- return "pictograph" if $c =~ /\xF0\x9F[\x8C-\x9B]/;
1381
- return "geometric shape" if $c =~ /\xF0\x9F[\x9E-\x9F]/;
1382
- return "non-ASCII punctuation" if $c =~ /\xF0\x9F[\xA0-\xA3]/;
1383
- return "pictograph" if $c =~ /\xF0\x9F[\xA4-\xAB]/;
1384
- return "CJK character" if $c =~ /\xF0[\xA0-\xAF]/;
1385
- return "tag" if $c =~ /\xF3\xA0[\x80-\x81]/;
1386
- return "variation selector" if $c =~ /\xF3\xA0[\x84-\x87]/;
1387
- return "private use character" if $c =~ /\xF3[\xB0-\xBF]/;
1388
- return "private use character" if $c =~ /\xF4[\x80-\x8F]/;
1389
- # ...
1390
- } elsif ($c =~ /[\xF8-\xFB]/) {
1391
- return "non-UTF8 (invalid)" unless $c =~ /[\xF8-\xFB][\x80-\xBF]{4,4}$/;
1392
- } elsif ($c =~ /[\xFC-\xFD]/) {
1393
- return "non-UTF8 (invalid)" unless $c =~ /[\xFC-\xFD][\x80-\xBF]{5,5}$/;
1394
- } elsif ($c =~ /\xFE/) {
1395
- return "non-UTF8 (invalid)" unless $c =~ /\xFE][\x80-\xBF]{6,6}$/;
1396
- } else {
1397
- return "non-UTF8 (invalid)";
1398
- }
1399
- return "other character";
1400
- }
1401
-
1402
- 1;
1403
-
1404
-
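The deleted UTF8.pm routine above classifies a character by matching its raw UTF-8 byte sequence against hard-coded byte-range patterns. A minimal Python sketch of the same idea, covering only a few of the classes listed above (the byte ranges are copied from the Perl patterns; this sketch is not part of the uroman distribution):

def classify_utf8_bytes(b: bytes) -> str:
    # Mirrors a handful of the Perl byte-range tests above.
    if len(b) == 1 and b[0] < 0x80:
        return "ASCII"
    if b == b"\xEF\xBB\xBF":
        return "byte-order mark/zero-width no-break space"
    if b == b"\xEF\xBF\xBD":
        return "replacement character"
    if len(b) == 3 and ((b[0] == 0xE4 and b[1] >= 0xB8) or 0xE5 <= b[0] <= 0xE9):
        return "CJK character"
    if len(b) == 3 and b[0] == 0xEA and 0xB0 <= b[1] <= 0xBF:
        return "Korean Hangul syllable"
    return "other character"

print(classify_utf8_bytes("中".encode("utf-8")))   # CJK character
print(classify_utf8_bytes("가".encode("utf-8")))   # Korean Hangul syllable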
 
spaces/Alcedo/yunmedia/resources/chatgpt-plugin/js/chunk-vendors.cd7b5e68.js DELETED
The diff for this file is too large to render. See raw diff
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/activations.py DELETED
@@ -1,12 +0,0 @@
1
- from torch import nn
2
-
3
-
4
- def get_activation(act_fn):
5
- if act_fn in ["swish", "silu"]:
6
- return nn.SiLU()
7
- elif act_fn == "mish":
8
- return nn.Mish()
9
- elif act_fn == "gelu":
10
- return nn.GELU()
11
- else:
12
- raise ValueError(f"Unsupported activation function: {act_fn}")
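A minimal usage sketch for the deleted helper above (assumes torch is installed and the module path as it appeared in this repository before deletion):

import torch
from diffusers.models.activations import get_activation  # path of the deleted file

act = get_activation("silu")                 # returns nn.SiLU()
print(act(torch.tensor([-1.0, 0.0, 1.0])))   # SiLU applied elementwise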
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/attention_processor.py DELETED
@@ -1,1680 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- from typing import Callable, Optional, Union
15
-
16
- import torch
17
- import torch.nn.functional as F
18
- from torch import nn
19
-
20
- from ..utils import deprecate, logging, maybe_allow_in_graph
21
- from ..utils.import_utils import is_xformers_available
22
- from .lora import LoRALinearLayer
23
-
24
-
25
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
26
-
27
-
28
- if is_xformers_available():
29
- import xformers
30
- import xformers.ops
31
- else:
32
- xformers = None
33
-
34
-
35
- @maybe_allow_in_graph
36
- class Attention(nn.Module):
37
- r"""
38
- A cross attention layer.
39
-
40
- Parameters:
41
- query_dim (`int`): The number of channels in the query.
42
- cross_attention_dim (`int`, *optional*):
43
- The number of channels in the encoder_hidden_states. If not given, defaults to `query_dim`.
44
- heads (`int`, *optional*, defaults to 8): The number of heads to use for multi-head attention.
45
- dim_head (`int`, *optional*, defaults to 64): The number of channels in each head.
46
- dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
47
- bias (`bool`, *optional*, defaults to False):
48
- Set to `True` for the query, key, and value linear layers to contain a bias parameter.
49
- """
50
-
51
- def __init__(
52
- self,
53
- query_dim: int,
54
- cross_attention_dim: Optional[int] = None,
55
- heads: int = 8,
56
- dim_head: int = 64,
57
- dropout: float = 0.0,
58
- bias=False,
59
- upcast_attention: bool = False,
60
- upcast_softmax: bool = False,
61
- cross_attention_norm: Optional[str] = None,
62
- cross_attention_norm_num_groups: int = 32,
63
- added_kv_proj_dim: Optional[int] = None,
64
- norm_num_groups: Optional[int] = None,
65
- spatial_norm_dim: Optional[int] = None,
66
- out_bias: bool = True,
67
- scale_qk: bool = True,
68
- only_cross_attention: bool = False,
69
- eps: float = 1e-5,
70
- rescale_output_factor: float = 1.0,
71
- residual_connection: bool = False,
72
- _from_deprecated_attn_block=False,
73
- processor: Optional["AttnProcessor"] = None,
74
- ):
75
- super().__init__()
76
- inner_dim = dim_head * heads
77
- cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim
78
- self.upcast_attention = upcast_attention
79
- self.upcast_softmax = upcast_softmax
80
- self.rescale_output_factor = rescale_output_factor
81
- self.residual_connection = residual_connection
82
- self.dropout = dropout
83
-
84
- # we make use of this private variable to know whether this class is loaded
85
- # with a deprecated state dict so that we can convert it on the fly
86
- self._from_deprecated_attn_block = _from_deprecated_attn_block
87
-
88
- self.scale_qk = scale_qk
89
- self.scale = dim_head**-0.5 if self.scale_qk else 1.0
90
-
91
- self.heads = heads
92
- # for slice_size > 0 the attention score computation
93
- # is split across the batch axis to save memory
94
- # You can set slice_size with `set_attention_slice`
95
- self.sliceable_head_dim = heads
96
-
97
- self.added_kv_proj_dim = added_kv_proj_dim
98
- self.only_cross_attention = only_cross_attention
99
-
100
- if self.added_kv_proj_dim is None and self.only_cross_attention:
101
- raise ValueError(
102
- "`only_cross_attention` can only be set to True if `added_kv_proj_dim` is not None. Make sure to set either `only_cross_attention=False` or define `added_kv_proj_dim`."
103
- )
104
-
105
- if norm_num_groups is not None:
106
- self.group_norm = nn.GroupNorm(num_channels=query_dim, num_groups=norm_num_groups, eps=eps, affine=True)
107
- else:
108
- self.group_norm = None
109
-
110
- if spatial_norm_dim is not None:
111
- self.spatial_norm = SpatialNorm(f_channels=query_dim, zq_channels=spatial_norm_dim)
112
- else:
113
- self.spatial_norm = None
114
-
115
- if cross_attention_norm is None:
116
- self.norm_cross = None
117
- elif cross_attention_norm == "layer_norm":
118
- self.norm_cross = nn.LayerNorm(cross_attention_dim)
119
- elif cross_attention_norm == "group_norm":
120
- if self.added_kv_proj_dim is not None:
121
- # The given `encoder_hidden_states` are initially of shape
122
- # (batch_size, seq_len, added_kv_proj_dim) before being projected
123
- # to (batch_size, seq_len, cross_attention_dim). The norm is applied
124
- # before the projection, so we need to use `added_kv_proj_dim` as
125
- # the number of channels for the group norm.
126
- norm_cross_num_channels = added_kv_proj_dim
127
- else:
128
- norm_cross_num_channels = cross_attention_dim
129
-
130
- self.norm_cross = nn.GroupNorm(
131
- num_channels=norm_cross_num_channels, num_groups=cross_attention_norm_num_groups, eps=1e-5, affine=True
132
- )
133
- else:
134
- raise ValueError(
135
- f"unknown cross_attention_norm: {cross_attention_norm}. Should be None, 'layer_norm' or 'group_norm'"
136
- )
137
-
138
- self.to_q = nn.Linear(query_dim, inner_dim, bias=bias)
139
-
140
- if not self.only_cross_attention:
141
- # only relevant for the `AddedKVProcessor` classes
142
- self.to_k = nn.Linear(cross_attention_dim, inner_dim, bias=bias)
143
- self.to_v = nn.Linear(cross_attention_dim, inner_dim, bias=bias)
144
- else:
145
- self.to_k = None
146
- self.to_v = None
147
-
148
- if self.added_kv_proj_dim is not None:
149
- self.add_k_proj = nn.Linear(added_kv_proj_dim, inner_dim)
150
- self.add_v_proj = nn.Linear(added_kv_proj_dim, inner_dim)
151
-
152
- self.to_out = nn.ModuleList([])
153
- self.to_out.append(nn.Linear(inner_dim, query_dim, bias=out_bias))
154
- self.to_out.append(nn.Dropout(dropout))
155
-
156
- # set attention processor
157
- # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
158
- # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
159
- # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
160
- if processor is None:
161
- processor = (
162
- AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor()
163
- )
164
- self.set_processor(processor)
165
-
166
- def set_use_memory_efficient_attention_xformers(
167
- self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None
168
- ):
169
- is_lora = hasattr(self, "processor") and isinstance(
170
- self.processor,
171
- LORA_ATTENTION_PROCESSORS,
172
- )
173
- is_custom_diffusion = hasattr(self, "processor") and isinstance(
174
- self.processor, (CustomDiffusionAttnProcessor, CustomDiffusionXFormersAttnProcessor)
175
- )
176
- is_added_kv_processor = hasattr(self, "processor") and isinstance(
177
- self.processor,
178
- (
179
- AttnAddedKVProcessor,
180
- AttnAddedKVProcessor2_0,
181
- SlicedAttnAddedKVProcessor,
182
- XFormersAttnAddedKVProcessor,
183
- LoRAAttnAddedKVProcessor,
184
- ),
185
- )
186
-
187
- if use_memory_efficient_attention_xformers:
188
- if is_added_kv_processor and (is_lora or is_custom_diffusion):
189
- raise NotImplementedError(
190
- f"Memory efficient attention is currently not supported for LoRA or custom diffuson for attention processor type {self.processor}"
191
- )
192
- if not is_xformers_available():
193
- raise ModuleNotFoundError(
194
- (
195
- "Refer to https://github.com/facebookresearch/xformers for more information on how to install"
196
- " xformers"
197
- ),
198
- name="xformers",
199
- )
200
- elif not torch.cuda.is_available():
201
- raise ValueError(
202
- "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is"
203
- " only available for GPU "
204
- )
205
- else:
206
- try:
207
- # Make sure we can run the memory efficient attention
208
- _ = xformers.ops.memory_efficient_attention(
209
- torch.randn((1, 2, 40), device="cuda"),
210
- torch.randn((1, 2, 40), device="cuda"),
211
- torch.randn((1, 2, 40), device="cuda"),
212
- )
213
- except Exception as e:
214
- raise e
215
-
216
- if is_lora:
217
- # TODO (sayakpaul): should we throw a warning if someone wants to use the xformers
218
- # variant when using PT 2.0 now that we have LoRAAttnProcessor2_0?
219
- processor = LoRAXFormersAttnProcessor(
220
- hidden_size=self.processor.hidden_size,
221
- cross_attention_dim=self.processor.cross_attention_dim,
222
- rank=self.processor.rank,
223
- attention_op=attention_op,
224
- )
225
- processor.load_state_dict(self.processor.state_dict())
226
- processor.to(self.processor.to_q_lora.up.weight.device)
227
- elif is_custom_diffusion:
228
- processor = CustomDiffusionXFormersAttnProcessor(
229
- train_kv=self.processor.train_kv,
230
- train_q_out=self.processor.train_q_out,
231
- hidden_size=self.processor.hidden_size,
232
- cross_attention_dim=self.processor.cross_attention_dim,
233
- attention_op=attention_op,
234
- )
235
- processor.load_state_dict(self.processor.state_dict())
236
- if hasattr(self.processor, "to_k_custom_diffusion"):
237
- processor.to(self.processor.to_k_custom_diffusion.weight.device)
238
- elif is_added_kv_processor:
239
- # TODO(Patrick, Suraj, William) - currently xformers doesn't work for UnCLIP
240
- # which uses this type of cross attention ONLY because the attention mask of format
241
- # [0, ..., -10.000, ..., 0, ...,] is not supported
242
- # throw warning
243
- logger.info(
244
- "Memory efficient attention with `xformers` might currently not work correctly if an attention mask is required for the attention operation."
245
- )
246
- processor = XFormersAttnAddedKVProcessor(attention_op=attention_op)
247
- else:
248
- processor = XFormersAttnProcessor(attention_op=attention_op)
249
- else:
250
- if is_lora:
251
- attn_processor_class = (
252
- LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
253
- )
254
- processor = attn_processor_class(
255
- hidden_size=self.processor.hidden_size,
256
- cross_attention_dim=self.processor.cross_attention_dim,
257
- rank=self.processor.rank,
258
- )
259
- processor.load_state_dict(self.processor.state_dict())
260
- processor.to(self.processor.to_q_lora.up.weight.device)
261
- elif is_custom_diffusion:
262
- processor = CustomDiffusionAttnProcessor(
263
- train_kv=self.processor.train_kv,
264
- train_q_out=self.processor.train_q_out,
265
- hidden_size=self.processor.hidden_size,
266
- cross_attention_dim=self.processor.cross_attention_dim,
267
- )
268
- processor.load_state_dict(self.processor.state_dict())
269
- if hasattr(self.processor, "to_k_custom_diffusion"):
270
- processor.to(self.processor.to_k_custom_diffusion.weight.device)
271
- else:
272
- # set attention processor
273
- # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
274
- # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
275
- # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
276
- processor = (
277
- AttnProcessor2_0()
278
- if hasattr(F, "scaled_dot_product_attention") and self.scale_qk
279
- else AttnProcessor()
280
- )
281
-
282
- self.set_processor(processor)
283
-
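Continuing the construction sketch above, this is how a caller would opt the layer into the xformers path handled by this method (requires the xformers package and a CUDA device; both names are from this deleted file):

attn.set_use_memory_efficient_attention_xformers(True)
# or, equivalently for the plain (non-LoRA, non-added-KV) case:
attn.set_processor(XFormersAttnProcessor(attention_op=None))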
284
- def set_attention_slice(self, slice_size):
285
- if slice_size is not None and slice_size > self.sliceable_head_dim:
286
- raise ValueError(f"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.")
287
-
288
- if slice_size is not None and self.added_kv_proj_dim is not None:
289
- processor = SlicedAttnAddedKVProcessor(slice_size)
290
- elif slice_size is not None:
291
- processor = SlicedAttnProcessor(slice_size)
292
- elif self.added_kv_proj_dim is not None:
293
- processor = AttnAddedKVProcessor()
294
- else:
295
- # set attention processor
296
- # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
297
- # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
298
- # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
299
- processor = (
300
- AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor()
301
- )
302
-
303
- self.set_processor(processor)
304
-
305
- def set_processor(self, processor: "AttnProcessor"):
306
- # if current processor is in `self._modules` and if passed `processor` is not, we need to
307
- # pop `processor` from `self._modules`
308
- if (
309
- hasattr(self, "processor")
310
- and isinstance(self.processor, torch.nn.Module)
311
- and not isinstance(processor, torch.nn.Module)
312
- ):
313
- logger.info(f"You are removing possibly trained weights of {self.processor} with {processor}")
314
- self._modules.pop("processor")
315
-
316
- self.processor = processor
317
-
318
- def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None, **cross_attention_kwargs):
319
- # The `Attention` class can call different attention processors / attention functions
320
- # here we simply pass along all tensors to the selected processor class
321
- # For standard processors that are defined here, `**cross_attention_kwargs` is empty
322
- return self.processor(
323
- self,
324
- hidden_states,
325
- encoder_hidden_states=encoder_hidden_states,
326
- attention_mask=attention_mask,
327
- **cross_attention_kwargs,
328
- )
329
-
330
- def batch_to_head_dim(self, tensor):
331
- head_size = self.heads
332
- batch_size, seq_len, dim = tensor.shape
333
- tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
334
- tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size)
335
- return tensor
336
-
337
- def head_to_batch_dim(self, tensor, out_dim=3):
338
- head_size = self.heads
339
- batch_size, seq_len, dim = tensor.shape
340
- tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
341
- tensor = tensor.permute(0, 2, 1, 3)
342
-
343
- if out_dim == 3:
344
- tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size)
345
-
346
- return tensor
347
-
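A standalone shape sketch of the two reshaping helpers above, written with plain torch ops (illustrative sizes):

import torch

heads, batch, seq, inner = 8, 2, 64, 320
x = torch.randn(batch, seq, inner)

# head_to_batch_dim: (batch, seq, inner) -> (batch * heads, seq, inner // heads)
y = x.reshape(batch, seq, heads, inner // heads).permute(0, 2, 1, 3).reshape(batch * heads, seq, inner // heads)

# batch_to_head_dim: the inverse, back to (batch, seq, inner)
z = y.reshape(batch, heads, seq, inner // heads).permute(0, 2, 1, 3).reshape(batch, seq, inner)
print(y.shape, z.shape)   # torch.Size([16, 64, 40]) torch.Size([2, 64, 320])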
348
- def get_attention_scores(self, query, key, attention_mask=None):
349
- dtype = query.dtype
350
- if self.upcast_attention:
351
- query = query.float()
352
- key = key.float()
353
-
354
- if attention_mask is None:
355
- baddbmm_input = torch.empty(
356
- query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device
357
- )
358
- beta = 0
359
- else:
360
- baddbmm_input = attention_mask
361
- beta = 1
362
-
363
- attention_scores = torch.baddbmm(
364
- baddbmm_input,
365
- query,
366
- key.transpose(-1, -2),
367
- beta=beta,
368
- alpha=self.scale,
369
- )
370
- del baddbmm_input
371
-
372
- if self.upcast_softmax:
373
- attention_scores = attention_scores.float()
374
-
375
- attention_probs = attention_scores.softmax(dim=-1)
376
- del attention_scores
377
-
378
- attention_probs = attention_probs.to(dtype)
379
-
380
- return attention_probs
381
-
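What get_attention_scores computes, spelled out with plain torch ops (illustrative shapes; alpha corresponds to attn.scale and the zero tensor stands in for the absent attention mask):

import torch

q = torch.randn(16, 64, 40)    # (batch * heads, query tokens, head_dim)
k = torch.randn(16, 77, 40)    # (batch * heads, key tokens, head_dim)
scale = 40 ** -0.5

scores = torch.baddbmm(torch.zeros(16, 64, 77), q, k.transpose(-1, -2), beta=0, alpha=scale)
probs = scores.softmax(dim=-1)   # each row sums to 1 over the key tokens
print(probs.shape)               # torch.Size([16, 64, 77])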
382
- def prepare_attention_mask(self, attention_mask, target_length, batch_size=None, out_dim=3):
383
- if batch_size is None:
384
- deprecate(
385
- "batch_size=None",
386
- "0.0.15",
387
- (
388
- "Not passing the `batch_size` parameter to `prepare_attention_mask` can lead to incorrect"
389
- " attention mask preparation and is deprecated behavior. Please make sure to pass `batch_size` to"
390
- " `prepare_attention_mask` when preparing the attention_mask."
391
- ),
392
- )
393
- batch_size = 1
394
-
395
- head_size = self.heads
396
- if attention_mask is None:
397
- return attention_mask
398
-
399
- current_length: int = attention_mask.shape[-1]
400
- if current_length != target_length:
401
- if attention_mask.device.type == "mps":
402
- # HACK: MPS does not support padding by more than the dimension of the input tensor.
403
- # Instead, we can manually construct the padding tensor.
404
- padding_shape = (attention_mask.shape[0], attention_mask.shape[1], target_length)
405
- padding = torch.zeros(padding_shape, dtype=attention_mask.dtype, device=attention_mask.device)
406
- attention_mask = torch.cat([attention_mask, padding], dim=2)
407
- else:
408
- # TODO: for pipelines such as stable-diffusion, padding cross-attn mask:
409
- # we want to instead pad by (0, remaining_length), where remaining_length is:
410
- # remaining_length: int = target_length - current_length
411
- # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding
412
- attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)
413
-
414
- if out_dim == 3:
415
- if attention_mask.shape[0] < batch_size * head_size:
416
- attention_mask = attention_mask.repeat_interleave(head_size, dim=0)
417
- elif out_dim == 4:
418
- attention_mask = attention_mask.unsqueeze(1)
419
- attention_mask = attention_mask.repeat_interleave(head_size, dim=1)
420
-
421
- return attention_mask
422
-
423
- def norm_encoder_hidden_states(self, encoder_hidden_states):
424
- assert self.norm_cross is not None, "self.norm_cross must be defined to call self.norm_encoder_hidden_states"
425
-
426
- if isinstance(self.norm_cross, nn.LayerNorm):
427
- encoder_hidden_states = self.norm_cross(encoder_hidden_states)
428
- elif isinstance(self.norm_cross, nn.GroupNorm):
429
- # Group norm norms along the channels dimension and expects
430
- # input to be in the shape of (N, C, *). In this case, we want
431
- # to norm along the hidden dimension, so we need to move
432
- # (batch_size, sequence_length, hidden_size) ->
433
- # (batch_size, hidden_size, sequence_length)
434
- encoder_hidden_states = encoder_hidden_states.transpose(1, 2)
435
- encoder_hidden_states = self.norm_cross(encoder_hidden_states)
436
- encoder_hidden_states = encoder_hidden_states.transpose(1, 2)
437
- else:
438
- assert False
439
-
440
- return encoder_hidden_states
441
-
442
-
443
- class AttnProcessor:
444
- r"""
445
- Default processor for performing attention-related computations.
446
- """
447
-
448
- def __call__(
449
- self,
450
- attn: Attention,
451
- hidden_states,
452
- encoder_hidden_states=None,
453
- attention_mask=None,
454
- temb=None,
455
- ):
456
- residual = hidden_states
457
-
458
- if attn.spatial_norm is not None:
459
- hidden_states = attn.spatial_norm(hidden_states, temb)
460
-
461
- input_ndim = hidden_states.ndim
462
-
463
- if input_ndim == 4:
464
- batch_size, channel, height, width = hidden_states.shape
465
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
466
-
467
- batch_size, sequence_length, _ = (
468
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
469
- )
470
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
471
-
472
- if attn.group_norm is not None:
473
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
474
-
475
- query = attn.to_q(hidden_states)
476
-
477
- if encoder_hidden_states is None:
478
- encoder_hidden_states = hidden_states
479
- elif attn.norm_cross:
480
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
481
-
482
- key = attn.to_k(encoder_hidden_states)
483
- value = attn.to_v(encoder_hidden_states)
484
-
485
- query = attn.head_to_batch_dim(query)
486
- key = attn.head_to_batch_dim(key)
487
- value = attn.head_to_batch_dim(value)
488
-
489
- attention_probs = attn.get_attention_scores(query, key, attention_mask)
490
- hidden_states = torch.bmm(attention_probs, value)
491
- hidden_states = attn.batch_to_head_dim(hidden_states)
492
-
493
- # linear proj
494
- hidden_states = attn.to_out[0](hidden_states)
495
- # dropout
496
- hidden_states = attn.to_out[1](hidden_states)
497
-
498
- if input_ndim == 4:
499
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
500
-
501
- if attn.residual_connection:
502
- hidden_states = hidden_states + residual
503
-
504
- hidden_states = hidden_states / attn.rescale_output_factor
505
-
506
- return hidden_states
507
-
508
-
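Continuing the earlier sketch, the default processor can also be set explicitly; both names are defined in this deleted file:

attn.set_processor(AttnProcessor())
out = attn(hidden_states, encoder_hidden_states=encoder_hidden_states)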
509
- class LoRAAttnProcessor(nn.Module):
510
- r"""
511
- Processor for implementing the LoRA attention mechanism.
512
-
513
- Args:
514
- hidden_size (`int`, *optional*):
515
- The hidden size of the attention layer.
516
- cross_attention_dim (`int`, *optional*):
517
- The number of channels in the `encoder_hidden_states`.
518
- rank (`int`, defaults to 4):
519
- The dimension of the LoRA update matrices.
520
- network_alpha (`int`, *optional*):
521
- Equivalent to `alpha` but its usage is specific to Kohya (A1111) style LoRAs.
522
- """
523
-
524
- def __init__(self, hidden_size, cross_attention_dim=None, rank=4, network_alpha=None, **kwargs):
525
- super().__init__()
526
-
527
- self.hidden_size = hidden_size
528
- self.cross_attention_dim = cross_attention_dim
529
- self.rank = rank
530
-
531
- q_rank = kwargs.pop("q_rank", None)
532
- q_hidden_size = kwargs.pop("q_hidden_size", None)
533
- q_rank = q_rank if q_rank is not None else rank
534
- q_hidden_size = q_hidden_size if q_hidden_size is not None else hidden_size
535
-
536
- v_rank = kwargs.pop("v_rank", None)
537
- v_hidden_size = kwargs.pop("v_hidden_size", None)
538
- v_rank = v_rank if v_rank is not None else rank
539
- v_hidden_size = v_hidden_size if v_hidden_size is not None else hidden_size
540
-
541
- out_rank = kwargs.pop("out_rank", None)
542
- out_hidden_size = kwargs.pop("out_hidden_size", None)
543
- out_rank = out_rank if out_rank is not None else rank
544
- out_hidden_size = out_hidden_size if out_hidden_size is not None else hidden_size
545
-
546
- self.to_q_lora = LoRALinearLayer(q_hidden_size, q_hidden_size, q_rank, network_alpha)
547
- self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
548
- self.to_v_lora = LoRALinearLayer(cross_attention_dim or v_hidden_size, v_hidden_size, v_rank, network_alpha)
549
- self.to_out_lora = LoRALinearLayer(out_hidden_size, out_hidden_size, out_rank, network_alpha)
550
-
551
- def __call__(
552
- self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, scale=1.0, temb=None
553
- ):
554
- residual = hidden_states
555
-
556
- if attn.spatial_norm is not None:
557
- hidden_states = attn.spatial_norm(hidden_states, temb)
558
-
559
- input_ndim = hidden_states.ndim
560
-
561
- if input_ndim == 4:
562
- batch_size, channel, height, width = hidden_states.shape
563
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
564
-
565
- batch_size, sequence_length, _ = (
566
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
567
- )
568
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
569
-
570
- if attn.group_norm is not None:
571
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
572
-
573
- query = attn.to_q(hidden_states) + scale * self.to_q_lora(hidden_states)
574
- query = attn.head_to_batch_dim(query)
575
-
576
- if encoder_hidden_states is None:
577
- encoder_hidden_states = hidden_states
578
- elif attn.norm_cross:
579
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
580
-
581
- key = attn.to_k(encoder_hidden_states) + scale * self.to_k_lora(encoder_hidden_states)
582
- value = attn.to_v(encoder_hidden_states) + scale * self.to_v_lora(encoder_hidden_states)
583
-
584
- key = attn.head_to_batch_dim(key)
585
- value = attn.head_to_batch_dim(value)
586
-
587
- attention_probs = attn.get_attention_scores(query, key, attention_mask)
588
- hidden_states = torch.bmm(attention_probs, value)
589
- hidden_states = attn.batch_to_head_dim(hidden_states)
590
-
591
- # linear proj
592
- hidden_states = attn.to_out[0](hidden_states) + scale * self.to_out_lora(hidden_states)
593
- # dropout
594
- hidden_states = attn.to_out[1](hidden_states)
595
-
596
- if input_ndim == 4:
597
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
598
-
599
- if attn.residual_connection:
600
- hidden_states = hidden_states + residual
601
-
602
- hidden_states = hidden_states / attn.rescale_output_factor
603
-
604
- return hidden_states
605
-
606
-
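The LoRA update applied in the processor above, isolated into a standalone sketch: a frozen linear projection plus a scaled low-rank correction (illustrative shapes; the down/up pair stands in for LoRALinearLayer, which this file imports):

import torch
from torch import nn

hidden, rank = 320, 4
base = nn.Linear(hidden, hidden)
down = nn.Linear(hidden, rank, bias=False)
up = nn.Linear(rank, hidden, bias=False)

x = torch.randn(2, 64, hidden)
scale = 1.0
query = base(x) + scale * up(down(x))    # mirrors: attn.to_q(h) + scale * self.to_q_lora(h)
print(query.shape)                       # torch.Size([2, 64, 320])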
607
- class CustomDiffusionAttnProcessor(nn.Module):
608
- r"""
609
- Processor for implementing attention for the Custom Diffusion method.
610
-
611
- Args:
612
- train_kv (`bool`, defaults to `True`):
613
- Whether to newly train the key and value matrices corresponding to the text features.
614
- train_q_out (`bool`, defaults to `True`):
615
- Whether to newly train query matrices corresponding to the latent image features.
616
- hidden_size (`int`, *optional*, defaults to `None`):
617
- The hidden size of the attention layer.
618
- cross_attention_dim (`int`, *optional*, defaults to `None`):
619
- The number of channels in the `encoder_hidden_states`.
620
- out_bias (`bool`, defaults to `True`):
621
- Whether to include the bias parameter in `train_q_out`.
622
- dropout (`float`, *optional*, defaults to 0.0):
623
- The dropout probability to use.
624
- """
625
-
626
- def __init__(
627
- self,
628
- train_kv=True,
629
- train_q_out=True,
630
- hidden_size=None,
631
- cross_attention_dim=None,
632
- out_bias=True,
633
- dropout=0.0,
634
- ):
635
- super().__init__()
636
- self.train_kv = train_kv
637
- self.train_q_out = train_q_out
638
-
639
- self.hidden_size = hidden_size
640
- self.cross_attention_dim = cross_attention_dim
641
-
642
- # `_custom_diffusion` id for easy serialization and loading.
643
- if self.train_kv:
644
- self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
645
- self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
646
- if self.train_q_out:
647
- self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False)
648
- self.to_out_custom_diffusion = nn.ModuleList([])
649
- self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias))
650
- self.to_out_custom_diffusion.append(nn.Dropout(dropout))
651
-
652
- def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):
653
- batch_size, sequence_length, _ = hidden_states.shape
654
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
655
- if self.train_q_out:
656
- query = self.to_q_custom_diffusion(hidden_states).to(attn.to_q.weight.dtype)
657
- else:
658
- query = attn.to_q(hidden_states.to(attn.to_q.weight.dtype))
659
-
660
- if encoder_hidden_states is None:
661
- crossattn = False
662
- encoder_hidden_states = hidden_states
663
- else:
664
- crossattn = True
665
- if attn.norm_cross:
666
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
667
-
668
- if self.train_kv:
669
- key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype))
670
- value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype))
671
- key = key.to(attn.to_q.weight.dtype)
672
- value = value.to(attn.to_q.weight.dtype)
673
- else:
674
- key = attn.to_k(encoder_hidden_states)
675
- value = attn.to_v(encoder_hidden_states)
676
-
677
- if crossattn:
678
- detach = torch.ones_like(key)
679
- detach[:, :1, :] = detach[:, :1, :] * 0.0
680
- key = detach * key + (1 - detach) * key.detach()
681
- value = detach * value + (1 - detach) * value.detach()
682
-
683
- query = attn.head_to_batch_dim(query)
684
- key = attn.head_to_batch_dim(key)
685
- value = attn.head_to_batch_dim(value)
686
-
687
- attention_probs = attn.get_attention_scores(query, key, attention_mask)
688
- hidden_states = torch.bmm(attention_probs, value)
689
- hidden_states = attn.batch_to_head_dim(hidden_states)
690
-
691
- if self.train_q_out:
692
- # linear proj
693
- hidden_states = self.to_out_custom_diffusion[0](hidden_states)
694
- # dropout
695
- hidden_states = self.to_out_custom_diffusion[1](hidden_states)
696
- else:
697
- # linear proj
698
- hidden_states = attn.to_out[0](hidden_states)
699
- # dropout
700
- hidden_states = attn.to_out[1](hidden_states)
701
-
702
- return hidden_states
703
-
704
-
705
- class AttnAddedKVProcessor:
706
- r"""
707
- Processor for performing attention-related computations with extra learnable key and value matrices for the text
708
- encoder.
709
- """
710
-
711
- def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):
712
- residual = hidden_states
713
- hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2)
714
- batch_size, sequence_length, _ = hidden_states.shape
715
-
716
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
717
-
718
- if encoder_hidden_states is None:
719
- encoder_hidden_states = hidden_states
720
- elif attn.norm_cross:
721
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
722
-
723
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
724
-
725
- query = attn.to_q(hidden_states)
726
- query = attn.head_to_batch_dim(query)
727
-
728
- encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states)
729
- encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states)
730
- encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj)
731
- encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj)
732
-
733
- if not attn.only_cross_attention:
734
- key = attn.to_k(hidden_states)
735
- value = attn.to_v(hidden_states)
736
- key = attn.head_to_batch_dim(key)
737
- value = attn.head_to_batch_dim(value)
738
- key = torch.cat([encoder_hidden_states_key_proj, key], dim=1)
739
- value = torch.cat([encoder_hidden_states_value_proj, value], dim=1)
740
- else:
741
- key = encoder_hidden_states_key_proj
742
- value = encoder_hidden_states_value_proj
743
-
744
- attention_probs = attn.get_attention_scores(query, key, attention_mask)
745
- hidden_states = torch.bmm(attention_probs, value)
746
- hidden_states = attn.batch_to_head_dim(hidden_states)
747
-
748
- # linear proj
749
- hidden_states = attn.to_out[0](hidden_states)
750
- # dropout
751
- hidden_states = attn.to_out[1](hidden_states)
752
-
753
- hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape)
754
- hidden_states = hidden_states + residual
755
-
756
- return hidden_states
757
-
758
-
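The central move in the added-KV processor above (and its 2.0 variant below), shown in isolation: keys (and likewise values) projected from the text encoder are concatenated with the self-attention keys along the token dimension (illustrative shapes):

import torch

self_k = torch.randn(16, 64, 40)   # (batch * heads, image tokens, head_dim)
text_k = torch.randn(16, 77, 40)   # (batch * heads, text tokens, head_dim)
key = torch.cat([text_k, self_k], dim=1)
print(key.shape)                   # torch.Size([16, 141, 40])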
759
- class AttnAddedKVProcessor2_0:
760
- r"""
761
- Processor for performing scaled dot-product attention (enabled by default if you're using PyTorch 2.0), with extra
762
- learnable key and value matrices for the text encoder.
763
- """
764
-
765
- def __init__(self):
766
- if not hasattr(F, "scaled_dot_product_attention"):
767
- raise ImportError(
768
- "AttnAddedKVProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
769
- )
770
-
771
- def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):
772
- residual = hidden_states
773
- hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2)
774
- batch_size, sequence_length, _ = hidden_states.shape
775
-
776
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size, out_dim=4)
777
-
778
- if encoder_hidden_states is None:
779
- encoder_hidden_states = hidden_states
780
- elif attn.norm_cross:
781
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
782
-
783
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
784
-
785
- query = attn.to_q(hidden_states)
786
- query = attn.head_to_batch_dim(query, out_dim=4)
787
-
788
- encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states)
789
- encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states)
790
- encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj, out_dim=4)
791
- encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj, out_dim=4)
792
-
793
- if not attn.only_cross_attention:
794
- key = attn.to_k(hidden_states)
795
- value = attn.to_v(hidden_states)
796
- key = attn.head_to_batch_dim(key, out_dim=4)
797
- value = attn.head_to_batch_dim(value, out_dim=4)
798
- key = torch.cat([encoder_hidden_states_key_proj, key], dim=2)
799
- value = torch.cat([encoder_hidden_states_value_proj, value], dim=2)
800
- else:
801
- key = encoder_hidden_states_key_proj
802
- value = encoder_hidden_states_value_proj
803
-
804
- # the output of sdp = (batch, num_heads, seq_len, head_dim)
805
- # TODO: add support for attn.scale when we move to Torch 2.1
806
- hidden_states = F.scaled_dot_product_attention(
807
- query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
808
- )
809
- hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, residual.shape[1])
810
-
811
- # linear proj
812
- hidden_states = attn.to_out[0](hidden_states)
813
- # dropout
814
- hidden_states = attn.to_out[1](hidden_states)
815
-
816
- hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape)
817
- hidden_states = hidden_states + residual
818
-
819
- return hidden_states
820
-
821
-
822
- class LoRAAttnAddedKVProcessor(nn.Module):
823
- r"""
824
- Processor for implementing the LoRA attention mechanism with extra learnable key and value matrices for the text
825
- encoder.
826
-
827
- Args:
828
- hidden_size (`int`, *optional*):
829
- The hidden size of the attention layer.
830
- cross_attention_dim (`int`, *optional*, defaults to `None`):
831
- The number of channels in the `encoder_hidden_states`.
832
- rank (`int`, defaults to 4):
833
- The dimension of the LoRA update matrices.
834
-
835
- """
836
-
837
- def __init__(self, hidden_size, cross_attention_dim=None, rank=4, network_alpha=None):
838
- super().__init__()
839
-
840
- self.hidden_size = hidden_size
841
- self.cross_attention_dim = cross_attention_dim
842
- self.rank = rank
843
-
844
- self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
845
- self.add_k_proj_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
846
- self.add_v_proj_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
847
- self.to_k_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
848
- self.to_v_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
849
- self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
850
-
851
- def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, scale=1.0):
852
- residual = hidden_states
853
- hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2)
854
- batch_size, sequence_length, _ = hidden_states.shape
855
-
856
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
857
-
858
- if encoder_hidden_states is None:
859
- encoder_hidden_states = hidden_states
860
- elif attn.norm_cross:
861
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
862
-
863
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
864
-
865
- query = attn.to_q(hidden_states) + scale * self.to_q_lora(hidden_states)
866
- query = attn.head_to_batch_dim(query)
867
-
868
- encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) + scale * self.add_k_proj_lora(
869
- encoder_hidden_states
870
- )
871
- encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) + scale * self.add_v_proj_lora(
872
- encoder_hidden_states
873
- )
874
- encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj)
875
- encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj)
876
-
877
- if not attn.only_cross_attention:
878
- key = attn.to_k(hidden_states) + scale * self.to_k_lora(hidden_states)
879
- value = attn.to_v(hidden_states) + scale * self.to_v_lora(hidden_states)
880
- key = attn.head_to_batch_dim(key)
881
- value = attn.head_to_batch_dim(value)
882
- key = torch.cat([encoder_hidden_states_key_proj, key], dim=1)
883
- value = torch.cat([encoder_hidden_states_value_proj, value], dim=1)
884
- else:
885
- key = encoder_hidden_states_key_proj
886
- value = encoder_hidden_states_value_proj
887
-
888
- attention_probs = attn.get_attention_scores(query, key, attention_mask)
889
- hidden_states = torch.bmm(attention_probs, value)
890
- hidden_states = attn.batch_to_head_dim(hidden_states)
891
-
892
- # linear proj
893
- hidden_states = attn.to_out[0](hidden_states) + scale * self.to_out_lora(hidden_states)
894
- # dropout
895
- hidden_states = attn.to_out[1](hidden_states)
896
-
897
- hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape)
898
- hidden_states = hidden_states + residual
899
-
900
- return hidden_states
901
-
902
-
903
- class XFormersAttnAddedKVProcessor:
904
- r"""
905
- Processor for implementing memory efficient attention using xFormers.
906
-
907
- Args:
908
- attention_op (`Callable`, *optional*, defaults to `None`):
909
- The base
910
- [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to
911
- use as the attention operator. It is recommended to set this to `None` and allow xFormers to choose the best
912
- operator.
913
- """
914
-
915
- def __init__(self, attention_op: Optional[Callable] = None):
916
- self.attention_op = attention_op
917
-
918
- def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):
919
- residual = hidden_states
920
- hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2)
921
- batch_size, sequence_length, _ = hidden_states.shape
922
-
923
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
924
-
925
- if encoder_hidden_states is None:
926
- encoder_hidden_states = hidden_states
927
- elif attn.norm_cross:
928
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
929
-
930
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
931
-
932
- query = attn.to_q(hidden_states)
933
- query = attn.head_to_batch_dim(query)
934
-
935
- encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states)
936
- encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states)
937
- encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj)
938
- encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj)
939
-
940
- if not attn.only_cross_attention:
941
- key = attn.to_k(hidden_states)
942
- value = attn.to_v(hidden_states)
943
- key = attn.head_to_batch_dim(key)
944
- value = attn.head_to_batch_dim(value)
945
- key = torch.cat([encoder_hidden_states_key_proj, key], dim=1)
946
- value = torch.cat([encoder_hidden_states_value_proj, value], dim=1)
947
- else:
948
- key = encoder_hidden_states_key_proj
949
- value = encoder_hidden_states_value_proj
950
-
951
- hidden_states = xformers.ops.memory_efficient_attention(
952
- query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale
953
- )
954
- hidden_states = hidden_states.to(query.dtype)
955
- hidden_states = attn.batch_to_head_dim(hidden_states)
956
-
957
- # linear proj
958
- hidden_states = attn.to_out[0](hidden_states)
959
- # dropout
960
- hidden_states = attn.to_out[1](hidden_states)
961
-
962
- hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape)
963
- hidden_states = hidden_states + residual
964
-
965
- return hidden_states
966
-
967
-
968
- class XFormersAttnProcessor:
969
- r"""
970
- Processor for implementing memory efficient attention using xFormers.
971
-
972
- Args:
973
- attention_op (`Callable`, *optional*, defaults to `None`):
974
- The base
975
- [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to
976
- use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best
977
- operator.
978
- """
979
-
980
- def __init__(self, attention_op: Optional[Callable] = None):
981
- self.attention_op = attention_op
982
-
983
- def __call__(
984
- self,
985
- attn: Attention,
986
- hidden_states: torch.FloatTensor,
987
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
988
- attention_mask: Optional[torch.FloatTensor] = None,
989
- temb: Optional[torch.FloatTensor] = None,
990
- ):
991
- residual = hidden_states
992
-
993
- if attn.spatial_norm is not None:
994
- hidden_states = attn.spatial_norm(hidden_states, temb)
995
-
996
- input_ndim = hidden_states.ndim
997
-
998
- if input_ndim == 4:
999
- batch_size, channel, height, width = hidden_states.shape
1000
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
1001
-
1002
- batch_size, key_tokens, _ = (
1003
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
1004
- )
1005
-
1006
- attention_mask = attn.prepare_attention_mask(attention_mask, key_tokens, batch_size)
1007
- if attention_mask is not None:
1008
- # expand our mask's singleton query_tokens dimension:
1009
- # [batch*heads, 1, key_tokens] ->
1010
- # [batch*heads, query_tokens, key_tokens]
1011
- # so that it can be added as a bias onto the attention scores that xformers computes:
1012
- # [batch*heads, query_tokens, key_tokens]
1013
- # we do this explicitly because xformers doesn't broadcast the singleton dimension for us.
1014
- _, query_tokens, _ = hidden_states.shape
1015
- attention_mask = attention_mask.expand(-1, query_tokens, -1)
1016
-
1017
- if attn.group_norm is not None:
1018
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
1019
-
1020
- query = attn.to_q(hidden_states)
1021
-
1022
- if encoder_hidden_states is None:
1023
- encoder_hidden_states = hidden_states
1024
- elif attn.norm_cross:
1025
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
1026
-
1027
- key = attn.to_k(encoder_hidden_states)
1028
- value = attn.to_v(encoder_hidden_states)
1029
-
1030
- query = attn.head_to_batch_dim(query).contiguous()
1031
- key = attn.head_to_batch_dim(key).contiguous()
1032
- value = attn.head_to_batch_dim(value).contiguous()
1033
-
1034
- hidden_states = xformers.ops.memory_efficient_attention(
1035
- query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale
1036
- )
1037
- hidden_states = hidden_states.to(query.dtype)
1038
- hidden_states = attn.batch_to_head_dim(hidden_states)
1039
-
1040
- # linear proj
1041
- hidden_states = attn.to_out[0](hidden_states)
1042
- # dropout
1043
- hidden_states = attn.to_out[1](hidden_states)
1044
-
1045
- if input_ndim == 4:
1046
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
1047
-
1048
- if attn.residual_connection:
1049
- hidden_states = hidden_states + residual
1050
-
1051
- hidden_states = hidden_states / attn.rescale_output_factor
1052
-
1053
- return hidden_states
1054
-
1055
-
1056
- class AttnProcessor2_0:
1057
- r"""
1058
- Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
1059
- """
1060
-
1061
- def __init__(self):
1062
- if not hasattr(F, "scaled_dot_product_attention"):
1063
- raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
1064
-
1065
- def __call__(
1066
- self,
1067
- attn: Attention,
1068
- hidden_states,
1069
- encoder_hidden_states=None,
1070
- attention_mask=None,
1071
- temb=None,
1072
- ):
1073
- residual = hidden_states
1074
-
1075
- if attn.spatial_norm is not None:
1076
- hidden_states = attn.spatial_norm(hidden_states, temb)
1077
-
1078
- input_ndim = hidden_states.ndim
1079
-
1080
- if input_ndim == 4:
1081
- batch_size, channel, height, width = hidden_states.shape
1082
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
1083
-
1084
- batch_size, sequence_length, _ = (
1085
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
1086
- )
1087
-
1088
- if attention_mask is not None:
1089
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
1090
- # scaled_dot_product_attention expects attention_mask shape to be
1091
- # (batch, heads, source_length, target_length)
1092
- attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
1093
-
1094
- if attn.group_norm is not None:
1095
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
1096
-
1097
- query = attn.to_q(hidden_states)
1098
-
1099
- if encoder_hidden_states is None:
1100
- encoder_hidden_states = hidden_states
1101
- elif attn.norm_cross:
1102
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
1103
-
1104
- key = attn.to_k(encoder_hidden_states)
1105
- value = attn.to_v(encoder_hidden_states)
1106
-
1107
- inner_dim = key.shape[-1]
1108
- head_dim = inner_dim // attn.heads
1109
-
1110
- query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
1111
-
1112
- key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
1113
- value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
1114
-
1115
- # the output of sdp = (batch, num_heads, seq_len, head_dim)
1116
- # TODO: add support for attn.scale when we move to Torch 2.1
1117
- hidden_states = F.scaled_dot_product_attention(
1118
- query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
1119
- )
1120
-
1121
- hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
1122
- hidden_states = hidden_states.to(query.dtype)
1123
-
1124
- # linear proj
1125
- hidden_states = attn.to_out[0](hidden_states)
1126
- # dropout
1127
- hidden_states = attn.to_out[1](hidden_states)
1128
-
1129
- if input_ndim == 4:
1130
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
1131
-
1132
- if attn.residual_connection:
1133
- hidden_states = hidden_states + residual
1134
-
1135
- hidden_states = hidden_states / attn.rescale_output_factor
1136
-
1137
- return hidden_states
1138
-
1139
-
1140
- class LoRAXFormersAttnProcessor(nn.Module):
1141
- r"""
1142
- Processor for implementing the LoRA attention mechanism with memory efficient attention using xFormers.
1143
-
1144
- Args:
1145
- hidden_size (`int`, *optional*):
1146
- The hidden size of the attention layer.
1147
- cross_attention_dim (`int`, *optional*):
1148
- The number of channels in the `encoder_hidden_states`.
1149
- rank (`int`, defaults to 4):
1150
- The dimension of the LoRA update matrices.
1151
- attention_op (`Callable`, *optional*, defaults to `None`):
1152
- The base
1153
- [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to
1154
- use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best
1155
- operator.
1156
- network_alpha (`int`, *optional*):
1157
- Equivalent to `alpha`, but its usage is specific to Kohya (A1111) style LoRAs.
1158
-
1159
- """
1160
-
1161
- def __init__(
1162
- self,
1163
- hidden_size,
1164
- cross_attention_dim,
1165
- rank=4,
1166
- attention_op: Optional[Callable] = None,
1167
- network_alpha=None,
1168
- **kwargs,
1169
- ):
1170
- super().__init__()
1171
-
1172
- self.hidden_size = hidden_size
1173
- self.cross_attention_dim = cross_attention_dim
1174
- self.rank = rank
1175
- self.attention_op = attention_op
1176
-
1177
- q_rank = kwargs.pop("q_rank", None)
1178
- q_hidden_size = kwargs.pop("q_hidden_size", None)
1179
- q_rank = q_rank if q_rank is not None else rank
1180
- q_hidden_size = q_hidden_size if q_hidden_size is not None else hidden_size
1181
-
1182
- v_rank = kwargs.pop("v_rank", None)
1183
- v_hidden_size = kwargs.pop("v_hidden_size", None)
1184
- v_rank = v_rank if v_rank is not None else rank
1185
- v_hidden_size = v_hidden_size if v_hidden_size is not None else hidden_size
1186
-
1187
- out_rank = kwargs.pop("out_rank", None)
1188
- out_hidden_size = kwargs.pop("out_hidden_size", None)
1189
- out_rank = out_rank if out_rank is not None else rank
1190
- out_hidden_size = out_hidden_size if out_hidden_size is not None else hidden_size
1191
-
1192
- self.to_q_lora = LoRALinearLayer(q_hidden_size, q_hidden_size, q_rank, network_alpha)
1193
- self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
1194
- self.to_v_lora = LoRALinearLayer(cross_attention_dim or v_hidden_size, v_hidden_size, v_rank, network_alpha)
1195
- self.to_out_lora = LoRALinearLayer(out_hidden_size, out_hidden_size, out_rank, network_alpha)
1196
-
1197
- def __call__(
1198
- self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, scale=1.0, temb=None
1199
- ):
1200
- residual = hidden_states
1201
-
1202
- if attn.spatial_norm is not None:
1203
- hidden_states = attn.spatial_norm(hidden_states, temb)
1204
-
1205
- input_ndim = hidden_states.ndim
1206
-
1207
- if input_ndim == 4:
1208
- batch_size, channel, height, width = hidden_states.shape
1209
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
1210
-
1211
- batch_size, sequence_length, _ = (
1212
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
1213
- )
1214
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
1215
-
1216
- if attn.group_norm is not None:
1217
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
1218
-
1219
- query = attn.to_q(hidden_states) + scale * self.to_q_lora(hidden_states)
1220
- query = attn.head_to_batch_dim(query).contiguous()
1221
-
1222
- if encoder_hidden_states is None:
1223
- encoder_hidden_states = hidden_states
1224
- elif attn.norm_cross:
1225
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
1226
-
1227
- key = attn.to_k(encoder_hidden_states) + scale * self.to_k_lora(encoder_hidden_states)
1228
- value = attn.to_v(encoder_hidden_states) + scale * self.to_v_lora(encoder_hidden_states)
1229
-
1230
- key = attn.head_to_batch_dim(key).contiguous()
1231
- value = attn.head_to_batch_dim(value).contiguous()
1232
-
1233
- hidden_states = xformers.ops.memory_efficient_attention(
1234
- query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale
1235
- )
1236
- hidden_states = attn.batch_to_head_dim(hidden_states)
1237
-
1238
- # linear proj
1239
- hidden_states = attn.to_out[0](hidden_states) + scale * self.to_out_lora(hidden_states)
1240
- # dropout
1241
- hidden_states = attn.to_out[1](hidden_states)
1242
-
1243
- if input_ndim == 4:
1244
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
1245
-
1246
- if attn.residual_connection:
1247
- hidden_states = hidden_states + residual
1248
-
1249
- hidden_states = hidden_states / attn.rescale_output_factor
1250
-
1251
- return hidden_states
1252
-
1253
-
1254
- class LoRAAttnProcessor2_0(nn.Module):
1255
- r"""
1256
- Processor for implementing the LoRA attention mechanism using PyTorch 2.0's memory-efficient scaled dot-product
1257
- attention.
1258
-
1259
- Args:
1260
- hidden_size (`int`):
1261
- The hidden size of the attention layer.
1262
- cross_attention_dim (`int`, *optional*):
1263
- The number of channels in the `encoder_hidden_states`.
1264
- rank (`int`, defaults to 4):
1265
- The dimension of the LoRA update matrices.
1266
- network_alpha (`int`, *optional*):
1267
- Equivalent to `alpha`, but its usage is specific to Kohya (A1111) style LoRAs.
1268
- """
1269
-
1270
- def __init__(self, hidden_size, cross_attention_dim=None, rank=4, network_alpha=None, **kwargs):
1271
- super().__init__()
1272
- if not hasattr(F, "scaled_dot_product_attention"):
1273
- raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
1274
-
1275
- self.hidden_size = hidden_size
1276
- self.cross_attention_dim = cross_attention_dim
1277
- self.rank = rank
1278
-
1279
- q_rank = kwargs.pop("q_rank", None)
1280
- q_hidden_size = kwargs.pop("q_hidden_size", None)
1281
- q_rank = q_rank if q_rank is not None else rank
1282
- q_hidden_size = q_hidden_size if q_hidden_size is not None else hidden_size
1283
-
1284
- v_rank = kwargs.pop("v_rank", None)
1285
- v_hidden_size = kwargs.pop("v_hidden_size", None)
1286
- v_rank = v_rank if v_rank is not None else rank
1287
- v_hidden_size = v_hidden_size if v_hidden_size is not None else hidden_size
1288
-
1289
- out_rank = kwargs.pop("out_rank", None)
1290
- out_hidden_size = kwargs.pop("out_hidden_size", None)
1291
- out_rank = out_rank if out_rank is not None else rank
1292
- out_hidden_size = out_hidden_size if out_hidden_size is not None else hidden_size
1293
-
1294
- self.to_q_lora = LoRALinearLayer(q_hidden_size, q_hidden_size, q_rank, network_alpha)
1295
- self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
1296
- self.to_v_lora = LoRALinearLayer(cross_attention_dim or v_hidden_size, v_hidden_size, v_rank, network_alpha)
1297
- self.to_out_lora = LoRALinearLayer(out_hidden_size, out_hidden_size, out_rank, network_alpha)
1298
-
1299
- def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, scale=1.0):
1300
- residual = hidden_states
1301
-
1302
- input_ndim = hidden_states.ndim
1303
-
1304
- if input_ndim == 4:
1305
- batch_size, channel, height, width = hidden_states.shape
1306
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
1307
-
1308
- batch_size, sequence_length, _ = (
1309
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
1310
- )
1311
- inner_dim = hidden_states.shape[-1]
1312
-
1313
- if attention_mask is not None:
1314
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
1315
- # scaled_dot_product_attention expects attention_mask shape to be
1316
- # (batch, heads, source_length, target_length)
1317
- attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
1318
-
1319
- if attn.group_norm is not None:
1320
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
1321
-
1322
- query = attn.to_q(hidden_states) + scale * self.to_q_lora(hidden_states)
1323
-
1324
- if encoder_hidden_states is None:
1325
- encoder_hidden_states = hidden_states
1326
- elif attn.norm_cross:
1327
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
1328
-
1329
- key = attn.to_k(encoder_hidden_states) + scale * self.to_k_lora(encoder_hidden_states)
1330
- value = attn.to_v(encoder_hidden_states) + scale * self.to_v_lora(encoder_hidden_states)
1331
-
1332
- head_dim = inner_dim // attn.heads
1333
- query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
1334
- key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
1335
- value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
1336
-
1337
- # TODO: add support for attn.scale when we move to Torch 2.1
1338
- hidden_states = F.scaled_dot_product_attention(
1339
- query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
1340
- )
1341
- hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
1342
- hidden_states = hidden_states.to(query.dtype)
1343
-
1344
- # linear proj
1345
- hidden_states = attn.to_out[0](hidden_states) + scale * self.to_out_lora(hidden_states)
1346
- # dropout
1347
- hidden_states = attn.to_out[1](hidden_states)
1348
-
1349
- if input_ndim == 4:
1350
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
1351
-
1352
- if attn.residual_connection:
1353
- hidden_states = hidden_states + residual
1354
-
1355
- hidden_states = hidden_states / attn.rescale_output_factor
1356
-
1357
- return hidden_states
1358
-
1359
-
1360
- class CustomDiffusionXFormersAttnProcessor(nn.Module):
1361
- r"""
1362
- Processor for implementing memory efficient attention using xFormers for the Custom Diffusion method.
1363
-
1364
- Args:
1365
- train_kv (`bool`, defaults to `True`):
1366
- Whether to newly train the key and value matrices corresponding to the text features.
1367
- train_q_out (`bool`, defaults to `True`):
1368
- Whether to newly train query matrices corresponding to the latent image features.
1369
- hidden_size (`int`, *optional*, defaults to `None`):
1370
- The hidden size of the attention layer.
1371
- cross_attention_dim (`int`, *optional*, defaults to `None`):
1372
- The number of channels in the `encoder_hidden_states`.
1373
- out_bias (`bool`, defaults to `True`):
1374
- Whether to include the bias parameter in `train_q_out`.
1375
- dropout (`float`, *optional*, defaults to 0.0):
1376
- The dropout probability to use.
1377
- attention_op (`Callable`, *optional*, defaults to `None`):
1378
- The base
1379
- [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to use
1380
- as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best operator.
1381
- """
1382
-
1383
- def __init__(
1384
- self,
1385
- train_kv=True,
1386
- train_q_out=False,
1387
- hidden_size=None,
1388
- cross_attention_dim=None,
1389
- out_bias=True,
1390
- dropout=0.0,
1391
- attention_op: Optional[Callable] = None,
1392
- ):
1393
- super().__init__()
1394
- self.train_kv = train_kv
1395
- self.train_q_out = train_q_out
1396
-
1397
- self.hidden_size = hidden_size
1398
- self.cross_attention_dim = cross_attention_dim
1399
- self.attention_op = attention_op
1400
-
1401
- # `_custom_diffusion` id for easy serialization and loading.
1402
- if self.train_kv:
1403
- self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
1404
- self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
1405
- if self.train_q_out:
1406
- self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False)
1407
- self.to_out_custom_diffusion = nn.ModuleList([])
1408
- self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias))
1409
- self.to_out_custom_diffusion.append(nn.Dropout(dropout))
1410
-
1411
- def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):
1412
- batch_size, sequence_length, _ = (
1413
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
1414
- )
1415
-
1416
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
1417
-
1418
- if self.train_q_out:
1419
- query = self.to_q_custom_diffusion(hidden_states).to(attn.to_q.weight.dtype)
1420
- else:
1421
- query = attn.to_q(hidden_states.to(attn.to_q.weight.dtype))
1422
-
1423
- if encoder_hidden_states is None:
1424
- crossattn = False
1425
- encoder_hidden_states = hidden_states
1426
- else:
1427
- crossattn = True
1428
- if attn.norm_cross:
1429
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
1430
-
1431
- if self.train_kv:
1432
- key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype))
1433
- value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype))
1434
- key = key.to(attn.to_q.weight.dtype)
1435
- value = value.to(attn.to_q.weight.dtype)
1436
- else:
1437
- key = attn.to_k(encoder_hidden_states)
1438
- value = attn.to_v(encoder_hidden_states)
1439
-
1440
- if crossattn:
1441
- detach = torch.ones_like(key)
1442
- detach[:, :1, :] = detach[:, :1, :] * 0.0
1443
- key = detach * key + (1 - detach) * key.detach()
1444
- value = detach * value + (1 - detach) * value.detach()
1445
-
1446
- query = attn.head_to_batch_dim(query).contiguous()
1447
- key = attn.head_to_batch_dim(key).contiguous()
1448
- value = attn.head_to_batch_dim(value).contiguous()
1449
-
1450
- hidden_states = xformers.ops.memory_efficient_attention(
1451
- query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale
1452
- )
1453
- hidden_states = hidden_states.to(query.dtype)
1454
- hidden_states = attn.batch_to_head_dim(hidden_states)
1455
-
1456
- if self.train_q_out:
1457
- # linear proj
1458
- hidden_states = self.to_out_custom_diffusion[0](hidden_states)
1459
- # dropout
1460
- hidden_states = self.to_out_custom_diffusion[1](hidden_states)
1461
- else:
1462
- # linear proj
1463
- hidden_states = attn.to_out[0](hidden_states)
1464
- # dropout
1465
- hidden_states = attn.to_out[1](hidden_states)
1466
- return hidden_states
1467
-
1468
-
1469
- class SlicedAttnProcessor:
1470
- r"""
1471
- Processor for implementing sliced attention.
1472
-
1473
- Args:
1474
- slice_size (`int`, *optional*):
1475
- The number of steps to compute attention. Uses as many slices as `attention_head_dim // slice_size`, and
1476
- `attention_head_dim` must be a multiple of the `slice_size`.
1477
- """
1478
-
1479
- def __init__(self, slice_size):
1480
- self.slice_size = slice_size
1481
-
1482
- def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):
1483
- residual = hidden_states
1484
-
1485
- input_ndim = hidden_states.ndim
1486
-
1487
- if input_ndim == 4:
1488
- batch_size, channel, height, width = hidden_states.shape
1489
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
1490
-
1491
- batch_size, sequence_length, _ = (
1492
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
1493
- )
1494
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
1495
-
1496
- if attn.group_norm is not None:
1497
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
1498
-
1499
- query = attn.to_q(hidden_states)
1500
- dim = query.shape[-1]
1501
- query = attn.head_to_batch_dim(query)
1502
-
1503
- if encoder_hidden_states is None:
1504
- encoder_hidden_states = hidden_states
1505
- elif attn.norm_cross:
1506
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
1507
-
1508
- key = attn.to_k(encoder_hidden_states)
1509
- value = attn.to_v(encoder_hidden_states)
1510
- key = attn.head_to_batch_dim(key)
1511
- value = attn.head_to_batch_dim(value)
1512
-
1513
- batch_size_attention, query_tokens, _ = query.shape
1514
- hidden_states = torch.zeros(
1515
- (batch_size_attention, query_tokens, dim // attn.heads), device=query.device, dtype=query.dtype
1516
- )
1517
-
1518
- for i in range(batch_size_attention // self.slice_size):
1519
- start_idx = i * self.slice_size
1520
- end_idx = (i + 1) * self.slice_size
1521
-
1522
- query_slice = query[start_idx:end_idx]
1523
- key_slice = key[start_idx:end_idx]
1524
- attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None
1525
-
1526
- attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice)
1527
-
1528
- attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx])
1529
-
1530
- hidden_states[start_idx:end_idx] = attn_slice
1531
-
1532
- hidden_states = attn.batch_to_head_dim(hidden_states)
1533
-
1534
- # linear proj
1535
- hidden_states = attn.to_out[0](hidden_states)
1536
- # dropout
1537
- hidden_states = attn.to_out[1](hidden_states)
1538
-
1539
- if input_ndim == 4:
1540
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
1541
-
1542
- if attn.residual_connection:
1543
- hidden_states = hidden_states + residual
1544
-
1545
- hidden_states = hidden_states / attn.rescale_output_factor
1546
-
1547
- return hidden_states
1548
-
1549
-
1550
- class SlicedAttnAddedKVProcessor:
1551
- r"""
1552
- Processor for implementing sliced attention with extra learnable key and value matrices for the text encoder.
1553
-
1554
- Args:
1555
- slice_size (`int`, *optional*):
1556
- The number of steps to compute attention. Uses as many slices as `attention_head_dim // slice_size`, and
1557
- `attention_head_dim` must be a multiple of the `slice_size`.
1558
- """
1559
-
1560
- def __init__(self, slice_size):
1561
- self.slice_size = slice_size
1562
-
1563
- def __call__(self, attn: "Attention", hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None):
1564
- residual = hidden_states
1565
-
1566
- if attn.spatial_norm is not None:
1567
- hidden_states = attn.spatial_norm(hidden_states, temb)
1568
-
1569
- hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2)
1570
-
1571
- batch_size, sequence_length, _ = hidden_states.shape
1572
-
1573
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
1574
-
1575
- if encoder_hidden_states is None:
1576
- encoder_hidden_states = hidden_states
1577
- elif attn.norm_cross:
1578
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
1579
-
1580
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
1581
-
1582
- query = attn.to_q(hidden_states)
1583
- dim = query.shape[-1]
1584
- query = attn.head_to_batch_dim(query)
1585
-
1586
- encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states)
1587
- encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states)
1588
-
1589
- encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj)
1590
- encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj)
1591
-
1592
- if not attn.only_cross_attention:
1593
- key = attn.to_k(hidden_states)
1594
- value = attn.to_v(hidden_states)
1595
- key = attn.head_to_batch_dim(key)
1596
- value = attn.head_to_batch_dim(value)
1597
- key = torch.cat([encoder_hidden_states_key_proj, key], dim=1)
1598
- value = torch.cat([encoder_hidden_states_value_proj, value], dim=1)
1599
- else:
1600
- key = encoder_hidden_states_key_proj
1601
- value = encoder_hidden_states_value_proj
1602
-
1603
- batch_size_attention, query_tokens, _ = query.shape
1604
- hidden_states = torch.zeros(
1605
- (batch_size_attention, query_tokens, dim // attn.heads), device=query.device, dtype=query.dtype
1606
- )
1607
-
1608
- for i in range(batch_size_attention // self.slice_size):
1609
- start_idx = i * self.slice_size
1610
- end_idx = (i + 1) * self.slice_size
1611
-
1612
- query_slice = query[start_idx:end_idx]
1613
- key_slice = key[start_idx:end_idx]
1614
- attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None
1615
-
1616
- attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice)
1617
-
1618
- attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx])
1619
-
1620
- hidden_states[start_idx:end_idx] = attn_slice
1621
-
1622
- hidden_states = attn.batch_to_head_dim(hidden_states)
1623
-
1624
- # linear proj
1625
- hidden_states = attn.to_out[0](hidden_states)
1626
- # dropout
1627
- hidden_states = attn.to_out[1](hidden_states)
1628
-
1629
- hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape)
1630
- hidden_states = hidden_states + residual
1631
-
1632
- return hidden_states
1633
-
1634
-
1635
- AttentionProcessor = Union[
1636
- AttnProcessor,
1637
- AttnProcessor2_0,
1638
- XFormersAttnProcessor,
1639
- SlicedAttnProcessor,
1640
- AttnAddedKVProcessor,
1641
- SlicedAttnAddedKVProcessor,
1642
- AttnAddedKVProcessor2_0,
1643
- XFormersAttnAddedKVProcessor,
1644
- LoRAAttnProcessor,
1645
- LoRAXFormersAttnProcessor,
1646
- LoRAAttnProcessor2_0,
1647
- LoRAAttnAddedKVProcessor,
1648
- CustomDiffusionAttnProcessor,
1649
- CustomDiffusionXFormersAttnProcessor,
1650
- ]
1651
-
1652
- LORA_ATTENTION_PROCESSORS = (
1653
- LoRAAttnProcessor,
1654
- LoRAAttnProcessor2_0,
1655
- LoRAXFormersAttnProcessor,
1656
- LoRAAttnAddedKVProcessor,
1657
- )
1658
-
1659
-
1660
- class SpatialNorm(nn.Module):
1661
- """
1662
- Spatially conditioned normalization as defined in https://arxiv.org/abs/2209.09002
1663
- """
1664
-
1665
- def __init__(
1666
- self,
1667
- f_channels,
1668
- zq_channels,
1669
- ):
1670
- super().__init__()
1671
- self.norm_layer = nn.GroupNorm(num_channels=f_channels, num_groups=32, eps=1e-6, affine=True)
1672
- self.conv_y = nn.Conv2d(zq_channels, f_channels, kernel_size=1, stride=1, padding=0)
1673
- self.conv_b = nn.Conv2d(zq_channels, f_channels, kernel_size=1, stride=1, padding=0)
1674
-
1675
- def forward(self, f, zq):
1676
- f_size = f.shape[-2:]
1677
- zq = F.interpolate(zq, size=f_size, mode="nearest")
1678
- norm_f = self.norm_layer(f)
1679
- new_f = norm_f * self.conv_y(zq) + self.conv_b(zq)
1680
- return new_f
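The processors deleted above are normally attached to a model rather than called directly. A minimal sketch of that wiring, assuming a standard Stable Diffusion checkpoint and diffusers' `set_attn_processor` API (the checkpoint id below is illustrative):

```python
# Minimal sketch: routing a UNet's attention layers through AttnProcessor2_0.
# Assumes diffusers with PyTorch 2.0 available; the checkpoint id is illustrative.
import torch
from diffusers import StableDiffusionPipeline
from diffusers.models.attention_processor import AttnProcessor2_0

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
# Every Attention module in the UNet now dispatches to F.scaled_dot_product_attention.
pipe.unet.set_attn_processor(AttnProcessor2_0())
```

With PyTorch 2.0 installed this processor is already the default, so the explicit call mainly matters when switching back from a sliced or xFormers processor.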
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/__init__.py DELETED
@@ -1,122 +0,0 @@
1
- # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- import os
17
-
18
- from packaging import version
19
-
20
- from .. import __version__
21
- from .accelerate_utils import apply_forward_hook
22
- from .constants import (
23
- CONFIG_NAME,
24
- DEPRECATED_REVISION_ARGS,
25
- DIFFUSERS_CACHE,
26
- DIFFUSERS_DYNAMIC_MODULE_NAME,
27
- FLAX_WEIGHTS_NAME,
28
- HF_MODULES_CACHE,
29
- HUGGINGFACE_CO_RESOLVE_ENDPOINT,
30
- ONNX_EXTERNAL_WEIGHTS_NAME,
31
- ONNX_WEIGHTS_NAME,
32
- SAFETENSORS_WEIGHTS_NAME,
33
- WEIGHTS_NAME,
34
- )
35
- from .deprecation_utils import deprecate
36
- from .doc_utils import replace_example_docstring
37
- from .dynamic_modules_utils import get_class_from_dynamic_module
38
- from .hub_utils import (
39
- HF_HUB_OFFLINE,
40
- _add_variant,
41
- _get_model_file,
42
- extract_commit_hash,
43
- http_user_agent,
44
- )
45
- from .import_utils import (
46
- BACKENDS_MAPPING,
47
- ENV_VARS_TRUE_AND_AUTO_VALUES,
48
- ENV_VARS_TRUE_VALUES,
49
- USE_JAX,
50
- USE_TF,
51
- USE_TORCH,
52
- DummyObject,
53
- OptionalDependencyNotAvailable,
54
- is_accelerate_available,
55
- is_accelerate_version,
56
- is_bs4_available,
57
- is_flax_available,
58
- is_ftfy_available,
59
- is_inflect_available,
60
- is_invisible_watermark_available,
61
- is_k_diffusion_available,
62
- is_k_diffusion_version,
63
- is_librosa_available,
64
- is_note_seq_available,
65
- is_omegaconf_available,
66
- is_onnx_available,
67
- is_safetensors_available,
68
- is_scipy_available,
69
- is_tensorboard_available,
70
- is_tf_available,
71
- is_torch_available,
72
- is_torch_version,
73
- is_torchsde_available,
74
- is_transformers_available,
75
- is_transformers_version,
76
- is_unidecode_available,
77
- is_wandb_available,
78
- is_xformers_available,
79
- requires_backends,
80
- )
81
- from .logging import get_logger
82
- from .outputs import BaseOutput
83
- from .pil_utils import PIL_INTERPOLATION, numpy_to_pil, pt_to_pil
84
- from .torch_utils import is_compiled_module, randn_tensor
85
-
86
-
87
- if is_torch_available():
88
- from .testing_utils import (
89
- floats_tensor,
90
- load_hf_numpy,
91
- load_image,
92
- load_numpy,
93
- load_pt,
94
- nightly,
95
- parse_flag_from_env,
96
- print_tensor_test,
97
- require_torch_2,
98
- require_torch_gpu,
99
- skip_mps,
100
- slow,
101
- torch_all_close,
102
- torch_device,
103
- )
104
- from .torch_utils import maybe_allow_in_graph
105
-
106
- from .testing_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video
107
-
108
-
109
- logger = get_logger(__name__)
110
-
111
-
112
- def check_min_version(min_version):
113
- if version.parse(__version__) < version.parse(min_version):
114
- if "dev" in min_version:
115
- error_message = (
116
- "This example requires a source install from HuggingFace diffusers (see "
117
- "`https://huggingface.co/docs/diffusers/installation#install-from-source`),"
118
- )
119
- else:
120
- error_message = f"This example requires a minimum version of {min_version},"
121
- error_message += f" but the version found is {__version__}.\n"
122
- raise ImportError(error_message)
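The `check_min_version` helper at the end of this init is what diffusers example scripts call before doing anything else. A minimal usage sketch (the version string is illustrative):

```python
# Minimal sketch: guarding an example script against an outdated diffusers install.
from diffusers.utils import check_min_version

# Raises ImportError if the installed diffusers is older than the pinned version;
# a ".dev" pin asks for a source install in the error message.
check_min_version("0.19.0.dev0")
```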
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_kdpm2_discrete.py DELETED
@@ -1,132 +0,0 @@
1
- import torch
2
-
3
- from diffusers import KDPM2DiscreteScheduler
4
- from diffusers.utils import torch_device
5
-
6
- from .test_schedulers import SchedulerCommonTest
7
-
8
-
9
- class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
10
- scheduler_classes = (KDPM2DiscreteScheduler,)
11
- num_inference_steps = 10
12
-
13
- def get_scheduler_config(self, **kwargs):
14
- config = {
15
- "num_train_timesteps": 1100,
16
- "beta_start": 0.0001,
17
- "beta_end": 0.02,
18
- "beta_schedule": "linear",
19
- }
20
-
21
- config.update(**kwargs)
22
- return config
23
-
24
- def test_timesteps(self):
25
- for timesteps in [10, 50, 100, 1000]:
26
- self.check_over_configs(num_train_timesteps=timesteps)
27
-
28
- def test_betas(self):
29
- for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
30
- self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
31
-
32
- def test_schedules(self):
33
- for schedule in ["linear", "scaled_linear"]:
34
- self.check_over_configs(beta_schedule=schedule)
35
-
36
- def test_prediction_type(self):
37
- for prediction_type in ["epsilon", "v_prediction"]:
38
- self.check_over_configs(prediction_type=prediction_type)
39
-
40
- def test_full_loop_with_v_prediction(self):
41
- scheduler_class = self.scheduler_classes[0]
42
- scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
43
- scheduler = scheduler_class(**scheduler_config)
44
-
45
- scheduler.set_timesteps(self.num_inference_steps)
46
-
47
- model = self.dummy_model()
48
- sample = self.dummy_sample_deter * scheduler.init_noise_sigma
49
- sample = sample.to(torch_device)
50
-
51
- for i, t in enumerate(scheduler.timesteps):
52
- sample = scheduler.scale_model_input(sample, t)
53
-
54
- model_output = model(sample, t)
55
-
56
- output = scheduler.step(model_output, t, sample)
57
- sample = output.prev_sample
58
-
59
- result_sum = torch.sum(torch.abs(sample))
60
- result_mean = torch.mean(torch.abs(sample))
61
-
62
- if torch_device in ["cpu", "mps"]:
63
- assert abs(result_sum.item() - 4.6934e-07) < 1e-2
64
- assert abs(result_mean.item() - 6.1112e-10) < 1e-3
65
- else:
66
- # CUDA
67
- assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
68
- assert abs(result_mean.item() - 0.0002) < 1e-3
69
-
70
- def test_full_loop_no_noise(self):
71
- if torch_device == "mps":
72
- return
73
- scheduler_class = self.scheduler_classes[0]
74
- scheduler_config = self.get_scheduler_config()
75
- scheduler = scheduler_class(**scheduler_config)
76
-
77
- scheduler.set_timesteps(self.num_inference_steps)
78
-
79
- model = self.dummy_model()
80
- sample = self.dummy_sample_deter * scheduler.init_noise_sigma
81
- sample = sample.to(torch_device)
82
-
83
- for i, t in enumerate(scheduler.timesteps):
84
- sample = scheduler.scale_model_input(sample, t)
85
-
86
- model_output = model(sample, t)
87
-
88
- output = scheduler.step(model_output, t, sample)
89
- sample = output.prev_sample
90
-
91
- result_sum = torch.sum(torch.abs(sample))
92
- result_mean = torch.mean(torch.abs(sample))
93
-
94
- if torch_device in ["cpu", "mps"]:
95
- assert abs(result_sum.item() - 20.4125) < 1e-2
96
- assert abs(result_mean.item() - 0.0266) < 1e-3
97
- else:
98
- # CUDA
99
- assert abs(result_sum.item() - 20.4125) < 1e-2
100
- assert abs(result_mean.item() - 0.0266) < 1e-3
101
-
102
- def test_full_loop_device(self):
103
- if torch_device == "mps":
104
- return
105
- scheduler_class = self.scheduler_classes[0]
106
- scheduler_config = self.get_scheduler_config()
107
- scheduler = scheduler_class(**scheduler_config)
108
-
109
- scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
110
-
111
- model = self.dummy_model()
112
- sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
113
-
114
- for t in scheduler.timesteps:
115
- sample = scheduler.scale_model_input(sample, t)
116
-
117
- model_output = model(sample, t)
118
-
119
- output = scheduler.step(model_output, t, sample)
120
- sample = output.prev_sample
121
-
122
- result_sum = torch.sum(torch.abs(sample))
123
- result_mean = torch.mean(torch.abs(sample))
124
-
125
- if str(torch_device).startswith("cpu"):
126
- # The following sum varies between 148 and 156 on mps. Why?
127
- assert abs(result_sum.item() - 20.4125) < 1e-2
128
- assert abs(result_mean.item() - 0.0266) < 1e-3
129
- else:
130
- # CUDA
131
- assert abs(result_sum.item() - 20.4125) < 1e-2
132
- assert abs(result_mean.item() - 0.0266) < 1e-3
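Every test above exercises the same scale-input / predict / step loop. A condensed sketch of that loop outside the test harness, with a stand-in for the trained model (the shapes and the zero predictor are illustrative):

```python
# Minimal sketch of the denoising loop the tests above exercise.
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

# Start from pure noise scaled to the scheduler's initial sigma.
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma

def model(x, t):
    # Stand-in for a trained UNet's noise prediction.
    return torch.zeros_like(x)

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample
```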
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/check_inits.py DELETED
@@ -1,299 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 The HuggingFace Inc. team.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import collections
17
- import importlib.util
18
- import os
19
- import re
20
- from pathlib import Path
21
-
22
-
23
- PATH_TO_TRANSFORMERS = "src/transformers"
24
-
25
-
26
- # Matches is_xxx_available()
27
- _re_backend = re.compile(r"is\_([a-z_]*)_available()")
28
- # Catches a one-line _import_struct = {xxx}
29
- _re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
30
- # Catches a line with a key-values pattern: "bla": ["foo", "bar"]
31
- _re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
32
- # Catches a line if not is_foo_available
33
- _re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
34
- # Catches a line _import_struct["bla"].append("foo")
35
- _re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
36
- # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
37
- _re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
38
- # Catches a line with an object between quotes and a comma: "MyModel",
39
- _re_quote_object = re.compile('^\s+"([^"]+)",')
40
- # Catches a line with objects between brackets only: ["foo", "bar"],
41
- _re_between_brackets = re.compile("^\s+\[([^\]]+)\]")
42
- # Catches a line with from foo import bar, bla, boo
43
- _re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
44
- # Catches a line with try:
45
- _re_try = re.compile(r"^\s*try:")
46
- # Catches a line with else:
47
- _re_else = re.compile(r"^\s*else:")
48
-
49
-
50
- def find_backend(line):
51
- """Find one (or multiple) backend in a code line of the init."""
52
- if _re_test_backend.search(line) is None:
53
- return None
54
- backends = [b[0] for b in _re_backend.findall(line)]
55
- backends.sort()
56
- return "_and_".join(backends)
57
-
58
-
59
- def parse_init(init_file):
60
- """
61
- Read an init_file and parse (per backend) the _import_structure objects defined and the TYPE_CHECKING objects
62
- defined
63
- """
64
- with open(init_file, "r", encoding="utf-8", newline="\n") as f:
65
- lines = f.readlines()
66
-
67
- line_index = 0
68
- while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
69
- line_index += 1
70
-
71
- # If this is a traditional init, just return.
72
- if line_index >= len(lines):
73
- return None
74
-
75
- # First grab the objects without a specific backend in _import_structure
76
- objects = []
77
- while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
78
- line = lines[line_index]
79
- # If we have everything on a single line, let's deal with it.
80
- if _re_one_line_import_struct.search(line):
81
- content = _re_one_line_import_struct.search(line).groups()[0]
82
- imports = re.findall("\[([^\]]+)\]", content)
83
- for imp in imports:
84
- objects.extend([obj[1:-1] for obj in imp.split(", ")])
85
- line_index += 1
86
- continue
87
- single_line_import_search = _re_import_struct_key_value.search(line)
88
- if single_line_import_search is not None:
89
- imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
90
- objects.extend(imports)
91
- elif line.startswith(" " * 8 + '"'):
92
- objects.append(line[9:-3])
93
- line_index += 1
94
-
95
- import_dict_objects = {"none": objects}
96
- # Let's continue with backend-specific objects in _import_structure
97
- while not lines[line_index].startswith("if TYPE_CHECKING"):
98
- # If the line is an if not is_backend_available, we grab all objects associated.
99
- backend = find_backend(lines[line_index])
100
- # Check if the backend declaration is inside a try block:
101
- if _re_try.search(lines[line_index - 1]) is None:
102
- backend = None
103
-
104
- if backend is not None:
105
- line_index += 1
106
-
107
- # Scroll until we hit the else block of try-except-else
108
- while _re_else.search(lines[line_index]) is None:
109
- line_index += 1
110
-
111
- line_index += 1
112
-
113
- objects = []
114
- # Until we unindent, add backend objects to the list
115
- while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
116
- line = lines[line_index]
117
- if _re_import_struct_add_one.search(line) is not None:
118
- objects.append(_re_import_struct_add_one.search(line).groups()[0])
119
- elif _re_import_struct_add_many.search(line) is not None:
120
- imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
121
- imports = [obj[1:-1] for obj in imports if len(obj) > 0]
122
- objects.extend(imports)
123
- elif _re_between_brackets.search(line) is not None:
124
- imports = _re_between_brackets.search(line).groups()[0].split(", ")
125
- imports = [obj[1:-1] for obj in imports if len(obj) > 0]
126
- objects.extend(imports)
127
- elif _re_quote_object.search(line) is not None:
128
- objects.append(_re_quote_object.search(line).groups()[0])
129
- elif line.startswith(" " * 8 + '"'):
130
- objects.append(line[9:-3])
131
- elif line.startswith(" " * 12 + '"'):
132
- objects.append(line[13:-3])
133
- line_index += 1
134
-
135
- import_dict_objects[backend] = objects
136
- else:
137
- line_index += 1
138
-
139
- # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
140
- objects = []
141
- while (
142
- line_index < len(lines)
143
- and find_backend(lines[line_index]) is None
144
- and not lines[line_index].startswith("else")
145
- ):
146
- line = lines[line_index]
147
- single_line_import_search = _re_import.search(line)
148
- if single_line_import_search is not None:
149
- objects.extend(single_line_import_search.groups()[0].split(", "))
150
- elif line.startswith(" " * 8):
151
- objects.append(line[8:-2])
152
- line_index += 1
153
-
154
- type_hint_objects = {"none": objects}
155
- # Let's continue with backend-specific objects
156
- while line_index < len(lines):
157
- # If the line is an if is_backend_available, we grab all objects associated.
158
- backend = find_backend(lines[line_index])
159
- # Check if the backend declaration is inside a try block:
160
- if _re_try.search(lines[line_index - 1]) is None:
161
- backend = None
162
-
163
- if backend is not None:
164
- line_index += 1
165
-
166
- # Scroll until we hit the else block of try-except-else
167
- while _re_else.search(lines[line_index]) is None:
168
- line_index += 1
169
-
170
- line_index += 1
171
-
172
- objects = []
173
- # Until we unindent, add backend objects to the list
174
- while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
175
- line = lines[line_index]
176
- single_line_import_search = _re_import.search(line)
177
- if single_line_import_search is not None:
178
- objects.extend(single_line_import_search.groups()[0].split(", "))
179
- elif line.startswith(" " * 12):
180
- objects.append(line[12:-2])
181
- line_index += 1
182
-
183
- type_hint_objects[backend] = objects
184
- else:
185
- line_index += 1
186
-
187
- return import_dict_objects, type_hint_objects
188
-
189
-
190
- def analyze_results(import_dict_objects, type_hint_objects):
191
- """
192
- Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
193
- """
194
-
195
- def find_duplicates(seq):
196
- return [k for k, v in collections.Counter(seq).items() if v > 1]
197
-
198
- if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
199
- return ["Both sides of the init do not have the same backends!"]
200
-
201
- errors = []
202
- for key in import_dict_objects.keys():
203
- duplicate_imports = find_duplicates(import_dict_objects[key])
204
- if duplicate_imports:
205
- errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
206
- duplicate_type_hints = find_duplicates(type_hint_objects[key])
207
- if duplicate_type_hints:
208
- errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
209
-
210
- if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
211
- name = "base imports" if key == "none" else f"{key} backend"
212
- errors.append(f"Differences for {name}:")
213
- for a in type_hint_objects[key]:
214
- if a not in import_dict_objects[key]:
215
- errors.append(f" {a} in TYPE_HINT but not in _import_structure.")
216
- for a in import_dict_objects[key]:
217
- if a not in type_hint_objects[key]:
218
- errors.append(f" {a} in _import_structure but not in TYPE_HINT.")
219
- return errors
220
-
221
-
222
- def check_all_inits():
223
- """
224
- Check all inits in the transformers repo and raise an error if at least one does not define the same objects in
225
- both halves.
226
- """
227
- failures = []
228
- for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
229
- if "__init__.py" in files:
230
- fname = os.path.join(root, "__init__.py")
231
- objects = parse_init(fname)
232
- if objects is not None:
233
- errors = analyze_results(*objects)
234
- if len(errors) > 0:
235
- errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
236
- failures.append("\n".join(errors))
237
- if len(failures) > 0:
238
- raise ValueError("\n\n".join(failures))
239
-
240
-
241
- def get_transformers_submodules():
242
- """
243
- Returns the list of Transformers submodules.
244
- """
245
- submodules = []
246
- for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
247
- for folder in directories:
248
- # Ignore private modules
249
- if folder.startswith("_"):
250
- directories.remove(folder)
251
- continue
252
- # Ignore leftovers from branches (empty folders apart from pycache)
253
- if len(list((Path(path) / folder).glob("*.py"))) == 0:
254
- continue
255
- short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
256
- submodule = short_path.replace(os.path.sep, ".")
257
- submodules.append(submodule)
258
- for fname in files:
259
- if fname == "__init__.py":
260
- continue
261
- short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
262
- submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
263
- if len(submodule.split(".")) == 1:
264
- submodules.append(submodule)
265
- return submodules
266
-
267
-
268
- IGNORE_SUBMODULES = [
269
- "convert_pytorch_checkpoint_to_tf2",
270
- "modeling_flax_pytorch_utils",
271
- ]
272
-
273
-
274
- def check_submodules():
275
- # This is to make sure the transformers module imported is the one in the repo.
276
- spec = importlib.util.spec_from_file_location(
277
- "transformers",
278
- os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
279
- submodule_search_locations=[PATH_TO_TRANSFORMERS],
280
- )
281
- transformers = spec.loader.load_module()
282
-
283
- module_not_registered = [
284
- module
285
- for module in get_transformers_submodules()
286
- if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
287
- ]
288
- if len(module_not_registered) > 0:
289
- list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
290
- raise ValueError(
291
- "The following submodules are not properly registered in the main init of Transformers:\n"
292
- f"{list_of_modules}\n"
293
- "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
294
- )
295
-
296
-
297
- if __name__ == "__main__":
298
- check_all_inits()
299
- check_submodules()
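Both passes of the parser above hinge on `find_backend`, which turns a guarded-import line into a backend key. A small self-contained sketch of that behaviour (the input lines are illustrative):

```python
# Minimal sketch: what find_backend() extracts from a guarded-import line.
import re

_re_backend = re.compile(r"is\_([a-z_]*)_available()")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")

def find_backend(line):
    # Return a sorted, underscore-joined backend key, or None for unrelated lines.
    if _re_test_backend.search(line) is None:
        return None
    backends = sorted(b[0] for b in _re_backend.findall(line))
    return "_and_".join(backends)

print(find_backend("    if not is_torch_available():"))  # -> "torch"
print(find_backend("    from .utils import logging"))     # -> None
```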
 
spaces/Andy1621/uniformer_image_detection/configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py DELETED
@@ -1,28 +0,0 @@
1
- _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
2
- img_norm_cfg = dict(
3
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
4
- train_pipeline = [
5
- dict(type='LoadImageFromFile'),
6
- dict(
7
- type='InstaBoost',
8
- action_candidate=('normal', 'horizontal', 'skip'),
9
- action_prob=(1, 0, 0),
10
- scale=(0.8, 1.2),
11
- dx=15,
12
- dy=15,
13
- theta=(-1, 1),
14
- color_prob=0.5,
15
- hflag=False,
16
- aug_ratio=0.5),
17
- dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
18
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
19
- dict(type='RandomFlip', flip_ratio=0.5),
20
- dict(type='Normalize', **img_norm_cfg),
21
- dict(type='Pad', size_divisor=32),
22
- dict(type='DefaultFormatBundle'),
23
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
24
- ]
25
- data = dict(train=dict(pipeline=train_pipeline))
26
- # learning policy
27
- lr_config = dict(step=[32, 44])
28
- runner = dict(type='EpochBasedRunner', max_epochs=48)
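The config above only declares the InstaBoost-augmented pipeline as a list of dicts; mmdetection resolves each dict through its transform registry when the dataset is built. A rough sketch of that step, assuming mmdet 2.x and the optional `instaboostfast` package (the import path and the truncated pipeline are illustrative):

```python
# Rough sketch: materialising the declarative train_pipeline from the config above.
# Assumes mmdet 2.x; the InstaBoost transform additionally needs `pip install instaboostfast`.
from mmdet.datasets.pipelines import Compose

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='InstaBoost',
         action_candidate=('normal', 'horizontal', 'skip'),
         action_prob=(1, 0, 0),
         scale=(0.8, 1.2), dx=15, dy=15, theta=(-1, 1),
         color_prob=0.5, hflag=False, aug_ratio=0.5),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
]

# Each dict is looked up in the PIPELINES registry and instantiated in order.
pipeline = Compose(train_pipeline)
```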
spaces/Andy1621/uniformer_image_detection/configs/scnet/scnet_r50_fpn_20e_coco.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = './scnet_r50_fpn_1x_coco.py'
2
- # learning policy
3
- lr_config = dict(step=[16, 19])
4
- runner = dict(type='EpochBasedRunner', max_epochs=20)
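With `step=[16, 19]` and a 20-epoch runner, the default step policy (gamma 0.1) keeps the base learning rate for the first 16 epochs and then decays it twice. A small sketch of the resulting schedule (the base lr is illustrative; it comes from the optimizer section of the `_base_` config):

```python
# Rough sketch: effective lr per epoch under lr_config = dict(step=[16, 19]).
base_lr, gamma, steps = 0.02, 0.1, [16, 19]  # base_lr is illustrative

def lr_at_epoch(epoch):
    # lr is multiplied by gamma once for every milestone already passed.
    return base_lr * gamma ** sum(epoch >= s for s in steps)

print([round(lr_at_epoch(e), 5) for e in range(20)])
# epochs 0-15 -> 0.02, epochs 16-18 -> 0.002, epoch 19 -> 0.0002
```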
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/README.md DELETED
@@ -1,75 +0,0 @@
1
- # Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation
2
-
3
- ## Introduction
4
-
5
- <!-- [ALGORITHM] -->
6
-
7
- ```latex
8
- @inproceedings{deeplabv3plus2018,
9
- title={Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation},
10
- author={Liang-Chieh Chen and Yukun Zhu and George Papandreou and Florian Schroff and Hartwig Adam},
11
- booktitle={ECCV},
12
- year={2018}
13
- }
14
- ```
15
-
16
- ## Results and models
17
-
18
- Note:
19
- `D-8`/`D-16` here correspond to the output stride 8/16 setting for the DeepLab series.
20
- `MG-124` stands for multi-grid dilation in the last stage of ResNet.
21
-
22
- ### Cityscapes
23
-
24
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
25
- | ---------- | --------------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
26
- | DeepLabV3+ | R-50-D8 | 512x1024 | 40000 | 7.5 | 3.94 | 79.61 | 81.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610-d222ffcd.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610.log.json) |
27
- | DeepLabV3+ | R-101-D8 | 512x1024 | 40000 | 11 | 2.60 | 80.21 | 81.82 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes/deeplabv3plus_r101-d8_512x1024_40k_cityscapes_20200605_094614-3769eecf.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes/deeplabv3plus_r101-d8_512x1024_40k_cityscapes_20200605_094614.log.json) |
28
- | DeepLabV3+ | R-50-D8 | 769x769 | 40000 | 8.5 | 1.72 | 78.97 | 80.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes/deeplabv3plus_r50-d8_769x769_40k_cityscapes_20200606_114143-1dcb0e3c.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes/deeplabv3plus_r50-d8_769x769_40k_cityscapes_20200606_114143.log.json) |
29
- | DeepLabV3+ | R-101-D8 | 769x769 | 40000 | 12.5 | 1.15 | 79.46 | 80.50 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes/deeplabv3plus_r101-d8_769x769_40k_cityscapes_20200606_114304-ff414b9e.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes/deeplabv3plus_r101-d8_769x769_40k_cityscapes_20200606_114304.log.json) |
30
- | DeepLabV3+ | R-18-D8 | 512x1024 | 80000 | 2.2 | 14.27 | 76.89 | 78.76 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes/deeplabv3plus_r18-d8_512x1024_80k_cityscapes_20201226_080942-cff257fe.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes/deeplabv3plus_r18-d8_512x1024_80k_cityscapes-20201226_080942.log.json) |
31
- | DeepLabV3+ | R-50-D8 | 512x1024 | 80000 | - | - | 80.09 | 81.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049.log.json) |
32
- | DeepLabV3+ | R-101-D8 | 512x1024 | 80000 | - | - | 80.97 | 82.03 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143.log.json) |
33
- | DeepLabV3+ | R-18-D8 | 769x769 | 80000 | 2.5 | 5.74 | 76.26 | 77.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes/deeplabv3plus_r18-d8_769x769_80k_cityscapes_20201226_083346-f326e06a.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes/deeplabv3plus_r18-d8_769x769_80k_cityscapes-20201226_083346.log.json) |
34
- | DeepLabV3+ | R-50-D8 | 769x769 | 80000 | - | - | 79.83 | 81.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes/deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233-0e9dfdc4.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes/deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233.log.json) |
35
- | DeepLabV3+ | R-101-D8 | 769x769 | 80000 | - | - | 80.98 | 82.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes/deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405-a7573d20.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes/deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405.log.json) |
- | DeepLabV3+ | R-101-D16-MG124 | 512x1024 | 40000 | 5.8 | 7.48 | 79.09 | 80.36 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes_20200908_005644-cf9ce186.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes-20200908_005644.log.json) |
- | DeepLabV3+ | R-101-D16-MG124 | 512x1024 | 80000 | 9.9 | - | 79.90 | 81.33 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes_20200908_005644-ee6158e0.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes-20200908_005644.log.json) |
- | DeepLabV3+ | R-18b-D8 | 512x1024 | 80000 | 2.1 | 14.95 | 75.87 | 77.52 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes_20201226_090828-e451abd9.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes-20201226_090828.log.json) |
- | DeepLabV3+ | R-50b-D8 | 512x1024 | 80000 | 7.4 | 3.94 | 80.28 | 81.44 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes_20201225_213645-a97e4e43.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes-20201225_213645.log.json) |
- | DeepLabV3+ | R-101b-D8 | 512x1024 | 80000 | 10.9 | 2.60 | 80.16 | 81.41 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes_20201226_190843-9c3c93a4.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes-20201226_190843.log.json) |
- | DeepLabV3+ | R-18b-D8 | 769x769 | 80000 | 2.4 | 5.96 | 76.36 | 78.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes/deeplabv3plus_r18b-d8_769x769_80k_cityscapes_20201226_151312-2c868aff.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes/deeplabv3plus_r18b-d8_769x769_80k_cityscapes-20201226_151312.log.json) |
- | DeepLabV3+ | R-50b-D8 | 769x769 | 80000 | 8.4 | 1.72 | 79.41 | 80.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes/deeplabv3plus_r50b-d8_769x769_80k_cityscapes_20201225_224655-8b596d1c.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes/deeplabv3plus_r50b-d8_769x769_80k_cityscapes-20201225_224655.log.json) |
- | DeepLabV3+ | R-101b-D8 | 769x769 | 80000 | 12.3 | 1.10 | 79.88 | 81.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101b-d8_769x769_80k_cityscapes/deeplabv3plus_r101b-d8_769x769_80k_cityscapes_20201226_205041-227cdf7c.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101b-d8_769x769_80k_cityscapes/deeplabv3plus_r101b-d8_769x769_80k_cityscapes-20201226_205041.log.json) |
-
- ### ADE20K
-
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
- | ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
- | DeepLabV3+ | R-50-D8 | 512x512 | 80000 | 10.6 | 21.01 | 42.72 | 43.75 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k/deeplabv3plus_r50-d8_512x512_80k_ade20k_20200614_185028-bf1400d8.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k/deeplabv3plus_r50-d8_512x512_80k_ade20k_20200614_185028.log.json) |
- | DeepLabV3+ | R-101-D8 | 512x512 | 80000 | 14.1 | 14.16 | 44.60 | 46.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k/deeplabv3plus_r101-d8_512x512_80k_ade20k_20200615_014139-d5730af7.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k/deeplabv3plus_r101-d8_512x512_80k_ade20k_20200615_014139.log.json) |
- | DeepLabV3+ | R-50-D8 | 512x512 | 160000 | - | - | 43.95 | 44.93 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k/deeplabv3plus_r50-d8_512x512_160k_ade20k_20200615_124504-6135c7e0.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k/deeplabv3plus_r50-d8_512x512_160k_ade20k_20200615_124504.log.json) |
- | DeepLabV3+ | R-101-D8 | 512x512 | 160000 | - | - | 45.47 | 46.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k/deeplabv3plus_r101-d8_512x512_160k_ade20k_20200615_123232-38ed86bb.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k/deeplabv3plus_r101-d8_512x512_160k_ade20k_20200615_123232.log.json) |
-
- ### Pascal VOC 2012 + Aug
-
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
- | ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
- | DeepLabV3+ | R-50-D8 | 512x512 | 20000 | 7.6 | 21 | 75.93 | 77.50 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug/deeplabv3plus_r50-d8_512x512_20k_voc12aug_20200617_102323-aad58ef1.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug/deeplabv3plus_r50-d8_512x512_20k_voc12aug_20200617_102323.log.json) |
- | DeepLabV3+ | R-101-D8 | 512x512 | 20000 | 11 | 13.88 | 77.22 | 78.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug/deeplabv3plus_r101-d8_512x512_20k_voc12aug_20200617_102345-c7ff3d56.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug/deeplabv3plus_r101-d8_512x512_20k_voc12aug_20200617_102345.log.json) |
- | DeepLabV3+ | R-50-D8 | 512x512 | 40000 | - | - | 76.81 | 77.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug/deeplabv3plus_r50-d8_512x512_40k_voc12aug_20200613_161759-e1b43aa9.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug/deeplabv3plus_r50-d8_512x512_40k_voc12aug_20200613_161759.log.json) |
- | DeepLabV3+ | R-101-D8 | 512x512 | 40000 | - | - | 78.62 | 79.53 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug/deeplabv3plus_r101-d8_512x512_40k_voc12aug_20200613_205333-faf03387.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug/deeplabv3plus_r101-d8_512x512_40k_voc12aug_20200613_205333.log.json) |
-
- ### Pascal Context
-
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
- | ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
- | DeepLabV3+ | R-101-D8 | 480x480 | 40000 | - | 9.09 | 47.30 | 48.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context/deeplabv3plus_r101-d8_480x480_40k_pascal_context_20200911_165459-d3c8a29e.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context/deeplabv3plus_r101-d8_480x480_40k_pascal_context-20200911_165459.log.json) |
- | DeepLabV3+ | R-101-D8 | 480x480 | 80000 | - | - | 47.23 | 48.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context/deeplabv3plus_r101-d8_480x480_80k_pascal_context_20200911_155322-145d3ee8.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context/deeplabv3plus_r101-d8_480x480_80k_pascal_context-20200911_155322.log.json) |
-
- ### Pascal Context 59
-
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
- | ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
- | DeepLabV3+ | R-101-D8 | 480x480 | 40000 | - | - | 52.86 | 54.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59_20210416_111233-ed937f15.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59-20210416_111233.log.json) |
- | DeepLabV3+ | R-101-D8 | 480x480 | 80000 | - | - | 53.2 | 54.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59_20210416_111127-7ca0331d.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59-20210416_111127.log.json) |
 
spaces/Annelisseishere/Streamlit_GPT/app.py DELETED
@@ -1,142 +0,0 @@
1
- from dotenv import load_dotenv
2
- import os
3
- import streamlit as st
4
- from PyPDF2 import PdfFileReader
5
- from langchain.text_splitter import CharacterTextSplitter
6
- from langchain.embeddings.openai import OpenAIEmbeddings
7
- from langchain.vectorstores import FAISS
8
- from langchain.chains.question_answering import load_qa_chain
9
- from langchain.llms import OpenAI as LLMSOpenAI
10
- from langchain.llms import AzureOpenAI
11
- from langchain.callbacks import get_openai_callback
12
- from langchain.chat_models import ChatOpenAI
13
- from docx import Document
14
- from openpyxl import load_workbook
15
- import pdfplumber
16
-
17
-
18
- def extract_text_from_pdf(pdf_file):
19
- with pdfplumber.open(pdf_file) as pdf:
20
- text = ""
21
- for page in pdf.pages:
22
- text += page.extract_text()
23
- return text
24
-
25
-
26
- def extract_text_from_docx(docx_file):
27
- doc = Document(docx_file)
28
- paragraphs = [paragraph.text for paragraph in doc.paragraphs]
29
- return "\n".join(paragraphs)
30
-
31
-
32
- def extract_text_from_excel(excel_file):
33
- workbook = load_workbook(excel_file)
34
- text = ""
35
- for sheet in workbook.sheetnames:
36
- worksheet = workbook[sheet]
37
- for row in worksheet.iter_rows():
38
- for cell in row:
39
- if cell.value:
40
- text += str(cell.value) + "\n"
41
- return text
42
-
43
-
44
- def split_text_into_chunks(text):
45
- text_splitter = CharacterTextSplitter(
46
- separator="\n",
47
- chunk_size=1000,
48
- chunk_overlap=200,
49
- length_function=len
50
- )
51
- return text_splitter.split_text(text)
52
-
53
-
54
- def create_knowledge_base(chunks, api_key=None):
55
- embeddings = OpenAIEmbeddings(openai_api_key=api_key)
56
- knowledge_base = FAISS.from_texts(chunks, embeddings)
57
- return knowledge_base
58
-
59
-
60
- def answer_question(question, knowledge_base, model):
61
- docs = knowledge_base.similarity_search(question)
62
- llm = model(model_name="gpt-3.5-turbo", openai_api_key=st.session_state.api_key)
63
- chain = load_qa_chain(llm, chain_type="stuff")
64
- with get_openai_callback() as cb:
65
- response = chain.run(input_documents=docs, question=question)
66
- return response
67
-
68
-
69
- def save_api_key(api_key):
70
- st.session_state.api_key = api_key
71
-
72
-
73
- def main():
74
- load_dotenv()
75
- st.set_page_config(page_title="Ask Your PDF", layout="wide")
76
-
77
- # Sidebar
78
- st.sidebar.title("Settings")
79
-
80
- # API Key input
81
- st.sidebar.subheader("API Key")
82
- api_key = st.sidebar.text_input("Insert your API Key", type="password")
83
- st.sidebar.button("Save API Key", on_click=save_api_key, args=(api_key,))
84
-
85
- model_type = st.sidebar.selectbox("Select Language Model", ["OpenAI", "AzureOpenAI"])
86
- if model_type == "AzureOpenAI":
87
- model = AzureOpenAI
88
- else:
89
- model = ChatOpenAI
90
-
91
- chunk_size = st.sidebar.slider("Chunk Size", min_value=500, max_value=2000, value=1000, step=100)
92
- chunk_overlap = st.sidebar.slider("Chunk Overlap", min_value=100, max_value=500, value=200, step=50)
93
- show_content = st.sidebar.checkbox("Show Document Content")
94
- show_answers = st.sidebar.checkbox("Show Previous Answers")
95
-
96
- # Main content
97
- st.title("Ask Your Document 💭")
98
- file_format = st.selectbox("Select File Format", ["PDF", "docx", "xlsx"])
99
- document = st.file_uploader("Upload Document", type=[file_format.lower()])
100
-
101
- if not hasattr(st.session_state, "api_key") or not st.session_state.api_key:
102
- st.warning("You need to insert your API Key first.")
103
- elif document is not None:
104
- if file_format == "PDF":
105
- text = extract_text_from_pdf(document)
106
- elif file_format == "docx":
107
- text = extract_text_from_docx(document)
108
- elif file_format == "xlsx":
109
- text = extract_text_from_excel(document)
110
- else:
111
- text = ""
112
-
113
- if show_content:
114
- st.subheader("Document Text:")
115
- st.text_area("Content", value=text, height=300)
116
-
117
- chunks = split_text_into_chunks(text)
118
- knowledge_base = create_knowledge_base(chunks, api_key=st.session_state.api_key)
119
-
120
- user_question = st.text_input("Ask a question based on the document content:")
121
-
122
- if user_question:
123
- response = answer_question(user_question, knowledge_base, model)
124
- st.subheader("Answer:")
125
- st.write(response)
126
-
127
- # Store and display previous answers
128
- if "answers" not in st.session_state:
129
- st.session_state.answers = []
130
- st.session_state.answers.append((user_question, response))
131
-
132
- if show_answers:
133
- st.subheader("Previous Answers:")
134
- for question, answer in st.session_state.answers:
135
- st.write(f"Question: {question}")
136
- st.write(f"Answer: {answer}")
137
- st.write("------")
138
-
139
-
140
- if __name__ == '__main__':
141
- main()
142
-
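In the Streamlit app above, the sidebar sliders `chunk_size` and `chunk_overlap` are read in `main()` but never reach `split_text_into_chunks`, which hardcodes 1000 and 200 (the `PyPDF2.PdfFileReader` import is likewise unused, since `pdfplumber` does the extraction). A minimal sketch of how the slider values could be threaded through, reusing the same `CharacterTextSplitter` call as the file; this is a hypothetical refactor, not part of the original code:

```python
from langchain.text_splitter import CharacterTextSplitter

def split_text_into_chunks(text, chunk_size=1000, chunk_overlap=200):
    # chunk_size / chunk_overlap now come from the Streamlit sliders instead of constants
    text_splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
        length_function=len,
    )
    return text_splitter.split_text(text)

# The call site in main() would then become:
# chunks = split_text_into_chunks(text, chunk_size=chunk_size, chunk_overlap=chunk_overlap)
```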
 
spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/dataset.py DELETED
@@ -1,48 +0,0 @@
1
- import fastai
2
- from fastai import *
3
- from fastai.core import *
4
- from fastai.vision.transform import get_transforms
5
- from fastai.vision.data import ImageImageList, ImageDataBunch, imagenet_stats
6
- from .augs import noisify
7
-
8
-
9
- def get_colorize_data(
10
- sz: int,
11
- bs: int,
12
- crappy_path: Path,
13
- good_path: Path,
14
- random_seed: int = None,
15
- keep_pct: float = 1.0,
16
- num_workers: int = 8,
17
- stats: tuple = imagenet_stats,
18
- xtra_tfms=[],
19
- ) -> ImageDataBunch:
20
-
21
- src = (
22
- ImageImageList.from_folder(crappy_path, convert_mode='RGB')
23
- .use_partial_data(sample_pct=keep_pct, seed=random_seed)
24
- .split_by_rand_pct(0.1, seed=random_seed)
25
- )
26
-
27
- data = (
28
- src.label_from_func(lambda x: good_path / x.relative_to(crappy_path))
29
- .transform(
30
- get_transforms(
31
- max_zoom=1.2, max_lighting=0.5, max_warp=0.25, xtra_tfms=xtra_tfms
32
- ),
33
- size=sz,
34
- tfm_y=True,
35
- )
36
- .databunch(bs=bs, num_workers=num_workers, no_check=True)
37
- .normalize(stats, do_y=True)
38
- )
39
-
40
- data.c = 3
41
- return data
42
-
43
-
44
- def get_dummy_databunch() -> ImageDataBunch:
45
- path = Path('./assets/dummy/')
46
- return get_colorize_data(
47
- sz=1, bs=1, crappy_path=path, good_path=path, keep_pct=0.001
48
- )
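For orientation, a hypothetical call to the `get_colorize_data` helper defined above might look like the sketch below; the paths, crop size, and sampling percentage are illustrative values, not taken from the repository:

```python
from pathlib import Path

# Build a fastai ImageDataBunch that pairs degraded inputs with colour targets.
data = get_colorize_data(
    sz=192,                           # crop size applied to both inputs and targets
    bs=8,                             # batch size
    crappy_path=Path('data/bandw'),   # greyscale / degraded source images
    good_path=Path('data/color'),     # matching colour ground truth
    random_seed=42,
    keep_pct=0.25,                    # train on a 25% sample of the folder
)
```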
 
spaces/Arijit-hazra/my-image-captioner/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: My Image Captioner
3
- emoji: 👀
4
- colorFrom: purple
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.27.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Ashwanthram/myGenVoiceBot/app.py DELETED
@@ -1,164 +0,0 @@
1
- import os
2
- import re
3
- import requests
4
- import json
5
- import gradio as gr
6
- from langchain.chat_models import ChatOpenAI
7
- from langchain import LLMChain, PromptTemplate
8
- from langchain.memory import ConversationBufferMemory
9
-
10
- OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
11
- PLAY_HT_API_KEY=os.getenv('PLAY_HT_API_KEY')
12
- PLAY_HT_USER_ID=os.getenv('PLAY_HT_USER_ID')
13
-
14
- PLAY_HT_VOICE_ID=os.getenv('PLAY_HT_VOICE_ID')
15
- play_ht_api_get_audio_url = "https://play.ht/api/v2/tts"
16
-
17
-
18
- template = """You are a helpful assistant to answer user queries.
19
- {chat_history}
20
- User: {user_message}
21
- Chatbot:"""
22
-
23
- prompt = PromptTemplate(
24
- input_variables=["chat_history", "user_message"], template=template
25
- )
26
-
27
- memory = ConversationBufferMemory(memory_key="chat_history")
28
-
29
- llm_chain = LLMChain(
30
- llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"),
31
- prompt=prompt,
32
- verbose=True,
33
- memory=memory,
34
- )
35
-
36
- headers = {
37
- "accept": "text/event-stream",
38
- "content-type": "application/json",
39
- "AUTHORIZATION": "Bearer "+ PLAY_HT_API_KEY,
40
- "X-USER-ID": PLAY_HT_USER_ID
41
- }
42
-
43
-
44
- def get_payload(text):
45
- return {
46
- "text": text,
47
- "voice": PLAY_HT_VOICE_ID,
48
- "quality": "medium",
49
- "output_format": "mp3",
50
- "speed": 1,
51
- "sample_rate": 24000,
52
- "seed": None,
53
- "temperature": None
54
- }
55
-
56
- def get_generated_audio(text):
57
- payload = get_payload(text)
58
- generated_response = {}
59
- try:
60
- response = requests.post(play_ht_api_get_audio_url, json=payload, headers=headers)
61
- response.raise_for_status()
62
- generated_response["type"]= 'SUCCESS'
63
- generated_response["response"] = response.text
64
- except requests.exceptions.RequestException as e:
65
- generated_response["type"]= 'ERROR'
66
- try:
67
- response_text = json.loads(response.text)
68
- if response_text['error_message']:
69
- generated_response["response"] = response_text['error_message']
70
- else:
71
- generated_response["response"] = response.text
72
- except Exception as e:
73
- generated_response["response"] = response.text
74
- except Exception as e:
75
- generated_response["type"]= 'ERROR'
76
- generated_response["response"] = response.text
77
- return generated_response
78
-
79
- def extract_urls(text):
80
- # Define the regex pattern for URLs
81
- url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+[/\w\.-]*'
82
-
83
- # Find all occurrences of URLs in the text
84
- urls = re.findall(url_pattern, text)
85
-
86
- return urls
87
-
88
- def get_audio_reply_for_question(text):
89
- generated_audio_event = get_generated_audio(text)
90
- #From get_generated_audio, you will get events in a string format, from that we need to extract the url
91
- final_response = {
92
- "audio_url": '',
93
- "message": ''
94
- }
95
- if generated_audio_event["type"] == 'SUCCESS':
96
- audio_urls = extract_urls(generated_audio_event["response"])
97
- if len(audio_urls) == 0:
98
- final_response['message'] = "No audio file link found in generated event"
99
- else:
100
- final_response['audio_url'] = audio_urls[-1]
101
- else:
102
- final_response['message'] = generated_audio_event['response']
103
- return final_response
104
-
105
- def download_url(url):
106
- try:
107
- # Send a GET request to the URL to fetch the content
108
- final_response = {
109
- 'content':'',
110
- 'error':''
111
- }
112
- response = requests.get(url)
113
- # Check if the request was successful (status code 200)
114
- if response.status_code == 200:
115
- final_response['content'] = response.content
116
- else:
117
- final_response['error'] = f"Failed to download the URL. Status code: {response.status_code}"
118
- except Exception as e:
119
- final_response['error'] = f"Failed to download the URL. Error: {e}"
120
- return final_response
121
-
122
- def get_filename_from_url(url):
123
- # Use os.path.basename() to extract the file name from the URL
124
- file_name = os.path.basename(url)
125
- return file_name
126
-
127
- def get_text_response(user_message):
128
- response = llm_chain.predict(user_message = user_message)
129
- return response
130
-
131
- def get_text_response_and_audio_response(user_message):
132
- response = get_text_response(user_message) # Getting the reply from Open AI
133
- audio_reply_for_question_response = get_audio_reply_for_question(response)
134
- final_response = {
135
- 'output_file_path': '',
136
- 'message':''
137
- }
138
- audio_url = audio_reply_for_question_response['audio_url']
139
- if audio_url:
140
- output_file_path=get_filename_from_url(audio_url)
141
- download_url_response = download_url(audio_url)
142
- audio_content = download_url_response['content']
143
- if audio_content:
144
- with open(output_file_path, "wb") as audio_file:
145
- audio_file.write(audio_content)
146
- final_response['output_file_path'] = output_file_path
147
- else:
148
- final_response['message'] = download_url_response['error']
149
- else:
150
- final_response['message'] = audio_reply_for_question_response['message']
151
- return final_response
152
-
153
- def chat_bot_response(message, history):
154
- text_and_audio_response = get_text_response_and_audio_response(message)
155
- output_file_path = text_and_audio_response['output_file_path']
156
- if output_file_path:
157
- return (text_and_audio_response['output_file_path'],)
158
- else:
159
- return text_and_audio_response['message']
160
-
161
- demo = gr.ChatInterface(chat_bot_response,examples=["How are you doing?","What are your interests?","Which places do you like to visit?"])
162
-
163
- if __name__ == "__main__":
164
- demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
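The `extract_urls` helper above collects every URL in Play.ht's event-stream text, and `get_audio_reply_for_question` keeps the last one. A small illustrative check of that behaviour; the sample payload is fabricated for the example and is not real Play.ht output:

```python
import re

url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+[/\w\.-]*'

sample = (
    'event: generating\ndata: {"url": "https://example.com/audio/partial.mp3"}\n'
    'event: completed\ndata: {"url": "https://example.com/audio/final.mp3"}\n'
)
urls = re.findall(url_pattern, sample)
print(urls[-1])  # https://example.com/audio/final.mp3 -- the last URL wins, as in the code above
```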
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/langhebrewmodel.py DELETED
The diff for this file is too large to render. See raw diff
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/pyparsing/unicode.py DELETED
@@ -1,352 +0,0 @@
1
- # unicode.py
2
-
3
- import sys
4
- from itertools import filterfalse
5
- from typing import List, Tuple, Union
6
-
7
-
8
- class _lazyclassproperty:
9
- def __init__(self, fn):
10
- self.fn = fn
11
- self.__doc__ = fn.__doc__
12
- self.__name__ = fn.__name__
13
-
14
- def __get__(self, obj, cls):
15
- if cls is None:
16
- cls = type(obj)
17
- if not hasattr(cls, "_intern") or any(
18
- cls._intern is getattr(superclass, "_intern", [])
19
- for superclass in cls.__mro__[1:]
20
- ):
21
- cls._intern = {}
22
- attrname = self.fn.__name__
23
- if attrname not in cls._intern:
24
- cls._intern[attrname] = self.fn(cls)
25
- return cls._intern[attrname]
26
-
27
-
28
- UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]]
29
-
30
-
31
- class unicode_set:
32
- """
33
- A set of Unicode characters, for language-specific strings for
34
- ``alphas``, ``nums``, ``alphanums``, and ``printables``.
35
- A unicode_set is defined by a list of ranges in the Unicode character
36
- set, in a class attribute ``_ranges``. Ranges can be specified using
37
- 2-tuples or a 1-tuple, such as::
38
-
39
- _ranges = [
40
- (0x0020, 0x007e),
41
- (0x00a0, 0x00ff),
42
- (0x0100,),
43
- ]
44
-
45
- Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x).
46
-
47
- A unicode set can also be defined using multiple inheritance of other unicode sets::
48
-
49
- class CJK(Chinese, Japanese, Korean):
50
- pass
51
- """
52
-
53
- _ranges: UnicodeRangeList = []
54
-
55
- @_lazyclassproperty
56
- def _chars_for_ranges(cls):
57
- ret = []
58
- for cc in cls.__mro__:
59
- if cc is unicode_set:
60
- break
61
- for rr in getattr(cc, "_ranges", ()):
62
- ret.extend(range(rr[0], rr[-1] + 1))
63
- return [chr(c) for c in sorted(set(ret))]
64
-
65
- @_lazyclassproperty
66
- def printables(cls):
67
- "all non-whitespace characters in this range"
68
- return "".join(filterfalse(str.isspace, cls._chars_for_ranges))
69
-
70
- @_lazyclassproperty
71
- def alphas(cls):
72
- "all alphabetic characters in this range"
73
- return "".join(filter(str.isalpha, cls._chars_for_ranges))
74
-
75
- @_lazyclassproperty
76
- def nums(cls):
77
- "all numeric digit characters in this range"
78
- return "".join(filter(str.isdigit, cls._chars_for_ranges))
79
-
80
- @_lazyclassproperty
81
- def alphanums(cls):
82
- "all alphanumeric characters in this range"
83
- return cls.alphas + cls.nums
84
-
85
- @_lazyclassproperty
86
- def identchars(cls):
87
- "all characters in this range that are valid identifier characters, plus underscore '_'"
88
- return "".join(
89
- sorted(
90
- set(
91
- "".join(filter(str.isidentifier, cls._chars_for_ranges))
92
- + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº"
93
- + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
94
- + "_"
95
- )
96
- )
97
- )
98
-
99
- @_lazyclassproperty
100
- def identbodychars(cls):
101
- """
102
- all characters in this range that are valid identifier body characters,
103
- plus the digits 0-9
104
- """
105
- return "".join(
106
- sorted(
107
- set(
108
- cls.identchars
109
- + "0123456789"
110
- + "".join(
111
- [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()]
112
- )
113
- )
114
- )
115
- )
116
-
117
-
118
- class pyparsing_unicode(unicode_set):
119
- """
120
- A namespace class for defining common language unicode_sets.
121
- """
122
-
123
- # fmt: off
124
-
125
- # define ranges in language character sets
126
- _ranges: UnicodeRangeList = [
127
- (0x0020, sys.maxunicode),
128
- ]
129
-
130
- class BasicMultilingualPlane(unicode_set):
131
- "Unicode set for the Basic Multilingual Plane"
132
- _ranges: UnicodeRangeList = [
133
- (0x0020, 0xFFFF),
134
- ]
135
-
136
- class Latin1(unicode_set):
137
- "Unicode set for Latin-1 Unicode Character Range"
138
- _ranges: UnicodeRangeList = [
139
- (0x0020, 0x007E),
140
- (0x00A0, 0x00FF),
141
- ]
142
-
143
- class LatinA(unicode_set):
144
- "Unicode set for Latin-A Unicode Character Range"
145
- _ranges: UnicodeRangeList = [
146
- (0x0100, 0x017F),
147
- ]
148
-
149
- class LatinB(unicode_set):
150
- "Unicode set for Latin-B Unicode Character Range"
151
- _ranges: UnicodeRangeList = [
152
- (0x0180, 0x024F),
153
- ]
154
-
155
- class Greek(unicode_set):
156
- "Unicode set for Greek Unicode Character Ranges"
157
- _ranges: UnicodeRangeList = [
158
- (0x0342, 0x0345),
159
- (0x0370, 0x0377),
160
- (0x037A, 0x037F),
161
- (0x0384, 0x038A),
162
- (0x038C,),
163
- (0x038E, 0x03A1),
164
- (0x03A3, 0x03E1),
165
- (0x03F0, 0x03FF),
166
- (0x1D26, 0x1D2A),
167
- (0x1D5E,),
168
- (0x1D60,),
169
- (0x1D66, 0x1D6A),
170
- (0x1F00, 0x1F15),
171
- (0x1F18, 0x1F1D),
172
- (0x1F20, 0x1F45),
173
- (0x1F48, 0x1F4D),
174
- (0x1F50, 0x1F57),
175
- (0x1F59,),
176
- (0x1F5B,),
177
- (0x1F5D,),
178
- (0x1F5F, 0x1F7D),
179
- (0x1F80, 0x1FB4),
180
- (0x1FB6, 0x1FC4),
181
- (0x1FC6, 0x1FD3),
182
- (0x1FD6, 0x1FDB),
183
- (0x1FDD, 0x1FEF),
184
- (0x1FF2, 0x1FF4),
185
- (0x1FF6, 0x1FFE),
186
- (0x2129,),
187
- (0x2719, 0x271A),
188
- (0xAB65,),
189
- (0x10140, 0x1018D),
190
- (0x101A0,),
191
- (0x1D200, 0x1D245),
192
- (0x1F7A1, 0x1F7A7),
193
- ]
194
-
195
- class Cyrillic(unicode_set):
196
- "Unicode set for Cyrillic Unicode Character Range"
197
- _ranges: UnicodeRangeList = [
198
- (0x0400, 0x052F),
199
- (0x1C80, 0x1C88),
200
- (0x1D2B,),
201
- (0x1D78,),
202
- (0x2DE0, 0x2DFF),
203
- (0xA640, 0xA672),
204
- (0xA674, 0xA69F),
205
- (0xFE2E, 0xFE2F),
206
- ]
207
-
208
- class Chinese(unicode_set):
209
- "Unicode set for Chinese Unicode Character Range"
210
- _ranges: UnicodeRangeList = [
211
- (0x2E80, 0x2E99),
212
- (0x2E9B, 0x2EF3),
213
- (0x31C0, 0x31E3),
214
- (0x3400, 0x4DB5),
215
- (0x4E00, 0x9FEF),
216
- (0xA700, 0xA707),
217
- (0xF900, 0xFA6D),
218
- (0xFA70, 0xFAD9),
219
- (0x16FE2, 0x16FE3),
220
- (0x1F210, 0x1F212),
221
- (0x1F214, 0x1F23B),
222
- (0x1F240, 0x1F248),
223
- (0x20000, 0x2A6D6),
224
- (0x2A700, 0x2B734),
225
- (0x2B740, 0x2B81D),
226
- (0x2B820, 0x2CEA1),
227
- (0x2CEB0, 0x2EBE0),
228
- (0x2F800, 0x2FA1D),
229
- ]
230
-
231
- class Japanese(unicode_set):
232
- "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
233
- _ranges: UnicodeRangeList = []
234
-
235
- class Kanji(unicode_set):
236
- "Unicode set for Kanji Unicode Character Range"
237
- _ranges: UnicodeRangeList = [
238
- (0x4E00, 0x9FBF),
239
- (0x3000, 0x303F),
240
- ]
241
-
242
- class Hiragana(unicode_set):
243
- "Unicode set for Hiragana Unicode Character Range"
244
- _ranges: UnicodeRangeList = [
245
- (0x3041, 0x3096),
246
- (0x3099, 0x30A0),
247
- (0x30FC,),
248
- (0xFF70,),
249
- (0x1B001,),
250
- (0x1B150, 0x1B152),
251
- (0x1F200,),
252
- ]
253
-
254
- class Katakana(unicode_set):
255
- "Unicode set for Katakana Unicode Character Range"
256
- _ranges: UnicodeRangeList = [
257
- (0x3099, 0x309C),
258
- (0x30A0, 0x30FF),
259
- (0x31F0, 0x31FF),
260
- (0x32D0, 0x32FE),
261
- (0xFF65, 0xFF9F),
262
- (0x1B000,),
263
- (0x1B164, 0x1B167),
264
- (0x1F201, 0x1F202),
265
- (0x1F213,),
266
- ]
267
-
268
- class Hangul(unicode_set):
269
- "Unicode set for Hangul (Korean) Unicode Character Range"
270
- _ranges: UnicodeRangeList = [
271
- (0x1100, 0x11FF),
272
- (0x302E, 0x302F),
273
- (0x3131, 0x318E),
274
- (0x3200, 0x321C),
275
- (0x3260, 0x327B),
276
- (0x327E,),
277
- (0xA960, 0xA97C),
278
- (0xAC00, 0xD7A3),
279
- (0xD7B0, 0xD7C6),
280
- (0xD7CB, 0xD7FB),
281
- (0xFFA0, 0xFFBE),
282
- (0xFFC2, 0xFFC7),
283
- (0xFFCA, 0xFFCF),
284
- (0xFFD2, 0xFFD7),
285
- (0xFFDA, 0xFFDC),
286
- ]
287
-
288
- Korean = Hangul
289
-
290
- class CJK(Chinese, Japanese, Hangul):
291
- "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
292
-
293
- class Thai(unicode_set):
294
- "Unicode set for Thai Unicode Character Range"
295
- _ranges: UnicodeRangeList = [
296
- (0x0E01, 0x0E3A),
297
- (0x0E3F, 0x0E5B)
298
- ]
299
-
300
- class Arabic(unicode_set):
301
- "Unicode set for Arabic Unicode Character Range"
302
- _ranges: UnicodeRangeList = [
303
- (0x0600, 0x061B),
304
- (0x061E, 0x06FF),
305
- (0x0700, 0x077F),
306
- ]
307
-
308
- class Hebrew(unicode_set):
309
- "Unicode set for Hebrew Unicode Character Range"
310
- _ranges: UnicodeRangeList = [
311
- (0x0591, 0x05C7),
312
- (0x05D0, 0x05EA),
313
- (0x05EF, 0x05F4),
314
- (0xFB1D, 0xFB36),
315
- (0xFB38, 0xFB3C),
316
- (0xFB3E,),
317
- (0xFB40, 0xFB41),
318
- (0xFB43, 0xFB44),
319
- (0xFB46, 0xFB4F),
320
- ]
321
-
322
- class Devanagari(unicode_set):
323
- "Unicode set for Devanagari Unicode Character Range"
324
- _ranges: UnicodeRangeList = [
325
- (0x0900, 0x097F),
326
- (0xA8E0, 0xA8FF)
327
- ]
328
-
329
- # fmt: on
330
-
331
-
332
- pyparsing_unicode.Japanese._ranges = (
333
- pyparsing_unicode.Japanese.Kanji._ranges
334
- + pyparsing_unicode.Japanese.Hiragana._ranges
335
- + pyparsing_unicode.Japanese.Katakana._ranges
336
- )
337
-
338
- pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane
339
-
340
- # add language identifiers using language Unicode
341
- pyparsing_unicode.العربية = pyparsing_unicode.Arabic
342
- pyparsing_unicode.中文 = pyparsing_unicode.Chinese
343
- pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic
344
- pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek
345
- pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew
346
- pyparsing_unicode.日本語 = pyparsing_unicode.Japanese
347
- pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji
348
- pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana
349
- pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana
350
- pyparsing_unicode.한국어 = pyparsing_unicode.Korean
351
- pyparsing_unicode.ไทย = pyparsing_unicode.Thai
352
- pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari
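The same `unicode_set` ranges defined in this vendored module are exposed by the public `pyparsing` package, so an illustrative use (assuming pyparsing 3.x is installed) could look like:

```python
import pyparsing as pp

# A token that only accepts Greek letters, backed by the ranges defined above.
greek_word = pp.Word(pp.pyparsing_unicode.Greek.alphas)
print(greek_word.parse_string("αβγ δεζ")[0])  # -> αβγ
```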
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/unicode_utils.py DELETED
@@ -1,42 +0,0 @@
1
- import unicodedata
2
- import sys
3
-
4
-
5
- # HFS Plus uses decomposed UTF-8
6
- def decompose(path):
7
- if isinstance(path, str):
8
- return unicodedata.normalize('NFD', path)
9
- try:
10
- path = path.decode('utf-8')
11
- path = unicodedata.normalize('NFD', path)
12
- path = path.encode('utf-8')
13
- except UnicodeError:
14
- pass # Not UTF-8
15
- return path
16
-
17
-
18
- def filesys_decode(path):
19
- """
20
- Ensure that the given path is decoded,
21
- NONE when no expected encoding works
22
- """
23
-
24
- if isinstance(path, str):
25
- return path
26
-
27
- fs_enc = sys.getfilesystemencoding() or 'utf-8'
28
- candidates = fs_enc, 'utf-8'
29
-
30
- for enc in candidates:
31
- try:
32
- return path.decode(enc)
33
- except UnicodeDecodeError:
34
- continue
35
-
36
-
37
- def try_encode(string, enc):
38
- "turn unicode encoding into a functional routine"
39
- try:
40
- return string.encode(enc)
41
- except UnicodeEncodeError:
42
- return None
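A quick illustration of the helpers above: `decompose` produces the NFD form HFS+ expects, `filesys_decode` falls back from the filesystem encoding to UTF-8, and `try_encode` returns `None` on failure (outputs assume a UTF-8 filesystem):

```python
s = "café"
print(len(s), len(decompose(s)))       # 4 5  -- the accent becomes a combining character
print(filesys_decode(b"caf\xc3\xa9"))  # café
print(try_encode("café", "ascii"))     # None -- encoding failed
```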
 
spaces/Audio-AGI/AudioSep/gradio_examples.py DELETED
@@ -1,16 +0,0 @@
1
- from pathlib import Path
2
-
3
- CURR_DIR = Path(__file__).resolve().parent
4
-
5
- EXAMPLES_DIR = CURR_DIR / "examples"
6
-
7
- EXAMPLES = [
8
- [EXAMPLES_DIR / "acoustic_guitar.wav", "acoustic guitar"],
9
- [EXAMPLES_DIR / "laughing.wav", "laughing"],
10
- [
11
- EXAMPLES_DIR / "ticktok_piano.wav",
12
- "A ticktock sound playing at the same rhythm with piano.",
13
- ],
14
- [EXAMPLES_DIR / "water_drops.wav", "water drops"],
15
- [EXAMPLES_DIR / "noisy_speech.wav", "speech"],
16
- ]
 
spaces/Awesimo/jojogan/op/fused_act.py DELETED
@@ -1,127 +0,0 @@
1
- import os
2
-
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
- from torch.autograd import Function
7
- from torch.utils.cpp_extension import load
8
-
9
-
10
- module_path = os.path.dirname(__file__)
11
- fused = load(
12
- "fused",
13
- sources=[
14
- os.path.join(module_path, "fused_bias_act.cpp"),
15
- os.path.join(module_path, "fused_bias_act_kernel.cu"),
16
- ],
17
- )
18
-
19
-
20
- class FusedLeakyReLUFunctionBackward(Function):
21
- @staticmethod
22
- def forward(ctx, grad_output, out, bias, negative_slope, scale):
23
- ctx.save_for_backward(out)
24
- ctx.negative_slope = negative_slope
25
- ctx.scale = scale
26
-
27
- empty = grad_output.new_empty(0)
28
-
29
- grad_input = fused.fused_bias_act(
30
- grad_output.contiguous(), empty, out, 3, 1, negative_slope, scale
31
- )
32
-
33
- dim = [0]
34
-
35
- if grad_input.ndim > 2:
36
- dim += list(range(2, grad_input.ndim))
37
-
38
- if bias:
39
- grad_bias = grad_input.sum(dim).detach()
40
-
41
- else:
42
- grad_bias = empty
43
-
44
- return grad_input, grad_bias
45
-
46
- @staticmethod
47
- def backward(ctx, gradgrad_input, gradgrad_bias):
48
- out, = ctx.saved_tensors
49
- gradgrad_out = fused.fused_bias_act(
50
- gradgrad_input.contiguous(),
51
- gradgrad_bias,
52
- out,
53
- 3,
54
- 1,
55
- ctx.negative_slope,
56
- ctx.scale,
57
- )
58
-
59
- return gradgrad_out, None, None, None, None
60
-
61
-
62
- class FusedLeakyReLUFunction(Function):
63
- @staticmethod
64
- def forward(ctx, input, bias, negative_slope, scale):
65
- empty = input.new_empty(0)
66
-
67
- ctx.bias = bias is not None
68
-
69
- if bias is None:
70
- bias = empty
71
-
72
- out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
73
- ctx.save_for_backward(out)
74
- ctx.negative_slope = negative_slope
75
- ctx.scale = scale
76
-
77
- return out
78
-
79
- @staticmethod
80
- def backward(ctx, grad_output):
81
- out, = ctx.saved_tensors
82
-
83
- grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
84
- grad_output, out, ctx.bias, ctx.negative_slope, ctx.scale
85
- )
86
-
87
- if not ctx.bias:
88
- grad_bias = None
89
-
90
- return grad_input, grad_bias, None, None
91
-
92
-
93
- class FusedLeakyReLU(nn.Module):
94
- def __init__(self, channel, bias=True, negative_slope=0.2, scale=2 ** 0.5):
95
- super().__init__()
96
-
97
- if bias:
98
- self.bias = nn.Parameter(torch.zeros(channel))
99
-
100
- else:
101
- self.bias = None
102
-
103
- self.negative_slope = negative_slope
104
- self.scale = scale
105
-
106
- def forward(self, input):
107
- return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
108
-
109
-
110
- def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5):
111
- if input.device.type == "cpu":
112
- if bias is not None:
113
- rest_dim = [1] * (input.ndim - bias.ndim - 1)
114
- return (
115
- F.leaky_relu(
116
- input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=0.2
117
- )
118
- * scale
119
- )
120
-
121
- else:
122
- return F.leaky_relu(input, negative_slope=0.2) * scale
123
-
124
- else:
125
- return FusedLeakyReLUFunction.apply(
126
- input.contiguous(), bias, negative_slope, scale
127
- )
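On machines without the compiled CUDA extension, `fused_leaky_relu` above falls back to a plain `F.leaky_relu(x + bias) * scale` expression. A rough CPU-only restatement of that branch, with illustrative shapes:

```python
import torch
from torch.nn import functional as F

def fused_leaky_relu_cpu(x, bias=None, negative_slope=0.2, scale=2 ** 0.5):
    # Mirror of the CPU branch above: add the per-channel bias, then apply scaled leaky ReLU.
    if bias is not None:
        rest_dim = [1] * (x.ndim - bias.ndim - 1)
        x = x + bias.view(1, bias.shape[0], *rest_dim)
    return F.leaky_relu(x, negative_slope=negative_slope) * scale

x = torch.randn(2, 8, 4, 4)             # (batch, channels, height, width)
out = fused_leaky_relu_cpu(x, torch.zeros(8))
print(out.shape)                        # torch.Size([2, 8, 4, 4])
```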
 
spaces/BaddaAshok0265/AshokGenAI/app.py DELETED
@@ -1,34 +0,0 @@
1
- import os
2
- import gradio as gr
3
- from langchain.chat_models import ChatOpenAI
4
- from langchain import LLMChain, PromptTemplate
5
- from langchain.memory import ConversationBufferMemory
6
-
7
- OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
8
-
9
- template = """ Hello, meet Ashok Badda, your youthful and witty personal assistant! At 20 years old, He's full of energy and always eager to help. Riya's goal is to assist you with any questions or problems you might have. Her enthusiasm shines through in every response, making interactions with her enjoyable and engaging.
10
- {chat_history}
11
- User: {user_message}
12
- Chatbot:"""
13
-
14
- prompt = PromptTemplate(
15
- input_variables=["chat_history", "user_message"], template=template
16
- )
17
-
18
- memory = ConversationBufferMemory(memory_key="chat_history")
19
-
20
- llm_chain = LLMChain(
21
- llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"),
22
- prompt=prompt,
23
- verbose=True,
24
- memory=memory,
25
- )
26
-
27
- def get_text_response(user_message,history):
28
- response = llm_chain.predict(user_message = user_message)
29
- return response
30
-
31
- demo = gr.ChatInterface(get_text_response)
32
-
33
- if __name__ == "__main__":
34
- demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
 
spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/nets_537238KB.py DELETED
@@ -1,123 +0,0 @@
1
- import numpy as np
2
- import torch
3
- import torch.nn.functional as F
4
- from torch import nn
5
-
6
- from . import layers_537238KB as layers
7
-
8
-
9
- class BaseASPPNet(nn.Module):
10
- def __init__(self, nin, ch, dilations=(4, 8, 16)):
11
- super(BaseASPPNet, self).__init__()
12
- self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
13
- self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
14
- self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
15
- self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
16
-
17
- self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
18
-
19
- self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
20
- self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
21
- self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
22
- self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
23
-
24
- def __call__(self, x):
25
- h, e1 = self.enc1(x)
26
- h, e2 = self.enc2(h)
27
- h, e3 = self.enc3(h)
28
- h, e4 = self.enc4(h)
29
-
30
- h = self.aspp(h)
31
-
32
- h = self.dec4(h, e4)
33
- h = self.dec3(h, e3)
34
- h = self.dec2(h, e2)
35
- h = self.dec1(h, e1)
36
-
37
- return h
38
-
39
-
40
- class CascadedASPPNet(nn.Module):
41
- def __init__(self, n_fft):
42
- super(CascadedASPPNet, self).__init__()
43
- self.stg1_low_band_net = BaseASPPNet(2, 64)
44
- self.stg1_high_band_net = BaseASPPNet(2, 64)
45
-
46
- self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
47
- self.stg2_full_band_net = BaseASPPNet(32, 64)
48
-
49
- self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0)
50
- self.stg3_full_band_net = BaseASPPNet(64, 128)
51
-
52
- self.out = nn.Conv2d(128, 2, 1, bias=False)
53
- self.aux1_out = nn.Conv2d(64, 2, 1, bias=False)
54
- self.aux2_out = nn.Conv2d(64, 2, 1, bias=False)
55
-
56
- self.max_bin = n_fft // 2
57
- self.output_bin = n_fft // 2 + 1
58
-
59
- self.offset = 128
60
-
61
- def forward(self, x, aggressiveness=None):
62
- mix = x.detach()
63
- x = x.clone()
64
-
65
- x = x[:, :, : self.max_bin]
66
-
67
- bandw = x.size()[2] // 2
68
- aux1 = torch.cat(
69
- [
70
- self.stg1_low_band_net(x[:, :, :bandw]),
71
- self.stg1_high_band_net(x[:, :, bandw:]),
72
- ],
73
- dim=2,
74
- )
75
-
76
- h = torch.cat([x, aux1], dim=1)
77
- aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
78
-
79
- h = torch.cat([x, aux1, aux2], dim=1)
80
- h = self.stg3_full_band_net(self.stg3_bridge(h))
81
-
82
- mask = torch.sigmoid(self.out(h))
83
- mask = F.pad(
84
- input=mask,
85
- pad=(0, 0, 0, self.output_bin - mask.size()[2]),
86
- mode="replicate",
87
- )
88
-
89
- if self.training:
90
- aux1 = torch.sigmoid(self.aux1_out(aux1))
91
- aux1 = F.pad(
92
- input=aux1,
93
- pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
94
- mode="replicate",
95
- )
96
- aux2 = torch.sigmoid(self.aux2_out(aux2))
97
- aux2 = F.pad(
98
- input=aux2,
99
- pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
100
- mode="replicate",
101
- )
102
- return mask * mix, aux1 * mix, aux2 * mix
103
- else:
104
- if aggressiveness:
105
- mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
106
- mask[:, :, : aggressiveness["split_bin"]],
107
- 1 + aggressiveness["value"] / 3,
108
- )
109
- mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
110
- mask[:, :, aggressiveness["split_bin"] :],
111
- 1 + aggressiveness["value"],
112
- )
113
-
114
- return mask * mix
115
-
116
- def predict(self, x_mag, aggressiveness=None):
117
- h = self.forward(x_mag, aggressiveness)
118
-
119
- if self.offset > 0:
120
- h = h[:, :, :, self.offset : -self.offset]
121
- assert h.size()[3] > 0
122
-
123
- return h
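A hypothetical smoke test of the `CascadedASPPNet` above; the input shape follows the constructor (`max_bin = n_fft // 2`, `output_bin = n_fft // 2 + 1`), but the concrete values, and the presence of the companion `layers_537238KB` module, are assumptions:

```python
import torch

n_fft = 2048
model = CascadedASPPNet(n_fft).eval()

# 2-channel magnitude spectrogram: (batch, channels, frequency bins, time frames)
x_mag = torch.rand(1, 2, n_fft // 2 + 1, 512)
with torch.no_grad():
    mask = model(x_mag)   # masked mixture, same shape as the input
print(mask.shape)         # torch.Size([1, 2, 1025, 512])
```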
 
spaces/Benson/text-generation/Examples/Candy Crush Soda Saga No Download.md DELETED
@@ -1,145 +0,0 @@
1
-
2
- <h1>Candy Crush Soda Saga: Un juego de puzzle dulce y efervescente</h1>
3
- <p>Si eres un fan de los juegos de puzzle de match-3, probablemente hayas escuchado o jugado a Candy Crush Saga, uno de los juegos más populares y adictivos de este género. Pero ¿sabías que hay una secuela de este juego que ofrece aún más diversión y desafíos? Se llama Candy Crush Soda Saga, y es un juego que puedes jugar online gratis en tu PC o dispositivo móvil, sin descargar nada. En este artículo, te contaremos todo lo que necesitas saber sobre Candy Crush Soda Saga, cómo jugarlo online y cuáles son algunas de las alternativas a este juego. </p>
4
- <h2>¿Qué es Candy Crush Soda Saga? </h2>
5
- <p>Candy Crush Soda Saga es un juego de puzzle desarrollado por King, la misma compañía que creó Candy Crush Saga, Diamond Digger Saga, Farm Heroes Saga y muchos otros juegos populares. Fue lanzado en 2014 como un spin-off de Candy Crush Saga, y desde entonces se ha convertido en uno de los juegos más jugados en Facebook, Android, iOS, Windows Phone y Windows 10. </p>
6
- <h2>candy crush soda saga no download</h2><br /><p><b><b>Download File</b> &ndash;&ndash;&ndash;&ndash;&ndash;>>> <a href="https://bltlly.com/2v6JBZ">https://bltlly.com/2v6JBZ</a></b></p><br /><br />
7
- <h3>La secuela de la popular Candy Crush Saga</h3>
8
- <p>Candy Crush Soda Saga es una secuela de Candy Crush Saga, lo que significa que sigue el mismo juego básico de combinar tres o más caramelos del mismo color para eliminarlos del tablero y completar varios objetivos. Sin embargo, Candy Crush Soda Saga también introduce algunos nuevos elementos y giros que lo hacen diferente de su predecesor. Por ejemplo, en Candy Crush Soda Saga, encontrarás nuevos tipos de dulces, como botellas de refresco, caramelos de pescado, caramelos para colorear, caramelos de panal y dulces de mermelada. También explorarás nuevos mundos y niveles con diferentes temas y orígenes, como lagos de soda, nubes de algodón de azúcar, islas de glaseado, jardines de miel y fábricas de mermelada. También conocerás nuevos personajes y amigos a lo largo de tu viaje, como Kimmy, la hermana de Tiffi que está buscando a su hermano perdido. </p>
9
- <h3>El juego y las características de Candy Crush Soda Saga</h3>
10
-
11
- <ul>
12
- <li>Más de 10000 niveles de Sodalicious que pondrán a prueba tus habilidades y estrategia. </li>
13
- <li>Temporadas mensuales durante todo el año, llenas de misiones desafiantes y un pase de temporada alimentado por recompensas.</li>
14
- <li>Modos de juego burbujeando con diversión y dulces únicos: <ul>
15
- <li>Soda - Cambiar las botellas y hacer coincidir los caramelos para liberar la soda púrpura y guardar los Candy Bears.</li>
16
- <li>Glaseado - Partido de dulces para romper el hielo y establecer los osos de caramelo libre. </li>
17
- <li>Honeycomb - Coincidir con los dulces al lado de nido de abeja para liberar los osos de caramelo atrapados.</li>
18
- <li>Jam - Spread the jam across the whole board. </li>
19
- </ul>
20
- </li>
21
- <li>Unique candies and delicious new matching combinations: <ul>
22
- <li>Match 4 candies in a square to make a Swedish Fish! </li>
23
- <li>Match 7 candies for the all-new Coloring Candy! </li>
24
- </ul>
25
- </li>
26
- <li>Explore juicy new worlds and levels with even more characters! </li>
27
- <h3>The benefits of playing Candy Crush Soda Saga online</h3>
28
- <p>One of the best things about Candy Crush Soda Saga is that you can play it online for free on your PC or mobile device, without downloading anything. This means you can enjoy the game anytime and anywhere, as long as you have an Internet connection. Playing Candy Crush Soda Saga online also has some other benefits, such as:</p>
29
- <ul>
30
- <li>You can sync your game progress across all your devices, so you can pick up where you left off. </li>
31
- <li>You can connect with your Facebook friends and see their scores and progress on the leaderboards. </li>
32
- <li>You can send and receive lives and boosters from your friends to help each other out. </li>
33
- <li>You can join a team or create your own and chat with other players. </li>
34
- <li>You can take part in special events and challenges and earn exclusive rewards. </li>
35
- </ul>
36
- <h2>How to play Candy Crush Soda Saga online for free on PC and mobile</h2>
37
-
38
- <h3>The official King.com website</h3>
39
- <p>The official King.com website is the best place to play Candy Crush Soda Saga online, since it is the official source of the game. You can access the website from any browser on your PC or mobile device, and you can play the game in full-screen mode. You can also log in with your Facebook account or create a King account to sync your progress and access all of the game's features. To play Candy Crush Soda Saga online on King.com, follow these steps:</p>
40
- <ol>
41
- <li>Visit <a href="https://king.com/game/candycrushsoda">https://king.com/game/candycrushsoda</a> from your browser. </li>
42
- <li>Click the "Play Now" button to start the game. </li>
43
- <li>If you want to log in with your Facebook account or create a King account, click the "Connect" button in the top-right corner of the screen. </li>
44
- <li>Enjoy playing Candy Crush Soda Saga online! </li>
45
- </ol>
46
- <h3>The online gaming platforms Y8.com, now.gg, and Games.lol</h3>
47
- <p>If you want to play Candy Crush Soda Saga online on other websites, you can also try some of the online gaming platforms that offer the game, such as Y8.com, now.gg, and Games.lol. These websites let you play Candy Crush Soda Saga online without logging in or creating an account, but they may not have all the features and updates of the official website. To play Candy Crush Soda Saga online on these websites, follow these steps:</p>
48
- <ol>
49
- <li>Visit one of these websites from your browser: <ul>
50
- <li><a href="https://www.y8.com/games/candy_crush_soda_saga">https://www.y8.com/games/candy_crush_soda_saga</a></li>
51
- <li><a href="https://www.now.gg/play/candy-crush-soda-saga">https://www.now.gg/play/candy-crush-soda-saga</a></li>
52
- <li><a href="https://games.lol/candy-crush-soda-saga/">https://games.lol/candy-crush-soda-saga/</a></li>
53
- </ul>
54
- </li>
55
- <li>Click the "Play" button to start the game. </li>
56
- <li>Enjoy playing Candy Crush Soda Saga online! </li>
57
- </ol>
58
- <h3>Tips and tricks for mastering Candy Crush Soda Saga</h3>
59
-
60
- <ul>
61
- <li>Pay attention to the objective of each level and plan your moves accordingly. </li>
62
- <li>Match candies near the bottom of the board to create cascades and clear more candies. </li>
63
- <li>Use special candies and boosters wisely and save them for difficult levels. </li>
64
- <li>Learn how to make the different special candy combinations, such as striped + wrapped, striped + fish, wrapped + fish, coloring + fish, etc.</li>
65
- <li>Know how to deal with the different types of blockers, such as chocolate, licorice, ice, honeycomb, etc.</li>
66
- <li>Keep an eye on the soda level and try to fill it or lower it depending on the mode. </li>
67
- <li>Don't waste moves and try to get as many stars as possible. </li>
68
- up. You can always replay levels or ask your friends for help. </li>
69
- </ul>
70
- <h2>What are the alternatives to Candy Crush Soda Saga? </h2>
71
- <p>Candy Crush Soda Saga is a great game, but it is not the only one of its kind. If you want to try other games similar to Candy Crush Soda Saga, you have plenty of options to choose from. Here are some of the alternatives to Candy Crush Soda Saga that you might like:</p>
72
- <h3>The other games in the Candy Crush franchise</h3>
73
- <p>If you like Candy Crush Soda Saga, you may also love the other games in the same franchise, such as:</p>
74
- <p></p>
75
- <ul>
76
- <li>Candy Crush Saga: The original, classic match-3 puzzle game that started it all. </li>
77
- <li>Candy Crush Jelly Saga: The third installment in the franchise, where you have to spread jelly and compete against the Jelly Queen.</li>
78
- <li>Candy Crush Friends Saga: The fourth and latest installment in the franchise, where you have to match candies and collect your friends. </li>
79
- </ul>
80
- <p>All of these games are free to play online on King.com or on your mobile devices, and they have gameplay and features similar to Candy Crush Soda Saga, but with different twists and challenges. </p>
81
- <h3>Similar match-3 puzzle games from other developers</h3>
82
-
83
- <ul>
84
- <li>Bejeweled: The classic, original match-3 puzzle game that inspired many others. </li>
85
- <li>Cookie Jam: A delicious, colorful match-3 puzzle game where you have to bake cookies and cakes. </li>
86
- <li>Gummy Drop: A sweet, adventurous match-3 puzzle game where you have to travel around the world and rebuild landmarks. </li>
87
- <li>Homescapes: A relaxing, fun match-3 puzzle game where you have to renovate a mansion and help a family. </li>
88
- <li>Toon Blast: A cartoonish, explosive match-3 puzzle game where you have to destroy cubes and create combos. </li>
89
- </ul>
90
- <p>All of these games are free to play online on various websites or on your mobile devices, and they have gameplay and features similar to Candy Crush Soda Saga, but with different themes and stories. </p>
91
- <h3>The pros and cons of playing alternatives to Candy Crush Soda Saga</h3>
92
- <p>Playing alternatives to Candy Crush Soda Saga can be a good way to spice up your gaming experience and try something new. However, there are also some pros and cons to playing alternatives to Candy Crush Soda Saga, such as:</p>
93
- <table>
94
- <tr><th>Pros</th><th>Cons</th></tr>
95
- <tr><td>You can discover new games and genres that you might enjoy. </td><td>You may get confused or feel overwhelmed by too many options. </td></tr>
96
- <tr><td>You can compare and contrast different games and find your favorite. </td><td>You may lose interest or motivation to play Candy Crush Soda Saga.</td></tr>
97
- <tr><td>You can challenge yourself with different levels and modes. </td><td>You may find some games too easy or too hard for your taste. </td></tr>
98
- <tr><td>You can have more fun and variety in your playtime. </td><td>You may spend too much time or money on games. </td></tr>
99
- </table>
100
- <h2>Conclusion</h2>
101
-
102
- <h3>Frequently asked questions</h3>
103
- <p>Here are some of the most frequently asked questions about Candy Crush Soda Saga:</p>
104
- <ol>
105
- <li>How can I get more lives in Candy Crush Soda Saga? </li>
106
- <p>You can get more lives in Candy Crush Soda Saga by doing one of the following: <ul>
107
- <li>Wait 30 minutes for each life to regenerate automatically. </li>
108
- <li>Buy more lives with gold bars, the game's premium currency. </li>
109
- <li>Change the date and time settings on your device to trick the game into giving you more lives. </li>
110
- </ul>
111
- </p>
112
- <li>How can I get more gold bars in Candy Crush Soda Saga? </li>
113
- <p>You can get more gold bars in Candy Crush Soda Saga by doing one of the following: <ul>
114
- <li>Complete the daily quests and challenges and earn rewards. </li>
115
- <li>Level up your season pass and unlock gold bars and other perks. </li>
116
- <li>Join a team or create your own and win events and competitions. </li>
117
- <li>Connect your game to your Facebook account and get free gold bars. </li>
118
- <li>Buy more gold bars with real money through in-app purchases. </li>
119
- </ul>
120
- </p>
121
- <li>How can I get more boosters in Candy Crush Soda Saga? </li>
122
- <p>You can get more boosters in Candy Crush Soda Saga by doing one of the following: <ul>
123
- <li>Spin the daily booster wheel and win a random booster every day. </li>
124
- <li>Play the Bubblegum Hill event and win boosters and other prizes. </li>
125
- <li>Collect stars and fill the Star Chaser meter to get free boosters. </li>
126
- <li>Watch video ads and get free boosters. </li>
127
- <li>Buy more boosters with gold bars or real money through in-app purchases. </li>
128
- </ul>
129
- </p>
130
- <li>How can I unlock new episodes in Candy Crush Soda Saga? </li>
131
- <p>You can unlock new episodes in Candy Crush Soda Saga by doing one of the following: <ul>
132
- <li>Complete all the levels in the previous episode. </li>
133
- <li>Ask your Facebook friends or team members to send you tickets. </li>
134
-
135
- </ul>
136
- </p>
137
- <li>How can I contact the Candy Crush Soda Saga support team? </li>
138
- <p>You can contact the Candy Crush Soda Saga support team by doing one of the following: <ul>
139
- <li>Visit the official King.com website and click the "Contact Us" button at the bottom of the page. </li>
140
- <li>Visit the official Candy Crush Soda Saga Facebook page and send a message to the page. </li>
141
- <li>Visit the official King.com forum and post your question or issue in the relevant section. </li>
142
- </ul>
143
- </p>
 
spaces/BertChristiaens/blip-diffusion/README.md DELETED
@@ -1,8 +0,0 @@
1
- ---
2
- license: openrail
3
- title: Blip Diffusion
4
- sdk: streamlit
5
- emoji: 🚀
6
- colorFrom: yellow
7
- colorTo: green
8
- ---
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/bcdoc/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License"). You
4
- # may not use this file except in compliance with the License. A copy of
5
- # the License is located at
6
- #
7
- # http://aws.amazon.com/apache2.0/
8
- #
9
- # or in the "license" file accompanying this file. This file is
10
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11
- # ANY KIND, either express or implied. See the License for the specific
12
- # language governing permissions and limitations under the License.
13
- __version__ = '0.16.0'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/resolvelib/resolvers.py DELETED
@@ -1,547 +0,0 @@
1
- import collections
2
- import itertools
3
- import operator
4
-
5
- from .providers import AbstractResolver
6
- from .structs import DirectedGraph, IteratorMapping, build_iter_view
7
-
8
- RequirementInformation = collections.namedtuple(
9
- "RequirementInformation", ["requirement", "parent"]
10
- )
11
-
12
-
13
- class ResolverException(Exception):
14
- """A base class for all exceptions raised by this module.
15
-
16
- Exceptions derived from this class should all be handled in this module. Any
17
- exception bubbling past the resolver should be treated as a bug.
18
- """
19
-
20
-
21
- class RequirementsConflicted(ResolverException):
22
- def __init__(self, criterion):
23
- super(RequirementsConflicted, self).__init__(criterion)
24
- self.criterion = criterion
25
-
26
- def __str__(self):
27
- return "Requirements conflict: {}".format(
28
- ", ".join(repr(r) for r in self.criterion.iter_requirement()),
29
- )
30
-
31
-
32
- class InconsistentCandidate(ResolverException):
33
- def __init__(self, candidate, criterion):
34
- super(InconsistentCandidate, self).__init__(candidate, criterion)
35
- self.candidate = candidate
36
- self.criterion = criterion
37
-
38
- def __str__(self):
39
- return "Provided candidate {!r} does not satisfy {}".format(
40
- self.candidate,
41
- ", ".join(repr(r) for r in self.criterion.iter_requirement()),
42
- )
43
-
44
-
45
- class Criterion(object):
46
- """Representation of possible resolution results of a package.
47
-
48
- This holds three attributes:
49
-
50
- * `information` is a collection of `RequirementInformation` pairs.
51
- Each pair is a requirement contributing to this criterion, and the
52
- candidate that provides the requirement.
53
- * `incompatibilities` is a collection of all known not-to-work candidates
54
- to exclude from consideration.
55
- * `candidates` is a collection containing all possible candidates deduced
56
- from the union of contributing requirements and known incompatibilities.
57
- It should never be empty, except when the criterion is an attribute of a
58
- raised `RequirementsConflicted` (in which case it is always empty).
59
-
60
- .. note::
61
- This class is intended to be externally immutable. **Do not** mutate
62
- any of its attribute containers.
63
- """
64
-
65
- def __init__(self, candidates, information, incompatibilities):
66
- self.candidates = candidates
67
- self.information = information
68
- self.incompatibilities = incompatibilities
69
-
70
- def __repr__(self):
71
- requirements = ", ".join(
72
- "({!r}, via={!r})".format(req, parent)
73
- for req, parent in self.information
74
- )
75
- return "Criterion({})".format(requirements)
76
-
77
- def iter_requirement(self):
78
- return (i.requirement for i in self.information)
79
-
80
- def iter_parent(self):
81
- return (i.parent for i in self.information)
82
-
83
-
84
- class ResolutionError(ResolverException):
85
- pass
86
-
87
-
88
- class ResolutionImpossible(ResolutionError):
89
- def __init__(self, causes):
90
- super(ResolutionImpossible, self).__init__(causes)
91
- # causes is a list of RequirementInformation objects
92
- self.causes = causes
93
-
94
-
95
- class ResolutionTooDeep(ResolutionError):
96
- def __init__(self, round_count):
97
- super(ResolutionTooDeep, self).__init__(round_count)
98
- self.round_count = round_count
99
-
100
-
101
- # Resolution state in a round.
102
- State = collections.namedtuple("State", "mapping criteria backtrack_causes")
103
-
104
-
105
- class Resolution(object):
106
- """Stateful resolution object.
107
-
108
- This is designed as a one-off object that holds information to kick start
109
- the resolution process, and holds the results afterwards.
110
- """
111
-
112
- def __init__(self, provider, reporter):
113
- self._p = provider
114
- self._r = reporter
115
- self._states = []
116
-
117
- @property
118
- def state(self):
119
- try:
120
- return self._states[-1]
121
- except IndexError:
122
- raise AttributeError("state")
123
-
124
- def _push_new_state(self):
125
- """Push a new state into history.
126
-
127
- This new state will be used to hold resolution results of the next
128
- coming round.
129
- """
130
- base = self._states[-1]
131
- state = State(
132
- mapping=base.mapping.copy(),
133
- criteria=base.criteria.copy(),
134
- backtrack_causes=base.backtrack_causes[:],
135
- )
136
- self._states.append(state)
137
-
138
- def _add_to_criteria(self, criteria, requirement, parent):
139
- self._r.adding_requirement(requirement=requirement, parent=parent)
140
-
141
- identifier = self._p.identify(requirement_or_candidate=requirement)
142
- criterion = criteria.get(identifier)
143
- if criterion:
144
- incompatibilities = list(criterion.incompatibilities)
145
- else:
146
- incompatibilities = []
147
-
148
- matches = self._p.find_matches(
149
- identifier=identifier,
150
- requirements=IteratorMapping(
151
- criteria,
152
- operator.methodcaller("iter_requirement"),
153
- {identifier: [requirement]},
154
- ),
155
- incompatibilities=IteratorMapping(
156
- criteria,
157
- operator.attrgetter("incompatibilities"),
158
- {identifier: incompatibilities},
159
- ),
160
- )
161
-
162
- if criterion:
163
- information = list(criterion.information)
164
- information.append(RequirementInformation(requirement, parent))
165
- else:
166
- information = [RequirementInformation(requirement, parent)]
167
-
168
- criterion = Criterion(
169
- candidates=build_iter_view(matches),
170
- information=information,
171
- incompatibilities=incompatibilities,
172
- )
173
- if not criterion.candidates:
174
- raise RequirementsConflicted(criterion)
175
- criteria[identifier] = criterion
176
-
177
- def _remove_information_from_criteria(self, criteria, parents):
178
- """Remove information from parents of criteria.
179
-
180
- Concretely, removes all values from each criterion's ``information``
181
- field that have one of ``parents`` as provider of the requirement.
182
-
183
- :param criteria: The criteria to update.
184
- :param parents: Identifiers for which to remove information from all criteria.
185
- """
186
- if not parents:
187
- return
188
- for key, criterion in criteria.items():
189
- criteria[key] = Criterion(
190
- criterion.candidates,
191
- [
192
- information
193
- for information in criterion.information
194
- if (
195
- information.parent is None
196
- or self._p.identify(information.parent) not in parents
197
- )
198
- ],
199
- criterion.incompatibilities,
200
- )
201
-
202
- def _get_preference(self, name):
203
- return self._p.get_preference(
204
- identifier=name,
205
- resolutions=self.state.mapping,
206
- candidates=IteratorMapping(
207
- self.state.criteria,
208
- operator.attrgetter("candidates"),
209
- ),
210
- information=IteratorMapping(
211
- self.state.criteria,
212
- operator.attrgetter("information"),
213
- ),
214
- backtrack_causes=self.state.backtrack_causes,
215
- )
216
-
217
- def _is_current_pin_satisfying(self, name, criterion):
218
- try:
219
- current_pin = self.state.mapping[name]
220
- except KeyError:
221
- return False
222
- return all(
223
- self._p.is_satisfied_by(requirement=r, candidate=current_pin)
224
- for r in criterion.iter_requirement()
225
- )
226
-
227
- def _get_updated_criteria(self, candidate):
228
- criteria = self.state.criteria.copy()
229
- for requirement in self._p.get_dependencies(candidate=candidate):
230
- self._add_to_criteria(criteria, requirement, parent=candidate)
231
- return criteria
232
-
233
- def _attempt_to_pin_criterion(self, name):
234
- criterion = self.state.criteria[name]
235
-
236
- causes = []
237
- for candidate in criterion.candidates:
238
- try:
239
- criteria = self._get_updated_criteria(candidate)
240
- except RequirementsConflicted as e:
241
- self._r.rejecting_candidate(e.criterion, candidate)
242
- causes.append(e.criterion)
243
- continue
244
-
245
- # Check the newly-pinned candidate actually works. This should
246
- # always pass under normal circumstances, but in the case of a
247
- # faulty provider, we will raise an error to notify the implementer
248
- # to fix find_matches() and/or is_satisfied_by().
249
- satisfied = all(
250
- self._p.is_satisfied_by(requirement=r, candidate=candidate)
251
- for r in criterion.iter_requirement()
252
- )
253
- if not satisfied:
254
- raise InconsistentCandidate(candidate, criterion)
255
-
256
- self._r.pinning(candidate=candidate)
257
- self.state.criteria.update(criteria)
258
-
259
- # Put newly-pinned candidate at the end. This is essential because
260
- # backtracking looks at this mapping to get the last pin.
261
- self.state.mapping.pop(name, None)
262
- self.state.mapping[name] = candidate
263
-
264
- return []
265
-
266
- # All candidates tried, nothing works. This criterion is a dead
267
- # end, signal for backtracking.
268
- return causes
269
-
270
- def _backjump(self, causes):
271
- """Perform backjumping.
272
-
273
- When we enter here, the stack is like this::
274
-
275
- [ state Z ]
276
- [ state Y ]
277
- [ state X ]
278
- .... earlier states are irrelevant.
279
-
280
- 1. No pins worked for Z, so it does not have a pin.
281
- 2. We want to reset state Y to unpinned, and pin another candidate.
282
- 3. State X holds what state Y was before the pin, but does not
283
- have the incompatibility information gathered in state Y.
284
-
285
- Each iteration of the loop will:
286
-
287
- 1. Identify Z. The incompatibility is not always caused by the latest
288
- state. For example, given three requirements A, B and C, with
289
- dependencies A1, B1 and C1, where A1 and B1 are incompatible: the
290
- last state might be related to C, so we want to discard the
291
- previous state.
292
- 2. Discard Z.
293
- 3. Discard Y but remember its incompatibility information gathered
294
- previously, and the failure we're dealing with right now.
295
- 4. Push a new state Y' based on X, and apply the incompatibility
296
- information from Y to Y'.
297
- 5a. If this causes Y' to conflict, we need to backtrack again. Make Y'
298
- the new Z and go back to step 2.
299
- 5b. If the incompatibilities apply cleanly, end backtracking.
300
- """
301
- incompatible_reqs = itertools.chain(
302
- (c.parent for c in causes if c.parent is not None),
303
- (c.requirement for c in causes),
304
- )
305
- incompatible_deps = {self._p.identify(r) for r in incompatible_reqs}
306
- while len(self._states) >= 3:
307
- # Remove the state that triggered backtracking.
308
- del self._states[-1]
309
-
310
- # Ensure to backtrack to a state that caused the incompatibility
311
- incompatible_state = False
312
- while not incompatible_state:
313
- # Retrieve the last candidate pin and known incompatibilities.
314
- try:
315
- broken_state = self._states.pop()
316
- name, candidate = broken_state.mapping.popitem()
317
- except (IndexError, KeyError):
318
- raise ResolutionImpossible(causes)
319
- current_dependencies = {
320
- self._p.identify(d)
321
- for d in self._p.get_dependencies(candidate)
322
- }
323
- incompatible_state = not current_dependencies.isdisjoint(
324
- incompatible_deps
325
- )
326
-
327
- incompatibilities_from_broken = [
328
- (k, list(v.incompatibilities))
329
- for k, v in broken_state.criteria.items()
330
- ]
331
-
332
- # Also mark the newly known incompatibility.
333
- incompatibilities_from_broken.append((name, [candidate]))
334
-
335
- # Create a new state from the last known-to-work one, and apply
336
- # the previously gathered incompatibility information.
337
- def _patch_criteria():
338
- for k, incompatibilities in incompatibilities_from_broken:
339
- if not incompatibilities:
340
- continue
341
- try:
342
- criterion = self.state.criteria[k]
343
- except KeyError:
344
- continue
345
- matches = self._p.find_matches(
346
- identifier=k,
347
- requirements=IteratorMapping(
348
- self.state.criteria,
349
- operator.methodcaller("iter_requirement"),
350
- ),
351
- incompatibilities=IteratorMapping(
352
- self.state.criteria,
353
- operator.attrgetter("incompatibilities"),
354
- {k: incompatibilities},
355
- ),
356
- )
357
- candidates = build_iter_view(matches)
358
- if not candidates:
359
- return False
360
- incompatibilities.extend(criterion.incompatibilities)
361
- self.state.criteria[k] = Criterion(
362
- candidates=candidates,
363
- information=list(criterion.information),
364
- incompatibilities=incompatibilities,
365
- )
366
- return True
367
-
368
- self._push_new_state()
369
- success = _patch_criteria()
370
-
371
- # It works! Let's work on this new state.
372
- if success:
373
- return True
374
-
375
- # State does not work after applying known incompatibilities.
376
- # Try the still previous state.
377
-
378
- # No way to backtrack anymore.
379
- return False
380
-
381
- def resolve(self, requirements, max_rounds):
382
- if self._states:
383
- raise RuntimeError("already resolved")
384
-
385
- self._r.starting()
386
-
387
- # Initialize the root state.
388
- self._states = [
389
- State(
390
- mapping=collections.OrderedDict(),
391
- criteria={},
392
- backtrack_causes=[],
393
- )
394
- ]
395
- for r in requirements:
396
- try:
397
- self._add_to_criteria(self.state.criteria, r, parent=None)
398
- except RequirementsConflicted as e:
399
- raise ResolutionImpossible(e.criterion.information)
400
-
401
- # The root state is saved as a sentinel so the first ever pin can have
402
- # something to backtrack to if it fails. The root state is basically
403
- # pinning the virtual "root" package in the graph.
404
- self._push_new_state()
405
-
406
- for round_index in range(max_rounds):
407
- self._r.starting_round(index=round_index)
408
-
409
- unsatisfied_names = [
410
- key
411
- for key, criterion in self.state.criteria.items()
412
- if not self._is_current_pin_satisfying(key, criterion)
413
- ]
414
-
415
- # All criteria are accounted for. Nothing more to pin, we are done!
416
- if not unsatisfied_names:
417
- self._r.ending(state=self.state)
418
- return self.state
419
-
420
- # keep track of satisfied names to calculate diff after pinning
421
- satisfied_names = set(self.state.criteria.keys()) - set(
422
- unsatisfied_names
423
- )
424
-
425
- # Choose the most preferred unpinned criterion to try.
426
- name = min(unsatisfied_names, key=self._get_preference)
427
- failure_causes = self._attempt_to_pin_criterion(name)
428
-
429
- if failure_causes:
430
- causes = [i for c in failure_causes for i in c.information]
431
- # Backjump if pinning fails. The backjump process puts us in
432
- # an unpinned state, so we can work on it in the next round.
433
- self._r.resolving_conflicts(causes=causes)
434
- success = self._backjump(causes)
435
- self.state.backtrack_causes[:] = causes
436
-
437
- # Dead ends everywhere. Give up.
438
- if not success:
439
- raise ResolutionImpossible(self.state.backtrack_causes)
440
- else:
441
- # discard as information sources any invalidated names
442
- # (unsatisfied names that were previously satisfied)
443
- newly_unsatisfied_names = {
444
- key
445
- for key, criterion in self.state.criteria.items()
446
- if key in satisfied_names
447
- and not self._is_current_pin_satisfying(key, criterion)
448
- }
449
- self._remove_information_from_criteria(
450
- self.state.criteria, newly_unsatisfied_names
451
- )
452
- # Pinning was successful. Push a new state to do another pin.
453
- self._push_new_state()
454
-
455
- self._r.ending_round(index=round_index, state=self.state)
456
-
457
- raise ResolutionTooDeep(max_rounds)
458
-
459
-
460
- def _has_route_to_root(criteria, key, all_keys, connected):
461
- if key in connected:
462
- return True
463
- if key not in criteria:
464
- return False
465
- for p in criteria[key].iter_parent():
466
- try:
467
- pkey = all_keys[id(p)]
468
- except KeyError:
469
- continue
470
- if pkey in connected:
471
- connected.add(key)
472
- return True
473
- if _has_route_to_root(criteria, pkey, all_keys, connected):
474
- connected.add(key)
475
- return True
476
- return False
477
-
478
-
479
- Result = collections.namedtuple("Result", "mapping graph criteria")
480
-
481
-
482
- def _build_result(state):
483
- mapping = state.mapping
484
- all_keys = {id(v): k for k, v in mapping.items()}
485
- all_keys[id(None)] = None
486
-
487
- graph = DirectedGraph()
488
- graph.add(None) # Sentinel as root dependencies' parent.
489
-
490
- connected = {None}
491
- for key, criterion in state.criteria.items():
492
- if not _has_route_to_root(state.criteria, key, all_keys, connected):
493
- continue
494
- if key not in graph:
495
- graph.add(key)
496
- for p in criterion.iter_parent():
497
- try:
498
- pkey = all_keys[id(p)]
499
- except KeyError:
500
- continue
501
- if pkey not in graph:
502
- graph.add(pkey)
503
- graph.connect(pkey, key)
504
-
505
- return Result(
506
- mapping={k: v for k, v in mapping.items() if k in connected},
507
- graph=graph,
508
- criteria=state.criteria,
509
- )
510
-
511
-
512
- class Resolver(AbstractResolver):
513
- """The thing that performs the actual resolution work."""
514
-
515
- base_exception = ResolverException
516
-
517
- def resolve(self, requirements, max_rounds=100):
518
- """Take a collection of constraints, spit out the resolution result.
519
-
520
- The return value is a representation of the final resolution result. It
521
- is a tuple subclass with three public members:
522
-
523
- * `mapping`: A dict of resolved candidates. Each key is an identifier
524
- of a requirement (as returned by the provider's `identify` method),
525
- and the value is the resolved candidate.
526
- * `graph`: A `DirectedGraph` instance representing the dependency tree.
527
- The vertices are keys of `mapping`, and each edge represents *why*
528
- a particular package is included. A special vertex `None` is
529
- included to represent parents of user-supplied requirements.
530
- * `criteria`: A dict of "criteria" that hold detailed information on
531
- how edges in the graph are derived. Each key is an identifier of a
532
- requirement, and the value is a `Criterion` instance.
533
-
534
- The following exceptions may be raised if a resolution cannot be found:
535
-
536
- * `ResolutionImpossible`: A resolution cannot be found for the given
537
- combination of requirements. The `causes` attribute of the
538
- exception is a list of (requirement, parent), giving the
539
- requirements that could not be satisfied.
540
- * `ResolutionTooDeep`: The dependency tree is too deeply nested and
541
- the resolver gave up. This is usually caused by a circular
542
- dependency, but you can try to resolve this by increasing the
543
- `max_rounds` argument.
544
- """
545
- resolution = Resolution(self.provider, self.reporter)
546
- state = resolution.resolve(requirements, max_rounds=max_rounds)
547
- return _build_result(state)
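
The `Resolver.resolve` docstring above describes a provider-driven API: the resolver itself is generic, and all package-specific knowledge lives in a provider exposing `identify`, `get_preference`, `find_matches`, `is_satisfied_by`, and `get_dependencies`. The sketch below is a minimal, hypothetical illustration of that contract, not code from this file; it assumes the standalone `resolvelib` package (which this module is vendored from), and the `INDEX` data and `ToyProvider` class are invented for the example.

# Minimal provider sketch for the resolver API documented above (illustrative only).
from resolvelib import AbstractProvider, BaseReporter, Resolver

# Toy "package index": name -> {version: [dependency names]} (invented data).
INDEX = {
    "a": {1: ["b"], 2: ["b"]},
    "b": {1: []},
}

class ToyProvider(AbstractProvider):
    def identify(self, requirement_or_candidate):
        # Requirements are bare names; candidates are (name, version) tuples.
        if isinstance(requirement_or_candidate, tuple):
            return requirement_or_candidate[0]
        return requirement_or_candidate

    def get_preference(self, identifier, resolutions, candidates, information, backtrack_causes):
        # Resolve identifiers in name order; real providers use smarter heuristics.
        return identifier

    def find_matches(self, identifier, requirements, incompatibilities):
        # Return candidates best-first, excluding known-bad ones; a real provider
        # would also filter against every requirement for this identifier.
        excluded = set(incompatibilities[identifier])
        return [
            (identifier, version)
            for version in sorted(INDEX[identifier], reverse=True)
            if (identifier, version) not in excluded
        ]

    def is_satisfied_by(self, requirement, candidate):
        # A bare-name requirement is satisfied by any candidate of that name.
        return candidate[0] == requirement

    def get_dependencies(self, candidate):
        name, version = candidate
        return list(INDEX[name][version])

result = Resolver(ToyProvider(), BaseReporter()).resolve(["a"])
print(result.mapping)  # expected: {'a': ('a', 2), 'b': ('b', 1)}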
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/zipp.py DELETED
@@ -1,329 +0,0 @@
1
- import io
2
- import posixpath
3
- import zipfile
4
- import itertools
5
- import contextlib
6
- import sys
7
- import pathlib
8
-
9
- if sys.version_info < (3, 7):
10
- from collections import OrderedDict
11
- else:
12
- OrderedDict = dict
13
-
14
-
15
- __all__ = ['Path']
16
-
17
-
18
- def _parents(path):
19
- """
20
- Given a path with elements separated by
21
- posixpath.sep, generate all parents of that path.
22
-
23
- >>> list(_parents('b/d'))
24
- ['b']
25
- >>> list(_parents('/b/d/'))
26
- ['/b']
27
- >>> list(_parents('b/d/f/'))
28
- ['b/d', 'b']
29
- >>> list(_parents('b'))
30
- []
31
- >>> list(_parents(''))
32
- []
33
- """
34
- return itertools.islice(_ancestry(path), 1, None)
35
-
36
-
37
- def _ancestry(path):
38
- """
39
- Given a path with elements separated by
40
- posixpath.sep, generate all elements of that path
41
-
42
- >>> list(_ancestry('b/d'))
43
- ['b/d', 'b']
44
- >>> list(_ancestry('/b/d/'))
45
- ['/b/d', '/b']
46
- >>> list(_ancestry('b/d/f/'))
47
- ['b/d/f', 'b/d', 'b']
48
- >>> list(_ancestry('b'))
49
- ['b']
50
- >>> list(_ancestry(''))
51
- []
52
- """
53
- path = path.rstrip(posixpath.sep)
54
- while path and path != posixpath.sep:
55
- yield path
56
- path, tail = posixpath.split(path)
57
-
58
-
59
- _dedupe = OrderedDict.fromkeys
60
- """Deduplicate an iterable in original order"""
61
-
62
-
63
- def _difference(minuend, subtrahend):
64
- """
65
- Return items in minuend not in subtrahend, retaining order
66
- with O(1) lookup.
67
- """
68
- return itertools.filterfalse(set(subtrahend).__contains__, minuend)
69
-
70
-
71
- class CompleteDirs(zipfile.ZipFile):
72
- """
73
- A ZipFile subclass that ensures that implied directories
74
- are always included in the namelist.
75
- """
76
-
77
- @staticmethod
78
- def _implied_dirs(names):
79
- parents = itertools.chain.from_iterable(map(_parents, names))
80
- as_dirs = (p + posixpath.sep for p in parents)
81
- return _dedupe(_difference(as_dirs, names))
82
-
83
- def namelist(self):
84
- names = super(CompleteDirs, self).namelist()
85
- return names + list(self._implied_dirs(names))
86
-
87
- def _name_set(self):
88
- return set(self.namelist())
89
-
90
- def resolve_dir(self, name):
91
- """
92
- If the name represents a directory, return that name
93
- as a directory (with the trailing slash).
94
- """
95
- names = self._name_set()
96
- dirname = name + '/'
97
- dir_match = name not in names and dirname in names
98
- return dirname if dir_match else name
99
-
100
- @classmethod
101
- def make(cls, source):
102
- """
103
- Given a source (filename or zipfile), return an
104
- appropriate CompleteDirs subclass.
105
- """
106
- if isinstance(source, CompleteDirs):
107
- return source
108
-
109
- if not isinstance(source, zipfile.ZipFile):
110
- return cls(_pathlib_compat(source))
111
-
112
- # Only allow for FastLookup when supplied zipfile is read-only
113
- if 'r' not in source.mode:
114
- cls = CompleteDirs
115
-
116
- source.__class__ = cls
117
- return source
118
-
119
-
120
- class FastLookup(CompleteDirs):
121
- """
122
- ZipFile subclass to ensure implicit
123
- dirs exist and are resolved rapidly.
124
- """
125
-
126
- def namelist(self):
127
- with contextlib.suppress(AttributeError):
128
- return self.__names
129
- self.__names = super(FastLookup, self).namelist()
130
- return self.__names
131
-
132
- def _name_set(self):
133
- with contextlib.suppress(AttributeError):
134
- return self.__lookup
135
- self.__lookup = super(FastLookup, self)._name_set()
136
- return self.__lookup
137
-
138
-
139
- def _pathlib_compat(path):
140
- """
141
- For path-like objects, convert to a filename for compatibility
142
- on Python 3.6.1 and earlier.
143
- """
144
- try:
145
- return path.__fspath__()
146
- except AttributeError:
147
- return str(path)
148
-
149
-
150
- class Path:
151
- """
152
- A pathlib-compatible interface for zip files.
153
-
154
- Consider a zip file with this structure::
155
-
156
- .
157
- ├── a.txt
158
- └── b
159
- ├── c.txt
160
- └── d
161
- └── e.txt
162
-
163
- >>> data = io.BytesIO()
164
- >>> zf = zipfile.ZipFile(data, 'w')
165
- >>> zf.writestr('a.txt', 'content of a')
166
- >>> zf.writestr('b/c.txt', 'content of c')
167
- >>> zf.writestr('b/d/e.txt', 'content of e')
168
- >>> zf.filename = 'mem/abcde.zip'
169
-
170
- Path accepts the zipfile object itself or a filename
171
-
172
- >>> root = Path(zf)
173
-
174
- From there, several path operations are available.
175
-
176
- Directory iteration (including the zip file itself):
177
-
178
- >>> a, b = root.iterdir()
179
- >>> a
180
- Path('mem/abcde.zip', 'a.txt')
181
- >>> b
182
- Path('mem/abcde.zip', 'b/')
183
-
184
- name property:
185
-
186
- >>> b.name
187
- 'b'
188
-
189
- join with divide operator:
190
-
191
- >>> c = b / 'c.txt'
192
- >>> c
193
- Path('mem/abcde.zip', 'b/c.txt')
194
- >>> c.name
195
- 'c.txt'
196
-
197
- Read text:
198
-
199
- >>> c.read_text()
200
- 'content of c'
201
-
202
- existence:
203
-
204
- >>> c.exists()
205
- True
206
- >>> (b / 'missing.txt').exists()
207
- False
208
-
209
- Coercion to string:
210
-
211
- >>> import os
212
- >>> str(c).replace(os.sep, posixpath.sep)
213
- 'mem/abcde.zip/b/c.txt'
214
-
215
- At the root, ``name``, ``filename``, and ``parent``
216
- resolve to the zipfile. Note these attributes are not
217
- valid and will raise a ``ValueError`` if the zipfile
218
- has no filename.
219
-
220
- >>> root.name
221
- 'abcde.zip'
222
- >>> str(root.filename).replace(os.sep, posixpath.sep)
223
- 'mem/abcde.zip'
224
- >>> str(root.parent)
225
- 'mem'
226
- """
227
-
228
- __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
229
-
230
- def __init__(self, root, at=""):
231
- """
232
- Construct a Path from a ZipFile or filename.
233
-
234
- Note: When the source is an existing ZipFile object,
235
- its type (__class__) will be mutated to a
236
- specialized type. If the caller wishes to retain the
237
- original type, the caller should either create a
238
- separate ZipFile object or pass a filename.
239
- """
240
- self.root = FastLookup.make(root)
241
- self.at = at
242
-
243
- def open(self, mode='r', *args, pwd=None, **kwargs):
244
- """
245
- Open this entry as text or binary following the semantics
246
- of ``pathlib.Path.open()`` by passing arguments through
247
- to io.TextIOWrapper().
248
- """
249
- if self.is_dir():
250
- raise IsADirectoryError(self)
251
- zip_mode = mode[0]
252
- if not self.exists() and zip_mode == 'r':
253
- raise FileNotFoundError(self)
254
- stream = self.root.open(self.at, zip_mode, pwd=pwd)
255
- if 'b' in mode:
256
- if args or kwargs:
257
- raise ValueError("encoding args invalid for binary operation")
258
- return stream
259
- return io.TextIOWrapper(stream, *args, **kwargs)
260
-
261
- @property
262
- def name(self):
263
- return pathlib.Path(self.at).name or self.filename.name
264
-
265
- @property
266
- def suffix(self):
267
- return pathlib.Path(self.at).suffix or self.filename.suffix
268
-
269
- @property
270
- def suffixes(self):
271
- return pathlib.Path(self.at).suffixes or self.filename.suffixes
272
-
273
- @property
274
- def stem(self):
275
- return pathlib.Path(self.at).stem or self.filename.stem
276
-
277
- @property
278
- def filename(self):
279
- return pathlib.Path(self.root.filename).joinpath(self.at)
280
-
281
- def read_text(self, *args, **kwargs):
282
- with self.open('r', *args, **kwargs) as strm:
283
- return strm.read()
284
-
285
- def read_bytes(self):
286
- with self.open('rb') as strm:
287
- return strm.read()
288
-
289
- def _is_child(self, path):
290
- return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
291
-
292
- def _next(self, at):
293
- return self.__class__(self.root, at)
294
-
295
- def is_dir(self):
296
- return not self.at or self.at.endswith("/")
297
-
298
- def is_file(self):
299
- return self.exists() and not self.is_dir()
300
-
301
- def exists(self):
302
- return self.at in self.root._name_set()
303
-
304
- def iterdir(self):
305
- if not self.is_dir():
306
- raise ValueError("Can't listdir a file")
307
- subs = map(self._next, self.root.namelist())
308
- return filter(self._is_child, subs)
309
-
310
- def __str__(self):
311
- return posixpath.join(self.root.filename, self.at)
312
-
313
- def __repr__(self):
314
- return self.__repr.format(self=self)
315
-
316
- def joinpath(self, *other):
317
- next = posixpath.join(self.at, *map(_pathlib_compat, other))
318
- return self._next(self.root.resolve_dir(next))
319
-
320
- __truediv__ = joinpath
321
-
322
- @property
323
- def parent(self):
324
- if not self.at:
325
- return self.filename.parent
326
- parent_at = posixpath.dirname(self.at.rstrip('/'))
327
- if parent_at:
328
- parent_at += '/'
329
- return self._next(parent_at)
 
spaces/BillBojangeles2000/bart-large-cnn-samsum/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Bart Large Cnn Samsum
3
- emoji: 🏢
4
- colorFrom: pink
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.33.1
8
- app_file: app.py
9
- pinned: false
10
- license: bigcode-openrail-m
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Brasd99/TTS-Voice-Cloner/app.py DELETED
@@ -1,101 +0,0 @@
1
- from TTS.api import TTS
2
- from bs4 import BeautifulSoup
3
- import requests
4
- import streamlit as st
5
- import tempfile
6
- import os
7
- import json
8
- import datetime
9
-
10
- with open('config.json', 'r') as f:
11
- config = json.load(f)
12
-
13
- APP_NAME = config['APP_NAME']
14
- APP_LOGO = config['APP_LOGO']
15
- APP_DESCRIPTION = config['APP_DESCRIPTION']
16
- LANGUAGES_URL = config['LANGUAGES_URL']
17
-
18
- def contains_only_ascii(input_string):
19
- return all(ord(char) < 128 for char in input_string)
20
-
21
- def get_iso_languages():
22
- response = requests.get(LANGUAGES_URL)
23
- soup = BeautifulSoup(response.text, 'html.parser')
24
-
25
- p_tags = soup.find_all('p')
26
-
27
- iso_language_dict = {}
28
-
29
- for p_tag in p_tags[1:]: # Skipping the first <p> which contains the header
30
- parts = p_tag.get_text().split()
31
- if len(parts) == 2:
32
- iso_code, language_name = parts
33
- if contains_only_ascii(language_name):
34
- iso_language_dict[language_name] = iso_code
35
-
36
- return iso_language_dict
37
-
38
- def create_temp_file(input_wav):
39
- temp_file = tempfile.NamedTemporaryFile(delete=False)
40
- temp_file.write(input_wav.read())
41
- return temp_file
42
-
43
- def remove_temp_file(temp_file):
44
- temp_file.close()
45
- os.remove(temp_file.name)
46
-
47
- def update_progress(percent, text):
48
- progress_bar.progress(percent)
49
- status_text.text(text)
50
-
51
- iso_languages = get_iso_languages()
52
- languages = list(iso_languages.keys())
53
-
54
- st.set_page_config(page_title=APP_NAME)
55
- st.title(APP_NAME)
56
- st.image(APP_LOGO, use_column_width=True)
57
- st.markdown(APP_DESCRIPTION)
58
-
59
- language = st.selectbox('Select a language', languages)
60
- prompt = st.text_input('Enter your prompt')
61
- input_wav = st.file_uploader("Upload a WAV file", type=["wav"])
62
-
63
- if input_wav:
64
- if not input_wav or input_wav is None:
65
- st.error('Please upload wav input audio')
66
- elif not prompt:
67
- st.error('Please write prompt')
68
- else:
69
- progress_bar = st.progress(0)
70
- status_text = st.empty()
71
-
72
- current_datetime = datetime.datetime.now()
73
- formatted_datetime = current_datetime.strftime("%Y-%m-%d_%H%M%S")
74
- output_filename = f"recording_{formatted_datetime}.wav"
75
-
76
- temp_file = create_temp_file(input_wav)
77
-
78
- iso_code = iso_languages[language]
79
-
80
- print(f'Language: {language}, prompt: {prompt}')
81
-
82
- update_progress(0, 'Loading TTS model...')
83
- api = TTS(f"tts_models/{iso_code}/fairseq/vits")
84
-
85
- update_progress(50, 'Generating audio...')
86
- api.tts_with_vc_to_file(
87
- prompt,
88
- speaker_wav=temp_file.name,
89
- file_path=output_filename
90
- )
91
-
92
- remove_temp_file(temp_file)
93
-
94
- audio_file = open(output_filename, 'rb')
95
- audio_bytes = audio_file.read()
96
-
97
- update_progress(100, 'Audio generated successfully!')
98
-
99
- st.audio(audio_bytes, format='audio/wav')
100
-
101
- st.download_button('Download WAV', data=audio_bytes, file_name='output.wav')
 
spaces/CMU-80100/80-100-Pre-Writing-Chatbot-Section-H/hf_streaming_chatbot.py DELETED
@@ -1,112 +0,0 @@
1
- import openai
2
- import os
3
- import os.path
4
- import gradio
5
- from datetime import date
6
- from datetime import datetime
7
- import _thread
8
-
9
- # import the prompts here:
10
- from prompts import debate_prompt_1
11
-
12
-
13
- #########################################
14
-
15
- openai.api_key = os.getenv("OPENAI_API_KEY")
16
-
17
- print("OPENAI_API_KEY Working...\n")
18
-
19
- users = {(os.getenv("user1"), os.getenv("PASSWORD")),(os.getenv("user2"), os.getenv("PASSWORD")),
20
- (os.getenv("user3"), os.getenv("PASSWORD")),(os.getenv("user4"), os.getenv("PASSWORD")),
21
- (os.getenv("user5"), os.getenv("PASSWORD")),(os.getenv("user6"), os.getenv("PASSWORD")),
22
- (os.getenv("user7"), os.getenv("PASSWORD")),(os.getenv("user8"), os.getenv("PASSWORD")),
23
- (os.getenv("user9"), os.getenv("PASSWORD")),(os.getenv("user10"), os.getenv("PASSWORD")),
24
- (os.getenv("user11"), os.getenv("PASSWORD")),(os.getenv("user12"), os.getenv("PASSWORD")),
25
- (os.getenv("user13"), os.getenv("PASSWORD")),(os.getenv("user14"), os.getenv("PASSWORD")),
26
- (os.getenv("user15"), os.getenv("PASSWORD")),(os.getenv("user16"), os.getenv("PASSWORD")),
27
- (os.getenv("user17"), os.getenv("PASSWORD")),(os.getenv("user18"), os.getenv("PASSWORD"))}
28
-
29
- currentUsers = []
30
- user_num = -1
31
-
32
- def authorization(username, password):
33
- if (username, password) in users:
34
- currentUsers.append(username)
35
- global user_num
36
- user_num += 1
37
- print(currentUsers, user_num)
38
- return True
39
- else:
40
- return False
41
-
42
-
43
- # now = datetime.now()
44
- today = date.today()
45
- # start_time = now.strftime("%H:%M:%S")
46
-
47
- output = []
48
-
49
- ############## STREAMING VERSION W/O FLAGGING ##################################
50
-
51
- def predict(message, history):
52
- history_openai_format = [{"role": "system", "content": debate_prompt_1}]
53
-
54
- for human, assistant in history:
55
- history_openai_format.append({"role": "user", "content": human })
56
- history_openai_format.append({"role": "assistant", "content":assistant})
57
- output.append(f"{currentUsers[0]}: {human}\n\n")
58
- output.append(f"gpt-4: {assistant}\n\n")
59
- history_openai_format.append({"role": "user", "content": message})
60
-
61
- # print(currentUsers[user_num])
62
- # with open(f'activity/{currentUsers[user_num]}_({today}).txt', 'w') as f:
63
- # if (len(output) > 2):
64
- # f.write(f"{output[-2]}\n\n")
65
- # f.write(f"{output[-1]}\n\n")
66
-
67
- response = openai.ChatCompletion.create(
68
- model='gpt-4',
69
- messages= history_openai_format,
70
- temperature=0.8,
71
- max_tokens=512,
72
- top_p=1,
73
- stream=True
74
- )
75
-
76
- partial_message = ""
77
- for chunk in response:
78
- if len(chunk['choices'][0]['delta']) != 0:
79
- partial_message = partial_message + chunk['choices'][0]['delta']['content']
80
- yield partial_message
81
-
82
- # if message == 'exit':
83
- # _thread.interrupt_main()
84
-
85
- gradio.ChatInterface(fn = predict,
86
- title = "80-100 Pre-Writing AI Assistant Chatbot",
87
- description = "Welcome to the 80-100 Pre-Writing AI Chatbot.\n This bot is designed to discuss the readings, create outlines, and a variety of pre-writing tasks.\nRemember to copy and paste your interaction to a document. Conversations are not saved.\n Please start the discussion by asking: What is your job?",
88
-
89
- ).queue().launch(auth = authorization)
90
-
91
- ################################################################################
92
-
93
- # today = date.today()
94
- # now2 = datetime.now()
95
- # end_time = now2.strftime("%H:%M:%S")
96
-
97
- # addition = ""
98
-
99
- # if (os.path.isfile(f'activity/{currentUsers[0]}_({today}).txt')):
100
- # counter = 1
101
- # addition = f"-{counter}"
102
- # while(os.path.isfile(f'activity/{currentUsers[0]}_({today}){addition}.txt')):
103
- # counter += 1
104
- # addition = f"-{counter}"
105
-
106
- # with open(f'activity/{currentUsers[0]}_({today}){addition}.txt', 'w') as f:
107
- # f.write(f"Start of Session: {start_time} \n")
108
- # f.write(f"End of Session: {end_time} \n\n")
109
- # f.writelines(output)
110
- # f.write('------End of Session------')
111
-
112
- # print("Activity has been logged in the history folder. Have a nice day!")
 
spaces/CVPR/LIVE/thrust/testing/cuda/stream_per_thread.cmake DELETED
@@ -1,11 +0,0 @@
1
- # This test should always use per-thread streams on NVCC.
2
- set_target_properties(${test_target} PROPERTIES
3
- COMPILE_OPTIONS
4
- $<$<AND:$<COMPILE_LANGUAGE:CUDA>,$<CUDA_COMPILER_ID:NVIDIA>>:--default-stream=per-thread>
5
- )
6
-
7
- # NVC++ does not have an equivalent option, and will always
8
- # use the global stream by default.
9
- if (CMAKE_CUDA_COMPILER_ID STREQUAL "Feta")
10
- set_tests_properties(${test_target} PROPERTIES WILL_FAIL ON)
11
- endif()
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/regionclip-demo/detectron2/evaluation/fast_eval_api.py DELETED
@@ -1,121 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import copy
3
- import logging
4
- import numpy as np
5
- import time
6
- from pycocotools.cocoeval import COCOeval
7
-
8
- from detectron2 import _C
9
-
10
- logger = logging.getLogger(__name__)
11
-
12
-
13
- class COCOeval_opt(COCOeval):
14
- """
15
- This is a slightly modified version of the original COCO API, where the functions evaluateImg()
16
- and accumulate() are implemented in C++ to speedup evaluation
17
- """
18
-
19
- def evaluate(self):
20
- """
21
- Run per image evaluation on given images and store results in self.evalImgs_cpp, a
22
- datastructure that isn't readable from Python but is used by a c++ implementation of
23
- accumulate(). Unlike the original COCO PythonAPI, we don't populate the datastructure
24
- self.evalImgs because this datastructure is a computational bottleneck.
25
- :return: None
26
- """
27
- tic = time.time()
28
-
29
- p = self.params
30
- # add backward compatibility if useSegm is specified in params
31
- if p.useSegm is not None:
32
- p.iouType = "segm" if p.useSegm == 1 else "bbox"
33
- logger.info("Evaluate annotation type *{}*".format(p.iouType))
34
- p.imgIds = list(np.unique(p.imgIds))
35
- if p.useCats:
36
- p.catIds = list(np.unique(p.catIds))
37
- p.maxDets = sorted(p.maxDets)
38
- self.params = p
39
-
40
- self._prepare() # bottleneck
41
-
42
- # loop through images, area range, max detection number
43
- catIds = p.catIds if p.useCats else [-1]
44
-
45
- if p.iouType == "segm" or p.iouType == "bbox":
46
- computeIoU = self.computeIoU
47
- elif p.iouType == "keypoints":
48
- computeIoU = self.computeOks
49
- self.ious = {
50
- (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds
51
- } # bottleneck
52
-
53
- maxDet = p.maxDets[-1]
54
-
55
- # <<<< Beginning of code differences with original COCO API
56
- def convert_instances_to_cpp(instances, is_det=False):
57
- # Convert annotations for a list of instances in an image to a format that's fast
58
- # to access in C++
59
- instances_cpp = []
60
- for instance in instances:
61
- instance_cpp = _C.InstanceAnnotation(
62
- int(instance["id"]),
63
- instance["score"] if is_det else instance.get("score", 0.0),
64
- instance["area"],
65
- bool(instance.get("iscrowd", 0)),
66
- bool(instance.get("ignore", 0)),
67
- )
68
- instances_cpp.append(instance_cpp)
69
- return instances_cpp
70
-
71
- # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++
72
- ground_truth_instances = [
73
- [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds]
74
- for imgId in p.imgIds
75
- ]
76
- detected_instances = [
77
- [convert_instances_to_cpp(self._dts[imgId, catId], is_det=True) for catId in p.catIds]
78
- for imgId in p.imgIds
79
- ]
80
- ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds]
81
-
82
- if not p.useCats:
83
- # For each image, flatten per-category lists into a single list
84
- ground_truth_instances = [[[o for c in i for o in c]] for i in ground_truth_instances]
85
- detected_instances = [[[o for c in i for o in c]] for i in detected_instances]
86
-
87
- # Call C++ implementation of self.evaluateImgs()
88
- self._evalImgs_cpp = _C.COCOevalEvaluateImages(
89
- p.areaRng, maxDet, p.iouThrs, ious, ground_truth_instances, detected_instances
90
- )
91
- self._evalImgs = None
92
-
93
- self._paramsEval = copy.deepcopy(self.params)
94
- toc = time.time()
95
- logger.info("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic))
96
- # >>>> End of code differences with original COCO API
97
-
98
- def accumulate(self):
99
- """
100
- Accumulate per image evaluation results and store the result in self.eval. Does not
101
- support changing parameter settings from those used by self.evaluate()
102
- """
103
- logger.info("Accumulating evaluation results...")
104
- tic = time.time()
105
- assert hasattr(
106
- self, "_evalImgs_cpp"
107
- ), "evaluate() must be called before accmulate() is called."
108
-
109
- self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp)
110
-
111
- # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections
112
- self.eval["recall"] = np.array(self.eval["recall"]).reshape(
113
- self.eval["counts"][:1] + self.eval["counts"][2:]
114
- )
115
-
116
- # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X
117
- # num_area_ranges X num_max_detections
118
- self.eval["precision"] = np.array(self.eval["precision"]).reshape(self.eval["counts"])
119
- self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"])
120
- toc = time.time()
121
- logger.info("COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic))
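
The docstring above presents `COCOeval_opt` as a drop-in replacement for pycocotools' `COCOeval`, with `evaluate()` and `accumulate()` moved to C++. A minimal usage sketch follows; it assumes a detectron2 build with the compiled `_C` extension, and the annotation and detection file paths are placeholders.

# Illustrative sketch only; file paths are placeholders.
from pycocotools.coco import COCO
from detectron2.evaluation.fast_eval_api import COCOeval_opt

coco_gt = COCO("instances_val.json")          # COCO-format ground truth (placeholder path)
coco_dt = coco_gt.loadRes("detections.json")  # COCO-format detection results (placeholder path)

coco_eval = COCOeval_opt(coco_gt, coco_dt, iouType="bbox")
coco_eval.evaluate()    # per-image evaluation (C++ backend)
coco_eval.accumulate()  # accumulation (C++ backend)
coco_eval.summarize()   # standard AP/AR table, inherited from COCOeval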
 
spaces/CVPR/regionclip-demo/detectron2/modeling/roi_heads/fast_rcnn.py DELETED
@@ -1,1086 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import logging
3
- from typing import Dict, List, Tuple, Union
4
- import torch
5
- from fvcore.nn import giou_loss, smooth_l1_loss
6
- from torch import nn
7
- from torch.nn import functional as F
8
-
9
- from detectron2.config import configurable
10
- from detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple
11
- from detectron2.layers.soft_nms import batched_soft_nms
12
- from detectron2.modeling.box_regression import Box2BoxTransform
13
- from detectron2.structures import Boxes, Instances
14
- from detectron2.utils.events import get_event_storage
15
-
16
- __all__ = ["fast_rcnn_inference", "FastRCNNOutputLayers", "CLIPFastRCNNOutputLayers"]
17
-
18
-
19
- logger = logging.getLogger(__name__)
20
-
21
- """
22
- Shape shorthand in this module:
23
-
24
- N: number of images in the minibatch
25
- R: number of ROIs, combined over all images, in the minibatch
26
- Ri: number of ROIs in image i
27
- K: number of foreground classes. E.g., there are 80 foreground classes in COCO.
28
-
29
- Naming convention:
30
-
31
- deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box
32
- transform (see :class:`box_regression.Box2BoxTransform`).
33
-
34
- pred_class_logits: predicted class scores in [-inf, +inf]; use
35
- softmax(pred_class_logits) to estimate P(class).
36
-
37
- gt_classes: ground-truth classification labels in [0, K], where [0, K) represent
38
- foreground object classes and K represents the background class.
39
-
40
- pred_proposal_deltas: predicted box2box transform deltas for transforming proposals
41
- to detection box predictions.
42
-
43
- gt_proposal_deltas: ground-truth box2box transform deltas
44
- """
45
-
46
-
47
- def fast_rcnn_inference(
48
- boxes: List[torch.Tensor],
49
- scores: List[torch.Tensor],
50
- image_shapes: List[Tuple[int, int]],
51
- score_thresh: float,
52
- nms_thresh: float,
53
- soft_nms_enabled,
54
- soft_nms_method,
55
- soft_nms_sigma,
56
- soft_nms_prune,
57
- topk_per_image: int,
58
- scores_bf_multiply,
59
- ):
60
- """
61
- Call `fast_rcnn_inference_single_image` for all images.
62
-
63
- Args:
64
- boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
65
- boxes for each image. Element i has shape (Ri, K * 4) if doing
66
- class-specific regression, or (Ri, 4) if doing class-agnostic
67
- regression, where Ri is the number of predicted objects for image i.
68
- This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.
69
- scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
70
- Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
71
- for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.
72
- image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
73
- score_thresh (float): Only return detections with a confidence score exceeding this
74
- threshold.
75
- nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
76
- soft_nms_enabled (bool): Indicate to use soft non-maximum suppression.
77
- soft_nms_method: (str): One of ['gaussian', 'linear', 'hard']
78
- soft_nms_sigma: (float): Sigma for gaussian soft nms. Value in (0, inf)
79
- soft_nms_prune: (float): Threshold for pruning during soft nms. Value in [0, 1]
80
- topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
81
- all detections.
82
-
83
- Returns:
84
- instances: (list[Instances]): A list of N instances, one for each image in the batch,
85
- that stores the topk most confidence detections.
86
- kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
87
- the corresponding boxes/scores index in [0, Ri) from the input, for image i.
88
- """
89
- result_per_image = [
90
- fast_rcnn_inference_single_image(
91
- boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh,
92
- soft_nms_enabled, soft_nms_method, soft_nms_sigma, soft_nms_prune, topk_per_image, s_bf_per_img
93
- )
94
- for scores_per_image, boxes_per_image, image_shape, s_bf_per_img in zip(scores, boxes, image_shapes, scores_bf_multiply)
95
- ]
96
- return [x[0] for x in result_per_image], [x[1] for x in result_per_image]
97
-
98
-
99
- def _log_classification_stats(pred_logits, gt_classes, prefix="fast_rcnn"):
100
- """
101
- Log the classification metrics to EventStorage.
102
-
103
- Args:
104
- pred_logits: Rx(K+1) logits. The last column is for background class.
105
- gt_classes: R labels
106
- """
107
- num_instances = gt_classes.numel()
108
- if num_instances == 0:
109
- return
110
- pred_classes = pred_logits.argmax(dim=1)
111
- bg_class_ind = pred_logits.shape[1] - 1
112
-
113
- fg_inds = (gt_classes >= 0) & (gt_classes < bg_class_ind)
114
- num_fg = fg_inds.nonzero().numel()
115
- fg_gt_classes = gt_classes[fg_inds]
116
- fg_pred_classes = pred_classes[fg_inds]
117
-
118
- num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel()
119
- num_accurate = (pred_classes == gt_classes).nonzero().numel()
120
- fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel()
121
-
122
- storage = get_event_storage()
123
- storage.put_scalar(f"{prefix}/cls_accuracy", num_accurate / num_instances)
124
- if num_fg > 0:
125
- storage.put_scalar(f"{prefix}/fg_cls_accuracy", fg_num_accurate / num_fg)
126
- storage.put_scalar(f"{prefix}/false_negative", num_false_negative / num_fg)
127
- #print("cls_accuracy {:.2f}; fg_cls_accuracy {:.2f}; false_negative {:.2f}".format(num_accurate / num_instances, fg_num_accurate / num_fg, num_false_negative / num_fg))
128
-
129
-
130
- def fast_rcnn_inference_single_image(
131
- boxes,
132
- scores,
133
- image_shape: Tuple[int, int],
134
- score_thresh: float,
135
- nms_thresh: float,
136
- soft_nms_enabled,
137
- soft_nms_method,
138
- soft_nms_sigma,
139
- soft_nms_prune,
140
- topk_per_image: int,
141
- scores_bf_multiply=None,
142
- ):
143
- """
144
- Single-image inference. Return bounding-box detection results by thresholding
145
- on scores and applying non-maximum suppression (NMS).
146
-
147
- Args:
148
- Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes
149
- per image.
150
-
151
- Returns:
152
- Same as `fast_rcnn_inference`, but for only one image.
153
- """
154
- valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
155
- if not valid_mask.all():
156
- boxes = boxes[valid_mask]
157
- scores = scores[valid_mask]
158
- scores_bf_multiply = scores_bf_multiply[valid_mask]
159
-
160
- # scores = scores[:, :-1]
161
- # scores_bf_multiply = scores_bf_multiply[:, :-1]
162
- num_bbox_reg_classes = boxes.shape[1] // 4
163
- # Convert to Boxes to use the `clip` function ...
164
- boxes = Boxes(boxes.reshape(-1, 4))
165
- boxes.clip(image_shape)
166
- boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4
167
-
168
- # 1. Filter results based on detection scores. It can make NMS more efficient
169
- # by filtering out low-confidence detections.
170
- filter_mask = scores > score_thresh # R x K
171
- # R' x 2. First column contains indices of the R predictions;
172
- # Second column contains indices of classes.
173
- filter_inds = filter_mask.nonzero()
174
- if num_bbox_reg_classes == 1:
175
- boxes = boxes[filter_inds[:, 0], 0]
176
- else:
177
- boxes = boxes[filter_mask]
178
- scores = scores[filter_mask]
179
- scores_bf_multiply = scores_bf_multiply[filter_mask]
180
-
181
- # 2. Apply NMS for each class independently.
182
- if not soft_nms_enabled:
183
- keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)
184
- else:
185
- keep, soft_nms_scores = batched_soft_nms(
186
- boxes,
187
- scores,
188
- filter_inds[:, 1],
189
- soft_nms_method,
190
- soft_nms_sigma,
191
- nms_thresh,
192
- soft_nms_prune,
193
- )
194
- scores[keep] = soft_nms_scores
195
- # scores_bf_multiply? (TBD)
196
- scores_bf_multiply = scores
197
- if topk_per_image >= 0:
198
- keep = keep[:topk_per_image]
199
- boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]
200
- scores_bf_multiply = scores_bf_multiply[keep]
201
-
202
- result = Instances(image_shape)
203
- result.pred_boxes = Boxes(boxes)
204
- result.scores = scores
205
- result.scores = scores_bf_multiply # overrides the line above: report the original scores (before RPN-score multiplication)
206
- result.pred_classes = filter_inds[:, 1]
207
- return result, filter_inds[:, 0]
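For reference, the thresholding / class-wise NMS / top-k flow implemented above can be reproduced with plain torchvision on toy data. This is a minimal sketch with illustrative boxes and thresholds; it uses hard NMS only, not the optional batched_soft_nms branch:
```python
# Minimal sketch of the score-threshold + class-wise NMS + top-k flow above,
# using torchvision.ops.batched_nms on toy data (thresholds are illustrative).
import torch
from torchvision.ops import batched_nms

scores = torch.tensor([[0.9, 0.1], [0.8, 0.2], [0.05, 0.6]])             # R x K class scores
boxes = torch.tensor([[0., 0., 10., 10.],
                      [1., 1., 11., 11.],
                      [5., 5., 20., 20.]]).unsqueeze(1).repeat(1, 2, 1)   # R x K x 4

score_thresh, nms_thresh, topk = 0.3, 0.5, 100
filter_mask = scores > score_thresh                      # R x K
filter_inds = filter_mask.nonzero()                      # rows: (box index, class index)
kept_boxes = boxes[filter_mask]
kept_scores = scores[filter_mask]

# NMS is applied per class by passing the class index as the "idxs" argument.
keep = batched_nms(kept_boxes, kept_scores, filter_inds[:, 1], nms_thresh)[:topk]
print(kept_boxes[keep], kept_scores[keep], filter_inds[keep, 1])
```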
208
-
209
-
210
- class FastRCNNOutputs:
211
- """
212
- An internal implementation that stores information about outputs of a Fast R-CNN head,
213
- and provides methods that are used to decode the outputs of a Fast R-CNN head.
214
- """
215
-
216
- def __init__(
217
- self,
218
- box2box_transform,
219
- pred_class_logits,
220
- pred_proposal_deltas,
221
- proposals,
222
- smooth_l1_beta=0.0,
223
- box_reg_loss_type="smooth_l1",
224
- ):
225
- """
226
- Args:
227
- box2box_transform (Box2BoxTransform/Box2BoxTransformRotated):
228
- box2box transform instance for proposal-to-detection transformations.
229
- pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class
230
- logits for all R predicted object instances.
231
- Each row corresponds to a predicted object instance.
232
- pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for
233
- class-specific or class-agnostic regression. It stores the predicted deltas that
234
- transform proposals into final box detections.
235
- B is the box dimension (4 or 5).
236
- When B is 4, each row is [dx, dy, dw, dh (, ....)].
237
- When B is 5, each row is [dx, dy, dw, dh, da (, ....)].
238
- proposals (list[Instances]): A list of N Instances, where Instances i stores the
239
- proposals for image i, in the field "proposal_boxes".
240
- When training, each Instances must have ground-truth labels
241
- stored in the fields "gt_classes" and "gt_boxes".
242
- The total number of all instances must be equal to R.
243
- smooth_l1_beta (float): The transition point between L1 and L2 loss in
244
- the smooth L1 loss function. When set to 0, the loss becomes L1. When
245
- set to +inf, the loss becomes constant 0.
246
- box_reg_loss_type (str): Box regression loss type. One of: "smooth_l1", "giou"
247
- """
248
- self.box2box_transform = box2box_transform
249
- self.num_preds_per_image = [len(p) for p in proposals]
250
- self.pred_class_logits = pred_class_logits
251
- self.pred_proposal_deltas = pred_proposal_deltas
252
- self.smooth_l1_beta = smooth_l1_beta
253
- self.box_reg_loss_type = box_reg_loss_type
254
-
255
- self.image_shapes = [x.image_size for x in proposals]
256
-
257
- if len(proposals):
258
- box_type = type(proposals[0].proposal_boxes)
259
- # cat(..., dim=0) concatenates over all images in the batch
260
- self.proposals = box_type.cat([p.proposal_boxes for p in proposals])
261
- assert (
262
- not self.proposals.tensor.requires_grad
263
- ), "Proposals should not require gradients!"
264
-
265
- # "gt_classes" exists if and only if training. But other gt fields may
266
- # not necessarily exist in training for images that have no groundtruth.
267
- if proposals[0].has("gt_classes"):
268
- self.gt_classes = cat([p.gt_classes for p in proposals], dim=0)
269
-
270
- # If "gt_boxes" does not exist, the proposals must be all negative and
271
- # should not be included in regression loss computation.
272
- # Here we just use proposal_boxes as an arbitrary placeholder because its
273
- # value won't be used in self.box_reg_loss().
274
- gt_boxes = [
275
- p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes for p in proposals
276
- ]
277
- self.gt_boxes = box_type.cat(gt_boxes)
278
- else:
279
- self.proposals = Boxes(torch.zeros(0, 4, device=self.pred_proposal_deltas.device))
280
- self._no_instances = len(self.proposals) == 0 # no instances found
281
-
282
- def softmax_cross_entropy_loss(self):
283
- """
284
- Deprecated
285
- """
286
- _log_classification_stats(self.pred_class_logits, self.gt_classes)
287
- return cross_entropy(self.pred_class_logits, self.gt_classes, reduction="mean")
288
-
289
- def box_reg_loss(self):
290
- """
291
- Deprecated
292
- """
293
- if self._no_instances:
294
- return 0.0 * self.pred_proposal_deltas.sum()
295
-
296
- box_dim = self.proposals.tensor.size(1) # 4 or 5
297
- cls_agnostic_bbox_reg = self.pred_proposal_deltas.size(1) == box_dim
298
- device = self.pred_proposal_deltas.device
299
-
300
- bg_class_ind = self.pred_class_logits.shape[1] - 1
301
- # Box delta loss is only computed between the prediction for the gt class k
302
- # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions
303
- # for non-gt classes and background.
304
- # Empty fg_inds should produce a valid loss of zero because reduction=sum.
305
- fg_inds = nonzero_tuple((self.gt_classes >= 0) & (self.gt_classes < bg_class_ind))[0]
306
-
307
- if cls_agnostic_bbox_reg:
308
- # pred_proposal_deltas only corresponds to foreground class for agnostic
309
- gt_class_cols = torch.arange(box_dim, device=device)
310
- else:
311
- # pred_proposal_deltas for class k are located in columns [b * k : b * k + b],
312
- # where b is the dimension of box representation (4 or 5)
313
- # Note that compared to Detectron1,
314
- # we do not perform bounding box regression for background classes.
315
- gt_class_cols = box_dim * self.gt_classes[fg_inds, None] + torch.arange(
316
- box_dim, device=device
317
- )
318
-
319
- if self.box_reg_loss_type == "smooth_l1":
320
- gt_proposal_deltas = self.box2box_transform.get_deltas(
321
- self.proposals.tensor, self.gt_boxes.tensor
322
- )
323
- loss_box_reg = smooth_l1_loss(
324
- self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols],
325
- gt_proposal_deltas[fg_inds],
326
- self.smooth_l1_beta,
327
- reduction="sum",
328
- )
329
- elif self.box_reg_loss_type == "giou":
330
- fg_pred_boxes = self.box2box_transform.apply_deltas(
331
- self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols],
332
- self.proposals.tensor[fg_inds],
333
- )
334
- loss_box_reg = giou_loss(
335
- fg_pred_boxes,
336
- self.gt_boxes.tensor[fg_inds],
337
- reduction="sum",
338
- )
339
- else:
340
- raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'")
341
-
342
- loss_box_reg = loss_box_reg / self.gt_classes.numel()
343
- return loss_box_reg
344
-
345
- def losses(self):
346
- """
347
- Deprecated
348
- """
349
- return {"loss_cls": self.softmax_cross_entropy_loss(), "loss_box_reg": self.box_reg_loss()}
350
-
351
- def predict_boxes(self):
352
- """
353
- Deprecated
354
- """
355
- pred = self.box2box_transform.apply_deltas(self.pred_proposal_deltas, self.proposals.tensor)
356
- return pred.split(self.num_preds_per_image, dim=0)
357
-
358
- def predict_probs(self):
359
- """
360
- Deprecated
361
- """
362
- probs = F.softmax(self.pred_class_logits, dim=-1)
363
- return probs.split(self.num_preds_per_image, dim=0)
364
-
365
-
366
- class FastRCNNOutputLayers(nn.Module):
367
- """
368
- Two linear layers for predicting Fast R-CNN outputs:
369
-
370
- 1. proposal-to-detection box regression deltas
371
- 2. classification scores
372
- """
373
-
374
- @configurable
375
- def __init__(
376
- self,
377
- input_shape: ShapeSpec,
378
- *,
379
- box2box_transform,
380
- num_classes: int,
381
- test_score_thresh: float = 0.0,
382
- test_nms_thresh: float = 0.5,
383
- soft_nms_enabled=False,
384
- soft_nms_method="gaussian",
385
- soft_nms_sigma=0.5,
386
- soft_nms_prune=0.001,
387
- test_topk_per_image: int = 100,
388
- cls_agnostic_bbox_reg: bool = False,
389
- smooth_l1_beta: float = 0.0,
390
- box_reg_loss_type: str = "smooth_l1",
391
- loss_weight: Union[float, Dict[str, float]] = 1.0,
392
- clip_cls_emb: tuple = (False, None),
393
- no_box_delta: bool = False,
394
- bg_cls_loss_weight=None,
395
- multiply_rpn_score=False,
396
- openset_test=None,
397
- ):
398
- """
399
- NOTE: this interface is experimental.
400
-
401
- Args:
402
- input_shape (ShapeSpec): shape of the input feature to this module
403
- box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):
404
- num_classes (int): number of foreground classes
405
- test_score_thresh (float): threshold to filter predictions results.
406
- test_nms_thresh (float): NMS threshold for prediction results.
407
- test_topk_per_image (int): number of top predictions to produce per image.
408
- cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression
409
- smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if
410
- `box_reg_loss_type` is "smooth_l1"
411
- box_reg_loss_type (str): Box regression loss type. One of: "smooth_l1", "giou"
412
- loss_weight (float|dict): weights to use for losses. Can be single float for weighting
413
- all losses, or a dict of individual weightings. Valid dict keys are:
414
- * "loss_cls": applied to classification loss
415
- * "loss_box_reg": applied to box regression loss
416
- """
417
- super().__init__()
418
- if isinstance(input_shape, int): # some backward compatibility
419
- input_shape = ShapeSpec(channels=input_shape)
420
- self.num_classes = num_classes
421
- input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1)
422
- if clip_cls_emb[0]: # when combining C4 features with a text-embedding classifier, att_pool has to be used to match the feature dimension
423
- input_size = clip_cls_emb[3] if clip_cls_emb[2] in ['CLIPRes5ROIHeads', 'CLIPStandardROIHeads'] else input_size
424
- # prediction layer for num_classes foreground classes and one background class (hence + 1)
425
- self.cls_score = nn.Linear(input_size, num_classes + 1)
426
- num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
427
- box_dim = len(box2box_transform.weights)
428
- self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)
429
-
430
- nn.init.normal_(self.cls_score.weight, std=0.01)
431
- nn.init.normal_(self.bbox_pred.weight, std=0.001)
432
- for l in [self.cls_score, self.bbox_pred]:
433
- nn.init.constant_(l.bias, 0)
434
-
435
- self.box2box_transform = box2box_transform
436
- self.smooth_l1_beta = smooth_l1_beta
437
- self.test_score_thresh = test_score_thresh
438
- self.test_nms_thresh = test_nms_thresh
439
- self.soft_nms_enabled = soft_nms_enabled
440
- self.soft_nms_method = soft_nms_method
441
- self.soft_nms_sigma = soft_nms_sigma
442
- self.soft_nms_prune = soft_nms_prune
443
- self.test_topk_per_image = test_topk_per_image
444
- self.box_reg_loss_type = box_reg_loss_type
445
- if isinstance(loss_weight, float):
446
- loss_weight = {"loss_cls": loss_weight, "loss_box_reg": loss_weight}
447
- self.loss_weight = loss_weight
448
-
449
- # use clip text embeddings as classifier's weights
450
- self.use_clip_cls_emb = clip_cls_emb[0]
451
- if self.use_clip_cls_emb:
452
- ######### V2L projection layer in CVPR OVR model #########
453
- if openset_test[3]: # run CVPR model
454
- self.emb_pred = nn.Linear(input_size, 768)
455
- self.emb_pred.weight.requires_grad = False
456
- self.emb_pred.bias.requires_grad = False
457
- input_size = 768
458
- else:
459
- self.emb_pred = None
460
- ######### V2L projection layer in CVPR OVR model #########
461
- text_emb_require_grad = False
462
- self.use_bias = False
463
- self.tempurature = openset_test[2] # typically 0.01; the smaller the temperature, the sharper the softmax distribution
464
- self.no_box_delta = no_box_delta
465
- if bg_cls_loss_weight is not None: # loss weight for bg regions
466
- self.cls_loss_weight = torch.ones(num_classes + 1)
467
- self.cls_loss_weight[-1] = bg_cls_loss_weight
468
- else:
469
- self.cls_loss_weight = None
470
- self.multiply_rpn_score = multiply_rpn_score
471
- self.focal_scaled_loss = openset_test[4]
472
-
473
- @classmethod
474
- def from_config(cls, cfg, input_shape):
475
- # if cfg.MODEL.CLIP.CROP_REGION_TYPE == "RPN":
476
- # assert cfg.MODEL.CLIP.NO_BOX_DELTA is False
477
- return {
478
- "input_shape": input_shape,
479
- "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS),
480
- # fmt: off
481
- "num_classes" : cfg.MODEL.ROI_HEADS.NUM_CLASSES,
482
- "cls_agnostic_bbox_reg" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,
483
- "smooth_l1_beta" : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,
484
- "test_score_thresh" : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,
485
- "test_nms_thresh" : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,
486
- "soft_nms_enabled" : cfg.MODEL.ROI_HEADS.SOFT_NMS_ENABLED,
487
- "soft_nms_method" : cfg.MODEL.ROI_HEADS.SOFT_NMS_METHOD,
488
- "soft_nms_sigma" : cfg.MODEL.ROI_HEADS.SOFT_NMS_SIGMA,
489
- "soft_nms_prune" : cfg.MODEL.ROI_HEADS.SOFT_NMS_PRUNE,
490
- "test_topk_per_image" : cfg.TEST.DETECTIONS_PER_IMAGE,
491
- "box_reg_loss_type" : cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE,
492
- "loss_weight" : {"loss_box_reg": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT},
493
- "clip_cls_emb" : (cfg.MODEL.CLIP.USE_TEXT_EMB_CLASSIFIER, cfg.MODEL.CLIP.TEXT_EMB_PATH, cfg.MODEL.ROI_HEADS.NAME, cfg.MODEL.CLIP.TEXT_EMB_DIM),
494
- "no_box_delta" : cfg.MODEL.CLIP.NO_BOX_DELTA or cfg.MODEL.CLIP.CROP_REGION_TYPE == 'GT',
495
- "bg_cls_loss_weight" : cfg.MODEL.CLIP.BG_CLS_LOSS_WEIGHT,
496
- "multiply_rpn_score" : cfg.MODEL.CLIP.MULTIPLY_RPN_SCORE,
497
- "openset_test" : (cfg.MODEL.CLIP.OPENSET_TEST_NUM_CLASSES, cfg.MODEL.CLIP.OPENSET_TEST_TEXT_EMB_PATH, \
498
- cfg.MODEL.CLIP.CLSS_TEMP, cfg.MODEL.CLIP.RUN_CVPR_OVR, cfg.MODEL.CLIP.FOCAL_SCALED_LOSS)
499
- # fmt: on
500
- }
501
-
502
- def forward(self, x, queries):
503
- """
504
- Args:
505
- x: per-region features of shape (N, ...) for N bounding boxes to predict.
506
-
507
- Returns:
508
- (Tensor, Tensor):
509
- First tensor: shape (N,K+1), scores for each of the N boxes. Each row contains the
510
- scores for K object categories and 1 background class.
511
-
512
- Second tensor: bounding box regression deltas for each box. Shape is (N,Kx4),
513
- or (N,4) for class-agnostic regression.
514
- """
515
- if x.dim() > 2:
516
- x = torch.flatten(x, start_dim=1)
517
- if self.use_clip_cls_emb: # use clip text embeddings as classifier's weights
518
- normalized_x = F.normalize(x, p=2.0, dim=1)
519
- cls_scores = normalized_x @ queries.t()
520
- bg_cls_scores = cls_scores.new(cls_scores.shape[0], 1).fill_(0.3)
521
- scores = cls_scores # torch.cat((cls_scores, bg_cls_scores), 1)
522
- else: # default setting
523
- scores = self.cls_score(x)
524
- proposal_deltas = scores.new(scores.shape[0], 4).fill_(0) # self.bbox_pred(x)
525
- return scores, proposal_deltas
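When use_clip_cls_emb is enabled, the classifier above is effectively a cosine similarity between L2-normalized region features and text embeddings. A minimal sketch follows; the division by a temperature before softmax is an assumption about how self.tempurature is applied elsewhere in the pipeline, and all shapes are illustrative:
```python
# Toy sketch of classification with text embeddings as the classifier weights:
# cosine similarity between L2-normalized region features and class embeddings,
# optionally sharpened by a temperature (0.01 here is illustrative).
import torch
import torch.nn.functional as F

region_feats = torch.randn(5, 512)           # N region features
text_emb = torch.randn(3, 512)               # K class ("query") embeddings
temperature = 0.01

x = F.normalize(region_feats, p=2.0, dim=1)
q = F.normalize(text_emb, p=2.0, dim=1)
cls_scores = x @ q.t()                        # N x K cosine similarities in [-1, 1]
probs = F.softmax(cls_scores / temperature, dim=-1)
print(probs.argmax(dim=1))
```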
526
-
527
- def losses(self, predictions, proposals):
528
- """
529
- Args:
530
- predictions: return values of :meth:`forward()`.
531
- proposals (list[Instances]): proposals that match the features that were used
532
- to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``,
533
- ``gt_classes`` are expected.
534
-
535
- Returns:
536
- Dict[str, Tensor]: dict of losses
537
- """
538
- scores, proposal_deltas = predictions
539
-
540
- # parse classification outputs
541
- gt_classes = (
542
- cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)
543
- )
544
- _log_classification_stats(scores, gt_classes)
545
-
546
- # parse box regression outputs
547
- if len(proposals):
548
- proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4
549
- assert not proposal_boxes.requires_grad, "Proposals should not require gradients!"
550
- # If "gt_boxes" does not exist, the proposals must be all negative and
551
- # should not be included in regression loss computation.
552
- # Here we just use proposal_boxes as an arbitrary placeholder because its
553
- # value won't be used in self.box_reg_loss().
554
- gt_boxes = cat(
555
- [(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals],
556
- dim=0,
557
- )
558
- else:
559
- proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)
560
-
561
- # loss weights
562
- if self.cls_loss_weight is not None and self.cls_loss_weight.device != scores.device:
563
- self.cls_loss_weight = self.cls_loss_weight.to(scores.device)
564
- if self.focal_scaled_loss is not None:
565
- loss_cls = self.focal_loss(scores, gt_classes, gamma=self.focal_scaled_loss)
566
- else:
567
- loss_cls = cross_entropy(scores, gt_classes, reduction="mean") if self.cls_loss_weight is None else \
568
- cross_entropy(scores, gt_classes, reduction="mean", weight=self.cls_loss_weight)
569
- losses = {
570
- "loss_cls": loss_cls,
571
- "loss_box_reg": self.box_reg_loss(
572
- proposal_boxes, gt_boxes, proposal_deltas, gt_classes
573
- ),
574
- }
575
- return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}
576
-
577
- def focal_loss(self, inputs, targets, alpha=0.25, gamma=0.5, reduction="mean", mode='softmax'):
578
- """Inspired by RetinaNet implementation"""
579
- if mode == 'sigmoid': # original focal loss implementation, except we include bg loss
580
- targets = F.one_hot(targets, num_classes=self.num_classes + 1).to(inputs.dtype) # create binary label for each logit entry, including bg loss
581
- p = torch.sigmoid(inputs)
582
- ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
583
- p_t = p * targets + (1 - p) * (1 - targets)
584
- loss = ce_loss * ((1 - p_t) ** gamma)
585
-
586
- if alpha >= 0:
587
- alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
588
- loss = alpha_t * loss
589
- elif mode == 'softmax':
590
- only_fg = False # if True, apply the focal loss scaling only to foreground RoIs
591
- #gamma = 0.3 # 0.5 # 0.8 # 1.5 # 1.0
592
- alpha = -1 # no binary target in this case; instead, we can use bg loss weight
593
- if targets.numel() == 0 and reduction == "mean":
594
- return inputs.sum() * 0.0 # connect the gradient
595
- ce_loss = F.cross_entropy(inputs, targets, reduction="none")
596
- p = F.softmax(inputs, dim=-1)
597
- p_t = p[torch.arange(p.size(0)).to(p.device), targets] # get prob of target class
598
- if only_fg: # apply scaling to only fg rois
599
- roi_wise_gamma = torch.zeros(p.size(0)).to(p.device)
600
- roi_wise_gamma[targets != self.num_classes] = gamma
601
- gamma = roi_wise_gamma
602
- loss = ce_loss * ((1 - p_t) ** gamma)
603
-
604
- # if alpha >= 0:
605
- # alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
606
- # loss = alpha_t * loss
607
- # bg loss weight
608
- if self.cls_loss_weight is not None:
609
- loss_weight = torch.ones(loss.size(0)).to(p.device)
610
- loss_weight[targets == self.num_classes] = self.cls_loss_weight[-1].item()
611
- loss = loss * loss_weight
612
-
613
- if reduction == "mean":
614
- loss = loss.mean()
615
- elif reduction == "sum":
616
- loss = loss.sum()
617
-
618
- return loss
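The 'softmax' branch above boils down to scaling the per-RoI cross-entropy by (1 - p_t) ** gamma. A self-contained sketch on random data, without the foreground-only and background-weight options:
```python
# Standalone sketch of the softmax-style focal scaling used above:
# per-RoI cross-entropy is down-weighted by (1 - p_t) ** gamma.
import torch
import torch.nn.functional as F

def softmax_focal_loss(logits, targets, gamma=0.5, reduction="mean"):
    ce = F.cross_entropy(logits, targets, reduction="none")
    p_t = F.softmax(logits, dim=-1)[torch.arange(logits.size(0)), targets]
    loss = ce * (1 - p_t) ** gamma
    return loss.mean() if reduction == "mean" else loss.sum()

logits = torch.randn(8, 4, requires_grad=True)   # 3 fg classes + background
targets = torch.randint(0, 4, (8,))
print(softmax_focal_loss(logits, targets))
```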
619
-
620
- def box_reg_loss(self, proposal_boxes, gt_boxes, pred_deltas, gt_classes):
621
- """
622
- Args:
623
- All boxes are tensors with the same shape Rx(4 or 5).
624
- gt_classes is a long tensor of shape R, the gt class label of each proposal.
625
- R shall be the number of proposals.
626
- """
627
- box_dim = proposal_boxes.shape[1] # 4 or 5
628
- # Regression loss is only computed for foreground proposals (those matched to a GT)
629
- fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < self.num_classes))[0]
630
- if pred_deltas.shape[1] == box_dim: # cls-agnostic regression
631
- fg_pred_deltas = pred_deltas[fg_inds]
632
- else:
633
- fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[
634
- fg_inds, gt_classes[fg_inds]
635
- ]
636
-
637
- if self.box_reg_loss_type == "smooth_l1":
638
- gt_pred_deltas = self.box2box_transform.get_deltas(
639
- proposal_boxes[fg_inds],
640
- gt_boxes[fg_inds],
641
- )
642
- loss_box_reg = smooth_l1_loss(
643
- fg_pred_deltas, gt_pred_deltas, self.smooth_l1_beta, reduction="sum"
644
- )
645
- elif self.box_reg_loss_type == "giou":
646
- fg_pred_boxes = self.box2box_transform.apply_deltas(
647
- fg_pred_deltas, proposal_boxes[fg_inds]
648
- )
649
- loss_box_reg = giou_loss(fg_pred_boxes, gt_boxes[fg_inds], reduction="sum")
650
- else:
651
- raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'")
652
- # The reg loss is normalized using the total number of regions (R), not the number
653
- # of foreground regions even though the box regression loss is only defined on
654
- # foreground regions. Why? Because doing so gives equal training influence to
655
- # each foreground example. To see how, consider two different minibatches:
656
- # (1) Contains a single foreground region
657
- # (2) Contains 100 foreground regions
658
- # If we normalize by the number of foreground regions, the single example in
659
- # minibatch (1) will be given 100 times as much influence as each foreground
660
- # example in minibatch (2). Normalizing by the total number of regions, R,
661
- # means that the single example in minibatch (1) and each of the 100 examples
662
- # in minibatch (2) are given equal influence.
663
- return loss_box_reg / max(gt_classes.numel(), 1.0) # return 0 if empty
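A toy calculation of the two normalization choices discussed in the comment above; the numbers are illustrative (512 sampled RoIs per image, a per-foreground box loss of 0.8):
```python
# Compare "divide by total regions R" with "divide by number of fg regions"
# for two minibatches that differ only in their foreground count.
R, per_fg_loss = 512, 0.8
for num_fg in (1, 100):
    print(num_fg,
          per_fg_loss / R,        # each fg term's contribution when dividing by R (constant)
          per_fg_loss / num_fg)   # each fg term's contribution when dividing by num_fg (100x imbalance)
```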
664
-
665
- def inference(self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]):
666
- """
667
- Args:
668
- predictions: return values of :meth:`forward()`.
669
- proposals (list[Instances]): proposals that match the features that were
670
- used to compute predictions. The ``proposal_boxes`` field is expected.
671
-
672
- Returns:
673
- list[Instances]: same as `fast_rcnn_inference`.
674
- list[Tensor]: same as `fast_rcnn_inference`.
675
- """
676
- boxes = self.predict_boxes(predictions, proposals)
677
- scores = self.predict_probs(predictions, proposals)
678
- image_shapes = [x.image_size for x in proposals]
679
- scores_bf_multiply = scores # as a backup
680
- if self.multiply_rpn_score:
681
- rpn_scores = [p.get('objectness_logits') for p in proposals]
682
- # filter based on rpn_scores
683
- # boxes = (boxes[0][rpn_scores[0] > 0.9],)
684
- # scores = (scores[0][rpn_scores[0] > 0.9],)
685
- # rpn_scores = [rpn_scores[0][rpn_scores[0] > 0.9]]
686
- # scores_bf_multiply = scores # as a backup
687
- #rpn_scores = [p.get('objectness_logits').sigmoid() for p in proposals]
688
- scores = [(torch.sigmoid(s) * torch.sigmoid(rpn_s[:, None])) ** 0.5 for s, rpn_s in zip(scores, rpn_scores)]
689
- return fast_rcnn_inference(
690
- boxes,
691
- scores,
692
- image_shapes,
693
- self.test_score_thresh,
694
- self.test_nms_thresh,
695
- self.soft_nms_enabled,
696
- self.soft_nms_method,
697
- self.soft_nms_sigma,
698
- self.soft_nms_prune,
699
- self.test_topk_per_image,
700
- scores_bf_multiply = scores_bf_multiply if self.multiply_rpn_score else None,
701
- )
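The multiply_rpn_score path above fuses detector and proposal confidences as a geometric mean of the sigmoid-ed classification scores and the sigmoid-ed RPN objectness logits. A standalone sketch on random per-image tensors:
```python
# Sketch of the optional RPN-score fusion: geometric mean of sigmoid(classification
# score) and sigmoid(RPN objectness logit) for each proposal of one image.
import torch

cls_scores = torch.randn(6, 4)           # Ri x (K+1) classifier outputs for one image
objectness_logits = torch.randn(6)       # Ri RPN objectness logits for the same proposals

fused = (torch.sigmoid(cls_scores) * torch.sigmoid(objectness_logits)[:, None]) ** 0.5
print(fused.shape)                        # torch.Size([6, 4])
```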
702
-
703
- def predict_boxes_for_gt_classes(self, predictions, proposals):
704
- """
705
- Args:
706
- predictions: return values of :meth:`forward()`.
707
- proposals (list[Instances]): proposals that match the features that were used
708
- to compute predictions. The fields ``proposal_boxes``, ``gt_classes`` are expected.
709
-
710
- Returns:
711
- list[Tensor]:
712
- A list of Tensors of predicted boxes for GT classes in case of
713
- class-specific box head. Element i of the list has shape (Ri, B), where Ri is
714
- the number of proposals for image i and B is the box dimension (4 or 5)
715
- """
716
- if not len(proposals):
717
- return []
718
- scores, proposal_deltas = predictions
719
- proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)
720
- N, B = proposal_boxes.shape
721
- predict_boxes = self.box2box_transform.apply_deltas(
722
- proposal_deltas, proposal_boxes
723
- ) # Nx(KxB)
724
-
725
- K = predict_boxes.shape[1] // B
726
- if K > 1:
727
- gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)
728
- # Some proposals are ignored or have a background class. Their gt_classes
729
- # cannot be used as index.
730
- gt_classes = gt_classes.clamp_(0, K - 1)
731
-
732
- predict_boxes = predict_boxes.view(N, K, B)[
733
- torch.arange(N, dtype=torch.long, device=predict_boxes.device), gt_classes
734
- ]
735
- num_prop_per_image = [len(p) for p in proposals]
736
- return predict_boxes.split(num_prop_per_image)
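The class-specific gather above can be illustrated in isolation: reshape the predictions to N x K x B and pick, for each proposal, the box belonging to its (clamped) ground-truth class. Toy shapes below:
```python
# Select the per-GT-class box from class-specific predictions: view as N x K x B
# and index row i with the (clamped) gt class of proposal i.
import torch

N, K, B = 4, 3, 4
predict_boxes = torch.randn(N, K * B)
gt_classes = torch.tensor([0, 2, 3, 1])          # 3 == background; clamp to a valid index
gt_classes = gt_classes.clamp(0, K - 1)

selected = predict_boxes.view(N, K, B)[torch.arange(N), gt_classes]
print(selected.shape)                             # torch.Size([4, 4])
```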
737
-
738
- def predict_boxes(
739
- self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]
740
- ):
741
- """
742
- Args:
743
- predictions: return values of :meth:`forward()`.
744
- proposals (list[Instances]): proposals that match the features that were
745
- used to compute predictions. The ``proposal_boxes`` field is expected.
746
-
747
- Returns:
748
- list[Tensor]:
749
- A list of Tensors of predicted class-specific or class-agnostic boxes
750
- for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
751
- the number of proposals for image i and B is the box dimension (4 or 5)
752
- """
753
- if not len(proposals):
754
- return []
755
- _, proposal_deltas = predictions
756
- num_prop_per_image = [len(p) for p in proposals]
757
- proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)
758
- if self.no_box_delta:
759
- predict_boxes = proposal_boxes
760
- else:
761
- predict_boxes = self.box2box_transform.apply_deltas(
762
- proposal_deltas,
763
- proposal_boxes,
764
- ) # Nx(KxB)
765
- return predict_boxes.split(num_prop_per_image)
766
-
767
- def predict_probs(
768
- self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]
769
- ):
770
- """
771
- Args:
772
- predictions: return values of :meth:`forward()`.
773
- proposals (list[Instances]): proposals that match the features that were
774
- used to compute predictions.
775
-
776
- Returns:
777
- list[Tensor]:
778
- A list of Tensors of predicted class probabilities for each image.
779
- Element i has shape (Ri, K + 1), where Ri is the number of proposals for image i.
780
- """
781
- scores, _ = predictions
782
- num_inst_per_image = [len(p) for p in proposals]
783
- # probs = F.softmax(scores, dim=-1)
784
- probs = scores
785
- return probs.split(num_inst_per_image, dim=0)
786
-
787
-
788
- class OLDFastRCNNOutputLayers(nn.Module):
789
- """
790
- Two linear layers for predicting Fast R-CNN outputs:
791
-
792
- 1. proposal-to-detection box regression deltas
793
- 2. classification scores
794
- """
795
-
796
- @configurable
797
- def __init__(
798
- self,
799
- input_shape: ShapeSpec,
800
- *,
801
- box2box_transform,
802
- num_classes: int,
803
- test_score_thresh: float = 0.0,
804
- test_nms_thresh: float = 0.5,
805
- test_topk_per_image: int = 100,
806
- cls_agnostic_bbox_reg: bool = False,
807
- smooth_l1_beta: float = 0.0,
808
- box_reg_loss_type: str = "smooth_l1",
809
- loss_weight: Union[float, Dict[str, float]] = 1.0,
810
- no_box_delta: bool = False,
811
- ):
812
- """
813
- NOTE: this interface is experimental.
814
-
815
- Args:
816
- input_shape (ShapeSpec): shape of the input feature to this module
817
- box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):
818
- num_classes (int): number of foreground classes
819
- test_score_thresh (float): threshold to filter predictions results.
820
- test_nms_thresh (float): NMS threshold for prediction results.
821
- test_topk_per_image (int): number of top predictions to produce per image.
822
- cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression
823
- smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if
824
- `box_reg_loss_type` is "smooth_l1"
825
- box_reg_loss_type (str): Box regression loss type. One of: "smooth_l1", "giou"
826
- loss_weight (float|dict): weights to use for losses. Can be single float for weighting
827
- all losses, or a dict of individual weightings. Valid dict keys are:
828
- * "loss_cls": applied to classification loss
829
- * "loss_box_reg": applied to box regression loss
830
- """
831
- super().__init__()
832
- if isinstance(input_shape, int): # some backward compatibility
833
- input_shape = ShapeSpec(channels=input_shape)
834
- self.num_classes = num_classes
835
- input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1)
836
- # prediction layer for num_classes foreground classes and one background class (hence + 1)
837
- self.cls_score = nn.Linear(input_size, num_classes + 1)
838
- num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
839
- box_dim = len(box2box_transform.weights)
840
- self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)
841
-
842
- nn.init.normal_(self.cls_score.weight, std=0.01)
843
- nn.init.normal_(self.bbox_pred.weight, std=0.001)
844
- for l in [self.cls_score, self.bbox_pred]:
845
- nn.init.constant_(l.bias, 0)
846
-
847
- self.box2box_transform = box2box_transform
848
- self.smooth_l1_beta = smooth_l1_beta
849
- self.test_score_thresh = test_score_thresh
850
- self.test_nms_thresh = test_nms_thresh
851
- self.test_topk_per_image = test_topk_per_image
852
- self.box_reg_loss_type = box_reg_loss_type
853
- if isinstance(loss_weight, float):
854
- loss_weight = {"loss_cls": loss_weight, "loss_box_reg": loss_weight}
855
- self.loss_weight = loss_weight
856
- self.no_box_delta = no_box_delta
857
-
858
- @classmethod
859
- def from_config(cls, cfg, input_shape):
860
- return {
861
- "input_shape": input_shape,
862
- "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS),
863
- # fmt: off
864
- "num_classes" : cfg.MODEL.ROI_HEADS.NUM_CLASSES,
865
- "cls_agnostic_bbox_reg" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,
866
- "smooth_l1_beta" : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,
867
- "test_score_thresh" : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,
868
- "test_nms_thresh" : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,
869
- "test_topk_per_image" : cfg.TEST.DETECTIONS_PER_IMAGE,
870
- "box_reg_loss_type" : cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE,
871
- "loss_weight" : {"loss_box_reg": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT},
872
- "no_box_delta" : cfg.MODEL.CLIP.NO_BOX_DELTA or cfg.MODEL.CLIP.CROP_REGION_TYPE == 'GT',
873
- # fmt: on
874
- }
875
-
876
- def forward(self, x):
877
- """
878
- Args:
879
- x: per-region features of shape (N, ...) for N bounding boxes to predict.
880
-
881
- Returns:
882
- (Tensor, Tensor):
883
- First tensor: shape (N,K+1), scores for each of the N boxes. Each row contains the
884
- scores for K object categories and 1 background class.
885
-
886
- Second tensor: bounding box regression deltas for each box. Shape is (N,Kx4),
887
- or (N,4) for class-agnostic regression.
888
- """
889
- if x.dim() > 2:
890
- x = torch.flatten(x, start_dim=1)
891
- scores = self.cls_score(x)
892
- proposal_deltas = self.bbox_pred(x)
893
- return scores, proposal_deltas
894
-
895
- def losses(self, predictions, proposals):
896
- """
897
- Args:
898
- predictions: return values of :meth:`forward()`.
899
- proposals (list[Instances]): proposals that match the features that were used
900
- to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``,
901
- ``gt_classes`` are expected.
902
-
903
- Returns:
904
- Dict[str, Tensor]: dict of losses
905
- """
906
- scores, proposal_deltas = predictions
907
-
908
- # parse classification outputs
909
- gt_classes = (
910
- cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)
911
- )
912
- _log_classification_stats(scores, gt_classes)
913
-
914
- # parse box regression outputs
915
- if len(proposals):
916
- proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4
917
- assert not proposal_boxes.requires_grad, "Proposals should not require gradients!"
918
- # If "gt_boxes" does not exist, the proposals must be all negative and
919
- # should not be included in regression loss computation.
920
- # Here we just use proposal_boxes as an arbitrary placeholder because its
921
- # value won't be used in self.box_reg_loss().
922
- gt_boxes = cat(
923
- [(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals],
924
- dim=0,
925
- )
926
- else:
927
- proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)
928
-
929
- losses = {
930
- "loss_cls": cross_entropy(scores, gt_classes, reduction="mean"),
931
- "loss_box_reg": self.box_reg_loss(
932
- proposal_boxes, gt_boxes, proposal_deltas, gt_classes
933
- ),
934
- }
935
- return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}
936
-
937
- def box_reg_loss(self, proposal_boxes, gt_boxes, pred_deltas, gt_classes):
938
- """
939
- Args:
940
- All boxes are tensors with the same shape Rx(4 or 5).
941
- gt_classes is a long tensor of shape R, the gt class label of each proposal.
942
- R shall be the number of proposals.
943
- """
944
- box_dim = proposal_boxes.shape[1] # 4 or 5
945
- # Regression loss is only computed for foreground proposals (those matched to a GT)
946
- fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < self.num_classes))[0]
947
- if pred_deltas.shape[1] == box_dim: # cls-agnostic regression
948
- fg_pred_deltas = pred_deltas[fg_inds]
949
- else:
950
- fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[
951
- fg_inds, gt_classes[fg_inds]
952
- ]
953
-
954
- if self.box_reg_loss_type == "smooth_l1":
955
- gt_pred_deltas = self.box2box_transform.get_deltas(
956
- proposal_boxes[fg_inds],
957
- gt_boxes[fg_inds],
958
- )
959
- loss_box_reg = smooth_l1_loss(
960
- fg_pred_deltas, gt_pred_deltas, self.smooth_l1_beta, reduction="sum"
961
- )
962
- elif self.box_reg_loss_type == "giou":
963
- fg_pred_boxes = self.box2box_transform.apply_deltas(
964
- fg_pred_deltas, proposal_boxes[fg_inds]
965
- )
966
- loss_box_reg = giou_loss(fg_pred_boxes, gt_boxes[fg_inds], reduction="sum")
967
- else:
968
- raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'")
969
- # The reg loss is normalized using the total number of regions (R), not the number
970
- # of foreground regions even though the box regression loss is only defined on
971
- # foreground regions. Why? Because doing so gives equal training influence to
972
- # each foreground example. To see how, consider two different minibatches:
973
- # (1) Contains a single foreground region
974
- # (2) Contains 100 foreground regions
975
- # If we normalize by the number of foreground regions, the single example in
976
- # minibatch (1) will be given 100 times as much influence as each foreground
977
- # example in minibatch (2). Normalizing by the total number of regions, R,
978
- # means that the single example in minibatch (1) and each of the 100 examples
979
- # in minibatch (2) are given equal influence.
980
- return loss_box_reg / max(gt_classes.numel(), 1.0) # return 0 if empty
981
-
982
- def inference(self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]):
983
- """
984
- Args:
985
- predictions: return values of :meth:`forward()`.
986
- proposals (list[Instances]): proposals that match the features that were
987
- used to compute predictions. The ``proposal_boxes`` field is expected.
988
-
989
- Returns:
990
- list[Instances]: same as `fast_rcnn_inference`.
991
- list[Tensor]: same as `fast_rcnn_inference`.
992
- """
993
- boxes = self.predict_boxes(predictions, proposals)
994
- scores = self.predict_probs(predictions, proposals)
995
- image_shapes = [x.image_size for x in proposals]
996
- return fast_rcnn_inference(
997
- boxes,
998
- scores,
999
- image_shapes,
1000
- self.test_score_thresh,
1001
- self.test_nms_thresh,
1002
- self.test_topk_per_image,
1003
- )
1004
-
1005
- def predict_boxes_for_gt_classes(self, predictions, proposals):
1006
- """
1007
- Args:
1008
- predictions: return values of :meth:`forward()`.
1009
- proposals (list[Instances]): proposals that match the features that were used
1010
- to compute predictions. The fields ``proposal_boxes``, ``gt_classes`` are expected.
1011
-
1012
- Returns:
1013
- list[Tensor]:
1014
- A list of Tensors of predicted boxes for GT classes in case of
1015
- class-specific box head. Element i of the list has shape (Ri, B), where Ri is
1016
- the number of proposals for image i and B is the box dimension (4 or 5)
1017
- """
1018
- if not len(proposals):
1019
- return []
1020
- scores, proposal_deltas = predictions
1021
- proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)
1022
- N, B = proposal_boxes.shape
1023
- predict_boxes = self.box2box_transform.apply_deltas(
1024
- proposal_deltas, proposal_boxes
1025
- ) # Nx(KxB)
1026
-
1027
- K = predict_boxes.shape[1] // B
1028
- if K > 1:
1029
- gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)
1030
- # Some proposals are ignored or have a background class. Their gt_classes
1031
- # cannot be used as index.
1032
- gt_classes = gt_classes.clamp_(0, K - 1)
1033
-
1034
- predict_boxes = predict_boxes.view(N, K, B)[
1035
- torch.arange(N, dtype=torch.long, device=predict_boxes.device), gt_classes
1036
- ]
1037
- num_prop_per_image = [len(p) for p in proposals]
1038
- return predict_boxes.split(num_prop_per_image)
1039
-
1040
- def predict_boxes(
1041
- self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]
1042
- ):
1043
- """
1044
- Args:
1045
- predictions: return values of :meth:`forward()`.
1046
- proposals (list[Instances]): proposals that match the features that were
1047
- used to compute predictions. The ``proposal_boxes`` field is expected.
1048
-
1049
- Returns:
1050
- list[Tensor]:
1051
- A list of Tensors of predicted class-specific or class-agnostic boxes
1052
- for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
1053
- the number of proposals for image i and B is the box dimension (4 or 5)
1054
- """
1055
- if not len(proposals):
1056
- return []
1057
- _, proposal_deltas = predictions
1058
- num_prop_per_image = [len(p) for p in proposals]
1059
- proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)
1060
- if self.no_box_delta:
1061
- predict_boxes = proposal_boxes
1062
- else:
1063
- predict_boxes = self.box2box_transform.apply_deltas(
1064
- proposal_deltas,
1065
- proposal_boxes,
1066
- ) # Nx(KxB)
1067
- return predict_boxes.split(num_prop_per_image)
1068
-
1069
- def predict_probs(
1070
- self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]
1071
- ):
1072
- """
1073
- Args:
1074
- predictions: return values of :meth:`forward()`.
1075
- proposals (list[Instances]): proposals that match the features that were
1076
- used to compute predictions.
1077
-
1078
- Returns:
1079
- list[Tensor]:
1080
- A list of Tensors of predicted class probabilities for each image.
1081
- Element i has shape (Ri, K + 1), where Ri is the number of proposals for image i.
1082
- """
1083
- scores, _ = predictions
1084
- num_inst_per_image = [len(p) for p in proposals]
1085
- probs = F.softmax(scores, dim=-1)
1086
- return probs.split(num_inst_per_image, dim=0)
 
spaces/ChandraMohanNayal/AutoGPT/autogpt/js/overlay.js DELETED
@@ -1,29 +0,0 @@
1
- const overlay = document.createElement('div');
2
- Object.assign(overlay.style, {
3
- position: 'fixed',
4
- zIndex: 999999,
5
- top: 0,
6
- left: 0,
7
- width: '100%',
8
- height: '100%',
9
- background: 'rgba(0, 0, 0, 0.7)',
10
- color: '#fff',
11
- fontSize: '24px',
12
- fontWeight: 'bold',
13
- display: 'flex',
14
- justifyContent: 'center',
15
- alignItems: 'center',
16
- });
17
- const textContent = document.createElement('div');
18
- Object.assign(textContent.style, {
19
- textAlign: 'center',
20
- });
21
- textContent.textContent = 'AutoGPT Analyzing Page';
22
- overlay.appendChild(textContent);
23
- document.body.append(overlay);
24
- document.body.style.overflow = 'hidden';
25
- let dotCount = 0;
26
- setInterval(() => {
27
- textContent.textContent = 'AutoGPT Analyzing Page' + '.'.repeat(dotCount);
28
- dotCount = (dotCount + 1) % 4;
29
- }, 1000);
 
spaces/CikeyQI/meme-api/meme_generator/memes/google/__init__.py DELETED
@@ -1,28 +0,0 @@
1
- from typing import List
2
-
3
- from pil_utils import BuildImage, Text2Image
4
-
5
- from meme_generator import add_meme
6
-
7
-
8
- def google(images, texts: List[str], args):
9
- text = texts[0]
10
- text = " ".join(text.splitlines())
11
- colors = ["#4285f4", "#db4437", "#f4b400", "#4285f4", "#0f9d58", "#db4437"]
12
- t2m = Text2Image.from_text(text, 200)
13
- index = 0
14
- for char in t2m.lines[0].chars:
15
- char.fill = colors[index % len(colors)]
16
- if char.char.strip():
17
- index += 1
18
- return BuildImage(t2m.to_image(bg_color="white", padding=(50, 50))).save_jpg()
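The colour assignment can be sketched in plain Python, independent of pil_utils: each visible character takes the next colour from the palette, while whitespace keeps the index unchanged:
```python
# Plain-Python sketch of the colour cycling above: every visible character gets the
# next colour from the Google palette; whitespace does not advance the index.
colors = ["#4285f4", "#db4437", "#f4b400", "#4285f4", "#0f9d58", "#db4437"]

def per_char_colors(text: str):
    out, index = [], 0
    for char in " ".join(text.splitlines()):
        out.append((char, colors[index % len(colors)]))
        if char.strip():
            index += 1
    return out

print(per_char_colors("Google"))
```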
19
-
20
-
21
- add_meme(
22
- "google",
23
- google,
24
- min_texts=1,
25
- max_texts=1,
26
- default_texts=["Google"],
27
- keywords=["google"],
28
- )