parquet-converter commited on
Commit
67d41b6
·
1 Parent(s): f8beb95

Update parquet files (step 31 of 249)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/Avatar Friday Patcher V1.1 __HOT__.md +0 -84
  2. spaces/1gistliPinn/ChatGPT4/Crystal-Cs4280-Cm-Ep-Sound-Card-Driver-FOR-WINDOWS-7181.md +0 -65
  3. spaces/1gistliPinn/ChatGPT4/Examples/Fifa 11 World Cup Patch Update V1.rar How to Get the Most Out of Your Fifa 11 Game.md +0 -7
  4. spaces/1phancelerku/anime-remove-background/Blue 3 ft. Radio Weasel - Where You Are - Free MP3 and Lyrics Download.md +0 -105
  5. spaces/1phancelerku/anime-remove-background/Download Geometry Dash Lite APK for Android 2.3 and Enjoy Rhythm-based Action Platforming!.md +0 -106
  6. spaces/1phancelerku/anime-remove-background/Euphoria Season 1 Download Where to Find the Full Episodes Online.md +0 -131
  7. spaces/1toTree/lora_test/ppdiffusers/pipelines/ddpm/pipeline_ddpm.py +0 -108
  8. spaces/44brabal/runwayml-stable-diffusion-v1-5/README.md +0 -12
  9. spaces/A00001/bingothoo/src/components/ui/badge.tsx +0 -36
  10. spaces/AIZ2H/02-Gradio-Art-From-Text-And-Images/app.py +0 -224
  11. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/helpers/you.py +0 -79
  12. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/toggleswitchshape.d.ts +0 -2
  13. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/puff/Factory.js +0 -13
  14. spaces/AiMimicry/sovits-models/hubert/hubert_model.py +0 -222
  15. spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/monotonic_align/setup.py +0 -9
  16. spaces/Alfasign/HuggingGPT-Lite/awesome_chat.py +0 -933
  17. spaces/Amrrs/DragGan-Inversion/PTI/criteria/localitly_regulizer.py +0 -65
  18. spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/filtered_lrelu.h +0 -90
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_controlnet_inpaint_img2img.py +0 -1119
  20. spaces/Andy1621/uniformer_image_detection/configs/fast_rcnn/README.md +0 -16
  21. spaces/Andy1621/uniformer_image_detection/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py +0 -4
  22. spaces/Andy1621/uniformer_image_detection/configs/wider_face/ssd300_wider_face.py +0 -18
  23. spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/fast_rcnn.py +0 -52
  24. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/pisa_roi_head.py +0 -159
  25. spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/encnet_r50-d8.py +0 -48
  26. spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py +0 -10
  27. spaces/Aniquel/WizApp/README.md +0 -13
  28. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/RWKV-model.md +0 -72
  29. spaces/AnonAndDesu/Desu_Proxy/Dockerfile +0 -11
  30. spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/geometry.py +0 -96
  31. spaces/Ariharasudhan/YoloV5/utils/aws/resume.py +0 -40
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel.py +0 -37
  33. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__init__.py +0 -142
  34. spaces/AvaterClasher/Food_Classifier_Moni/model.py +0 -36
  35. spaces/Awesimo/jojogan/e4e/criteria/moco_loss.py +0 -71
  36. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/evaluation/rotated_coco_evaluation.py +0 -207
  37. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/visualize_json_results.py +0 -90
  38. spaces/Bart92/RVC_HF/tools/calc_rvc_model_similarity.py +0 -96
  39. spaces/Benson/text-generation/Examples/Choo Choo Charles Juego Completo.md +0 -47
  40. spaces/Benson/text-generation/Examples/Cmo Descargar Llamada De Deber Warzone Mvil Apk.md +0 -165
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/__init__.py +0 -142
  42. spaces/CVH-vn1210/make_hair/minigpt4/datasets/builders/base_dataset_builder.py +0 -235
  43. spaces/CVPR/LIVE/scene.cpp +0 -1035
  44. spaces/CVPR/LIVE/thrust/thrust/system/cpp/execution_policy.h +0 -157
  45. spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/uninitialized_fill.h +0 -22
  46. spaces/Catspin/2_ai_chat/README.md +0 -10
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/attrs/exceptions.py +0 -3
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/_textwrap.py +0 -49
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/libarchive.py +0 -217
  50. spaces/DaleChen/AutoGPT/autogpt/commands/times.py +0 -10
spaces/1acneusushi/gradio-2dmoleculeeditor/Avatar Friday Patcher V1.1 __HOT__.md DELETED
@@ -1,84 +0,0 @@
1
- ## Avatar Friday Patcher V1.1
2
-
3
-
4
-
5
-
6
-
7
- ![Avatar Friday Patcher V1.1 __HOT__](https://cdn.shopify.com/s/files/1/1207/0358/products/logo_back_patch_1200x1200.png?v\u003d1560967230)
8
-
9
-
10
-
11
-
12
-
13
- **Download ✅ [https://jinyurl.com/2tA014](https://jinyurl.com/2tA014)**
14
-
15
-
16
-
17
-
18
-
19
-
20
-
21
-
22
-
23
-
24
-
25
-
26
-
27
- # Avatar Friday Patcher v1.1: How to Fix Common Issues and Enjoy the Game
28
-
29
-
30
-
31
- If you are a fan of the Avatar franchise, you might have been eagerly waiting for the release of Avatar Friday, the new open-world RPG game based on the popular movie and TV series. However, some players have reported experiencing various issues with the game, such as crashes, glitches, low FPS, and missing features. Fortunately, there is a solution: Avatar Friday Patcher v1.1.
32
-
33
-
34
-
35
- Avatar Friday Patcher v1.1 is a fan-made mod that aims to improve the performance and stability of the game, as well as add some missing features and enhancements. The patcher is easy to use and compatible with most versions of the game. Here are some of the benefits of using Avatar Friday Patcher v1.1:
36
-
37
-
38
-
39
- - Fixes crashes and freezes that occur randomly or at certain points in the game.
40
-
41
- - Optimizes the graphics settings and reduces the CPU and GPU load, resulting in higher FPS and smoother gameplay.
42
-
43
- - Enables full-screen mode and custom resolutions, allowing you to play the game in your preferred display settings.
44
-
45
- - Adds missing features such as subtitles, controller support, achievements, and cloud saves.
46
-
47
- - Enhances the game's visuals and audio quality, making the world of Pandora more immersive and realistic.
48
-
49
- - Fixes bugs and glitches that affect the gameplay, such as broken quests, missing items, clipping issues, and more.
50
-
51
-
52
-
53
- To use Avatar Friday Patcher v1.1, you need to download it from the official website or a trusted source. Then, you need to extract the files to your game folder and run the patcher.exe file. The patcher will automatically detect your game version and apply the necessary changes. You can also customize some of the options according to your preferences. Once the patching process is done, you can launch the game and enjoy it without any problems.
54
-
55
-
56
-
57
- Avatar Friday Patcher v1.1 is a must-have mod for anyone who wants to play Avatar Friday without any hassle. It will make your gaming experience more enjoyable and satisfying. Download it today and see for yourself!
58
-
59
-
60
-
61
- Avatar Friday Patcher v1.1 is not only a mod that fixes and improves the game, but also a mod that adds new content and features. Here are some of the additional things you can do with Avatar Friday Patcher v1.1:
62
-
63
-
64
-
65
- - Explore new areas and locations that were not included in the original game, such as the Floating Mountains, the Tree of Souls, and the Hallelujah Mountains.
66
-
67
- - Interact with new characters and factions that have their own stories and quests, such as the Na'vi clans, the RDA soldiers, and the wildlife researchers.
68
-
69
- - Customize your avatar's appearance and skills, choosing from different races, genders, hairstyles, outfits, weapons, and abilities.
70
-
71
- - Collect and craft new items and resources, such as plants, minerals, artifacts, and equipment.
72
-
73
- - Ride and tame various creatures that inhabit Pandora, such as banshees, direhorses, thanators, and more.
74
-
75
-
76
-
77
- Avatar Friday Patcher v1.1 is a mod that transforms Avatar Friday into a more complete and satisfying game. It is compatible with most of the other mods available for the game, so you can mix and match them to create your own unique experience. If you are looking for a way to enhance your Avatar Friday adventure, you should definitely give Avatar Friday Patcher v1.1 a try!
78
-
79
- 145887f19f
80
-
81
-
82
-
83
-
84
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Crystal-Cs4280-Cm-Ep-Sound-Card-Driver-FOR-WINDOWS-7181.md DELETED
@@ -1,65 +0,0 @@
1
- Crystal Cs4280 Cm Ep Sound Card Driver FOR WINDOWS 7.181
2
-
3
-
4
-
5
- DOWNLOAD === [https://gohhs.com/2tvp6s](https://gohhs.com/2tvp6s)
6
-
7
-
8
-
9
-
10
-
11
-
12
-
13
-
14
-
15
- Here is a possible title and article for your keyword:
16
-
17
- How to Install Crystal Cs4280 Cm Ep Sound Card Driver for Windows 7.181
18
-
19
- If you have a Crystal Cs4280 Cm Ep sound card and you want to use it with Windows 7.181, you may need to install a driver to make it work properly. A driver is a software that allows your computer to communicate with your hardware devices. Without a driver, your sound card may not function correctly or at all.
20
-
21
- In this article, we will show you how to download and install the Crystal Cs4280 Cm Ep sound card driver for Windows 7.181 in a few easy steps. We will also provide some tips on how to troubleshoot common issues that may arise during or after the installation process.
22
-
23
- Step 1: Download the Crystal Cs4280 Cm Ep Sound Card Driver
24
-
25
- The first step is to download the Crystal Cs4280 Cm Ep sound card driver from a reliable source. You can use one of the following links to download the driver file:
26
-
27
-
28
- Crystal Digital cs4280-cm Drivers Download - Solvusoft [^1^]
29
- Crystal CS4280/CS4614/CS4624 Sound Driver | Crystal Semiconductors [^2^]
30
- Crystal Audio Drivers Cs4280-Cm | Audio-Digital.net [^3^]
31
- Crystal Cs4280 Cm Driver Download Win7 [^4^]
32
-
33
-
34
- Make sure you choose the correct version of the driver that matches your operating system and your sound card model. The file name should be something like d1265070.rar or Crystal_CS4281.zip.
35
-
36
- Save the file to a location where you can easily find it later, such as your desktop or downloads folder.
37
-
38
- Step 2: Extract the Crystal Cs4280 Cm Ep Sound Card Driver File
39
-
40
- The next step is to extract the contents of the driver file that you downloaded. The file is compressed in a .rar or .zip format, so you will need a software that can open and extract these types of files. You can use one of the following programs:
41
-
42
-
43
- WinRAR
44
- 7-Zip
45
- PeaZip
46
-
47
-
48
- Right-click on the driver file and select "Extract Here" or "Extract to" from the menu. Choose a destination folder where you want to extract the files, such as your desktop or downloads folder.
49
-
50
- You should see a folder with the name of the driver file, such as d1265070 or Crystal_CS4281. Open this folder and look for a file named setup.exe or install.exe. This is the executable file that will install the driver on your computer.
51
-
52
- Step 3: Install the Crystal Cs4280 Cm Ep Sound Card Driver
53
-
54
- The final step is to run the setup.exe or install.exe file that you extracted in the previous step. Double-click on this file and follow the instructions on the screen to complete the installation process.
55
-
56
- You may need to agree to some terms and conditions, choose a language and a destination folder, and restart your computer after the installation is finished.
57
-
58
- Once the installation is done, you should be able to use your Crystal Cs4280 Cm Ep sound card with Windows 7.181 without any problems.
59
-
60
- Troubleshooting Tips
61
-
62
- If you encounter any issues during or after installing dfd1c89656
63
-
64
-
65
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Fifa 11 World Cup Patch Update V1.rar How to Get the Most Out of Your Fifa 11 Game.md DELETED
@@ -1,7 +0,0 @@
1
-
2
- <p>Then, from 21st November to 18th December, a new "live" World Cup mode will be updated during the group and knockout stages, letting you play a single-player tournament along with real-world fixtures and squads for each game. You can play any past game to rewrite history and better real-world England's inevitable disappointing result.</p>
3
- <p>EA Game has released FIFA 23 patch 1.04 details for PC, PS4, and Xbox One. According to the official <strong>Fifa 23 patch notes</strong>, the latest update added the <strong>FIFA World Cup 2022</strong> to the game. Apart from this, Fifa 23 update 1.04 also includes stability fixes.</p>
4
- <h2>Fifa 11 World Cup Patch Update V1.rar</h2><br /><p><b><b>Download Zip</b> === <a href="https://imgfil.com/2uxY43">https://imgfil.com/2uxY43</a></b></p><br /><br />
5
- <p>This patch is based on FIFA 17, and will 'update' FIFA 11 to the 2016-17 season. The squads (player stats, team tactics, ...) are exactly same as the FIFA 17 ea' squad updates graphics (kits, shoes, ...) are mostly from fifa 17, combined with files from FIFA online 3 & FIFA 16 mods (fifa online 3 have updated 2014-15 --FIFA 15-- graphics)</p> aaccfb2cb3<br />
6
- <br />
7
- <br />
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Blue 3 ft. Radio Weasel - Where You Are - Free MP3 and Lyrics Download.md DELETED
@@ -1,105 +0,0 @@
1
-
2
- <h1>How to Download Blue 3's "Where You Are" as an MP3 File</h1>
3
- <p>If you love Ugandan music, you probably know Blue 3, the girl group that rose to fame in 2005 after winning a talent show. And you probably know their hit song "Where You Are", which features Radio and Weasel, another popular duo in the Ugandan music scene.</p>
4
- <p>"Where You Are" is a catchy and romantic song that blends Afrobeat, R&B, and dancehall genres. It has over 20,000 views on YouTube and has been praised by critics and fans alike.</p>
5
- <h2>download blue 3 where you are mp3</h2><br /><p><b><b>DOWNLOAD</b> &#127383; <a href="https://jinyurl.com/2uNSAg">https://jinyurl.com/2uNSAg</a></b></p><br /><br />
6
- <p>But what if you want to listen to this song offline, without any interruptions or ads? What if you want to save some storage space on your device and still enjoy the high-quality sound of this song?</p>
7
- <p>The answer is simple: download "Where You Are" as an MP3 file.</p>
8
- <p>In this article, we'll show you how to do that in a few easy steps. We'll also give you some alternative sources where you can download this song as an MP3 file.</p>
9
- <p>So let's get started!</p>
10
- <h2>What is Blue 3 and "Where You Are"?</h2>
11
- <p>Blue 3 was a Ugandan girl group that consisted of Jackie Chandiru, Lillian Mbabazi, and Cindy Sanyu. They formed in 2005 after winning a talent show called Coca-Cola Popstars.</p>
12
- <p>The group released their debut album "Hitaji" in 2006, which featured songs like "Burrn", "Ndayila", and "Hitaji". They also collaborated with other Ugandan artists like Bebe Cool, Jose Chameleone, and Bobi Wine.</p>
13
- <p>"Where You Are" was one of their most successful songs, released in 2008. It featured Radio and Weasel, who were part of the Goodlyfe Crew at the time. The song was a love ballad that expressed the desire to be with someone no matter where they are.</p>
14
- <p>The song was well-received by both fans and critics, who praised its catchy melody, smooth vocals, and sweet lyrics. It also won several awards, including Song of the Year at the Pearl of Africa Music Awards in 2008.</p>
15
- <p>How to download blue 3 where you are mp3 for free<br />
16
- Download blue 3 where you are mp3 audio<br />
17
- Blue 3 where you are mp3 download Uganda<br />
18
- Where you are by blue 3 ft radio and weasel mp3 download<br />
19
- Download blue 3 where you are mp3 song<br />
20
- Blue 3 where you are mp3 lyrics download<br />
21
- Download blue 3 where you are mp3 video<br />
22
- Blue 3 where you are mp3 online download<br />
23
- Download blue 3 where you are mp3 remix<br />
24
- Blue 3 where you are mp3 instrumental download<br />
25
- Download blue 3 where you are mp3 album<br />
26
- Blue 3 where you are mp3 ringtone download<br />
27
- Download blue 3 where you are mp3 music<br />
28
- Blue 3 where you are mp3 karaoke download<br />
29
- Download blue 3 where you are mp3 version<br />
30
- Blue 3 where you are mp3 live performance download<br />
31
- Download blue 3 where you are mp3 original<br />
32
- Blue 3 where you are mp3 cover download<br />
33
- Download blue 3 where you are mp3 official<br />
34
- Blue 3 where you are mp3 dance download<br />
35
- Download blue 3 where you are mp3 quality<br />
36
- Blue 3 where you are mp3 review download<br />
37
- Download blue 3 where you are mp3 format<br />
38
- Blue 3 where you are mp3 genre download<br />
39
- Download blue 3 where you are mp3 release date<br />
40
- Blue 3 where you are mp3 history download<br />
41
- Download blue 3 where you are mp3 meaning<br />
42
- Blue 3 where you are mp3 reaction download<br />
43
- Download blue 3 where you are mp3 playlist<br />
44
- Blue 3 where you are mp3 streaming download<br />
45
- Download blue 3 where you are mp4 to mp3 converter<br />
46
- Blue 4k video downloader for blue 4k video downloader for blue 4k video downloader for blue 4k video downloader for blue 4k video downloader for blue 4k video downloader for blue</p>
47
- <h2>Why Download "Where You Are" as an MP3 File?</h2>
48
- <p>Downloading "Where You Are" as an MP3 file has many advantages over streaming it online or playing it from a CD. Here are some of them:</p>
49
- <ul>
50
- <li>You can listen to it offline, without any internet connection or data charges.</li>
51
- <li>You can avoid any ads or interruptions that may ruin your listening experience.</li>
52
- <li>You can save some storage space on your device, as MP3 files are smaller than video or audio files.</li>
53
- <li>You can transfer it to any device that supports MP3 playback, such as your smartphone, tablet, laptop, or MP3 player.</li>
54
- <li>You can enjoy the high-quality sound of the song, as MP3 files preserve the original audio quality.</li>
55
- </ul>
56
- <p>As you can see, downloading "Where You Are" as an MP3 file is a smart and convenient way to enjoy this song anytime, anywhere.</p>
57
- <h2>How to Download "Where You Are" as an MP3 File from YouTube</h2>
58
- <p>One of the easiest ways to download "Where You Are" as an MP3 file is to use YouTube, where you can find the official video of the song. Here are the steps you need to follow:</p>
59
- <ol>
60
- <li>Go to YouTube and search for "Blue 3 Where You Are".</li>
61
- <li>Select the video that has the title "Blue 3 ft Radio & Weasel - Where You Are (Official Video)" and has over 20,000 views. This is the official video of the song.</li>
62
- <li>Copy the URL of the video from the address bar of your browser.</li>
63
- <li>Go to a website that can convert YouTube videos into MP3 files, such as ytmp3.cc, y2mate.com, or onlinevideoconverter.com.</li>
64
- <li>Paste the URL of the video into the input box of the website and click on "Convert" or "Download".</li>
65
- <li>Wait for a few seconds until the conversion is done and then click on "Download" or "Save" to save the MP3 file to your device.</li>
66
- </ol>
67
- <p>Congratulations! You have successfully downloaded "Where You Are" as an MP3 file from YouTube. You can now play it on your device or transfer it to another device.</p>
68
- <h2>How to Download "Where You Are" as an MP3 File from Other Sources</h2>
69
- <p>If you don't want to use YouTube or you want to explore other sources where you can download "Where You Are" as an MP3 file, here are some options you can try:</p>
70
- <table>
71
- <tr>
72
- <th>Source</th>
73
- <th>How to Download</th>
74
- </tr>
75
- <tr>
76
- <td>SoundCloud</td>
77
- <td>Go to soundcloud.com and search for "Blue 3 Where You Are". Select the track that has the title "Blue 3 ft Radio & Weasel - Where You Are (Official Audio)" and has over 1,000 plays. This is the official audio of the song. Click on the "More" button below the track and then click on "Download file". Save the MP3 file to your device.</td>
78
- </tr>
79
- <tr>
80
- <td>Spotify</td>
81
- <td>Go to spotify.com and sign up for a free account or log in if you already have one. Search for "Blue 3 Where You Are". Select the track that has the title "Where You Are (feat. Radio & Weasel)" and has over 10,000 streams. This is the official track of the song. Click on the "..." button next to the track and then click on "Save to Your Library". Go to your library and find the track under "Liked Songs". Click on the "..." button again and then click on "Download". Wait for the download to finish and then play the MP3 file on your device.</td>
82
- </tr>
83
- <tr>
84
- <td>iTunes</td>
85
- <td>Go to itunes.apple.com and search for "Blue 3 Where You Are". Select the track that has the title "Where You Are (feat. Radio & Weasel)" and has a price of $0.99. This is the official track of the song. Click on the "Buy" button and enter your payment details. After purchasing, go to your library and find the track under "Purchased". Click on the "Download" button and save the MP3 file to your device.</td>
86
- </tr>
87
- </table>
88
- <h2>Conclusion</h2>
89
- <p>In this article, we have shown you how to download Blue 3's song "Where You Are" as an MP3 file from various sources. We have also explained why downloading this song as an MP3 file is a good idea.</p>
90
- <p>"Where You Are" is a beautiful song that deserves to be listened to over and over again. By downloading it as an MP3 file, you can enjoy it offline, without ads, and with high-quality sound.</p>
91
- <p>So what are you waiting for? Download "Where You Are" as an MP3 file today and enjoy this Ugandan masterpiece!</p>
92
- <h2>FAQs</h2>
93
- <p>Here are some frequently asked questions and answers about downloading "Where You Are" as an MP3 file:</p>
94
- <h4>Q: Is it legal to download "Where You Are" as an MP3 file?</h4>
95
- <p>A: It depends on where you download it from and how you use it. If you download it from a source that has permission from the artists or the record label, or if you use it for personal and non-commercial purposes, then it is legal. However, if you download it from a source that does not have permission or if you use it for commercial or public purposes, then it is illegal. You should always respect the intellectual property rights of the creators and follow the terms and conditions of the source you download from.</p>
96
- <h4>Q: How can I play "Where You Are" as an MP3 file on my device?</h4>
97
- <p>A: Once you have downloaded "Where You Are" as an MP3 file, you can play it on any device that supports MP3 playback. For example, you can play it on your smartphone using the default music player app or any other app that can play MP3 files. You can also play it on your laptop or desktop computer using a program like Windows Media Player, VLC Media Player, or iTunes. You can also transfer it to an MP3 player or a USB drive and play it on any compatible device.</p>
98
- <h4>Q: How can I share "Where You Are" as an MP3 file with my friends?</h4>
99
- <p>A: If you want to share "Where You Are" as an MP3 file with your friends, you can do so in several ways. For example, you can send it to them via email, WhatsApp, Telegram, or any other messaging app. You can also upload it to a cloud service like Google Drive, Dropbox, or OneDrive and share the link with them. You can also burn it to a CD or copy it to a USB drive and give it to them physically. However, you should always make sure that you have permission from the artists or the record label before sharing their music with others.</p>
100
- <h4>Q: How can I support Blue 3 and their music?</h4>
101
- <p>A: If you love Blue 3 and their music, you can support them in various ways. For example, you can buy their albums or songs from official sources like iTunes, Spotify, or Amazon. You can also stream their music from legal platforms like YouTube, SoundCloud, or Deezer. You can also follow them on social media like Facebook, Twitter, or Instagram and show them some love and appreciation. You can also attend their concerts or events if they are available in your area. By supporting Blue 3 and their music, you are helping them to continue making amazing songs for their fans.</p>
102
- <h4>Q: Where can I find more information about Blue 3 and their music?</h4>
103
- <p>A: If you want to find more information about Blue 3 and their music, you can visit their official website at www.blue3music.com. There you can find their biography, discography, news, photos, videos, and contact details. You can also check out their Wikipedia page at https://en.wikipedia.org/wiki/Blue_3_(group) for more facts and history about them. You can also search for them on Google or any other search engine for more articles and reviews about them.</p> 401be4b1e0<br />
104
- <br />
105
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Geometry Dash Lite APK for Android 2.3 and Enjoy Rhythm-based Action Platforming!.md DELETED
@@ -1,106 +0,0 @@
1
-
2
- <h1>Geometry Dash Lite: A Rhythm-Based Action Platformer for Android 2.3</h1>
3
- <p>If you are looking for a fun and challenging game that will test your reflexes and timing, you might want to try Geometry Dash Lite. Geometry Dash Lite is a free version of the popular game Geometry Dash, which is a rhythm-based action platformer that has millions of fans around the world. In this article, we will tell you what Geometry Dash Lite is, what features it offers, and how to download and install it on your Android device running version 2.3 or higher.</p>
4
- <h2>geometry dash lite apk android 2.3</h2><br /><p><b><b>Download</b> &#10037;&#10037;&#10037; <a href="https://jinyurl.com/2uNRjJ">https://jinyurl.com/2uNRjJ</a></b></p><br /><br />
5
- <h2>What is Geometry Dash Lite?</h2>
6
- <p>Geometry Dash Lite is a game developed by RobTop Games AB, a Swedish game studio that specializes in creating addictive and colorful games. Geometry Dash Lite is a simplified version of Geometry Dash, which has more levels, soundtracks, achievements, and an online level editor. However, Geometry Dash Lite still offers plenty of fun and challenge for casual and hardcore gamers alike.</p>
7
- <h3>Features of Geometry Dash Lite</h3>
8
- <p>Geometry Dash Lite has many features that make it an enjoyable and engaging game. Here are some of them:</p>
9
- <h4>Rhythm-based action platforming</h4>
10
- <p>The core gameplay of Geometry Dash Lite is based on jumping, flying, and flipping your way through dangerous passages and spiky obstacles. You have to tap the screen at the right moment to avoid crashing and losing. The game is synchronized with the music, so you have to follow the rhythm and the beat to succeed. The game is fast-paced and requires quick reflexes and concentration.</p>
11
- <h4>Customization options</h4>
12
- <p>You can customize your character in Geometry Dash Lite by unlocking new icons and colors. You can also choose from different vehicles, such as rockets, gravity balls, UFOs, and more. You can mix and match different combinations to create your own unique style.</p>
13
- <h4>Various game modes and levels</h4>
14
- <p>Geometry Dash Lite has several game modes to keep you entertained for hours. You can play the normal mode, where you have to complete the levels in order. You can also play the practice mode, where you can set checkpoints and practice your skills. You can also play the challenge mode, where you have to complete random levels with increasing difficulty. The game has 13 levels in total, each with its own soundtrack and theme.</p>
15
- <h2>How to download and install Geometry Dash Lite apk for Android 2.3?</h2>
16
- <p>If you want to play Geometry Dash Lite on your Android device running version 2.3 or higher, you will need to download and install the apk file of the game. An apk file is a package file that contains all the necessary files and data for an app to run on your device. Here are the requirements and steps to download and install Geometry Dash Lite apk:</p>
17
- <h3>Requirements for Geometry Dash Lite apk</h3>
18
- <p>Before you download and install Geometry Dash Lite apk, you need to make sure that your device meets the following requirements:</p>
19
- <h4>Android version</h4>
20
- <p>Your device must have Android version 2.3 or higher to run Geometry Dash Lite apk. You can check your device's Android version by going to Settings > About phone > Software information.</p>
21
- <p>geometry dash lite apk download for android 2.3<br />
22
- geometry dash lite 2.2 apk android 2.3<br />
23
- geometry dash lite mod apk android 2.3<br />
24
- geometry dash lite full version apk android 2.3<br />
25
- geometry dash lite hack apk android 2.3<br />
26
- geometry dash lite free apk android 2.3<br />
27
- geometry dash lite latest apk android 2.3<br />
28
- geometry dash lite old version apk android 2.3<br />
29
- geometry dash lite unlimited apk android 2.3<br />
30
- geometry dash lite offline apk android 2.3<br />
31
- geometry dash lite app for android 2.3<br />
32
- geometry dash lite game for android 2.3<br />
33
- geometry dash lite update for android 2.3<br />
34
- geometry dash lite cheats for android 2.3<br />
35
- geometry dash lite tips for android 2.3<br />
36
- geometry dash lite guide for android 2.3<br />
37
- geometry dash lite levels for android 2.3<br />
38
- geometry dash lite songs for android 2.3<br />
39
- geometry dash lite icons for android 2.3<br />
40
- geometry dash lite skins for android 2.3<br />
41
- geometry dash lite online for android 2.3<br />
42
- geometry dash lite play store for android 2.3<br />
43
- geometry dash lite filehippo for android 2.3<br />
44
- geometry dash lite robtop games for android 2.3<br />
45
- geometry dash lite rhythm-based action platformer for android 2.3<br />
46
- how to install geometry dash lite on android 2.3<br />
47
- how to play geometry dash lite on android 2.3<br />
48
- how to update geometry dash lite on android 2.3<br />
49
- how to hack geometry dash lite on android 2.3<br />
50
- how to unlock all levels in geometry dash lite on android 2.3<br />
51
- how to get more icons in geometry dash lite on android 2.3<br />
52
- how to change the music in geometry dash lite on android 2.3<br />
53
- how to create your own level in geometry dash lite on android 2.3<br />
54
- how to beat theory of everything in geometry dash lite on android 2.3<br />
55
- how to remove ads in geometry dash lite on android 2.3<br />
56
- is geometry dash lite compatible with android 2.3<br />
57
- is geometry dash lite safe for android 2.3<br />
58
- is geometry dash lite fun for android 2.3<br />
59
- is geometry dash lite hard for android 2.3<br />
60
- is geometry dash lite worth it for android 2.3<br />
61
- what is the difference between geometry dash and geometry dash lite on android 2.3<br />
62
- what is the best strategy for geometry dash lite on android 2.3<br />
63
- what is the highest score in geometry dash lite on android 2.3<br />
64
- what is the easiest level in geometry dash lite on android 2.3<br />
65
- what is the hardest level in geometry dash lite on android 2.3<br />
66
- why is geometry dash lite so popular on android 2.3<br />
67
- why is geometry dash lite so addictive on android 2.3<br />
68
- why is geometry dash lite so challenging on android 2.3<br />
69
- why does geometry dash lite crash on android 2.3</p>
70
- <h4>Storage space</h4>
71
- <p>You need to have enough free storage space on your device to download and install Geometry Dash Lite apk. The size of the apk file is about 50 MB, so you need at least 100 MB of free space to avoid any errors or issues.</p>
72
- <h4>Permissions</h4> <p>You also need to grant some permissions to Geometry Dash Lite apk to run properly on your device. The permissions are:</p>
73
- <ul>
74
- <li>Full network access: This allows the game to access the internet and download additional data.</li>
75
- <li>View network connections: This allows the game to check the status of your network connection and optimize the performance.</li>
76
- <li>Modify or delete the contents of your USB storage: This allows the game to save your progress and settings on your device.</li>
77
- </ul>
78
- <p>You can review and manage these permissions by going to Settings > Apps > Geometry Dash Lite > Permissions.</p>
79
- <h3>Steps to download and install Geometry Dash Lite apk</h3>
80
- <p>After you have checked the requirements, you can follow these steps to download and install Geometry Dash Lite apk on your device:</p>
81
- <h4>Download the apk file from a trusted source</h4>
82
- <p>The first step is to download the apk file of Geometry Dash Lite from a reliable and secure source. You can use your browser or a third-party app store to find and download the apk file. However, you need to be careful and avoid any malicious or fake links that might harm your device or steal your data. You can use this link to download the latest version of Geometry Dash Lite apk from APKPure, a trusted and verified app store.</p>
83
- <h4>Enable unknown sources in your device settings</h4>
84
- <p>The next step is to enable unknown sources in your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown sources and toggle it on. You might see a warning message that installing apps from unknown sources might be risky, but you can ignore it if you trust the source of the apk file.</p>
85
- <h4>Locate and install the apk file</h4>
86
- <p>The third step is to locate and install the apk file on your device. You can use a file manager app or your browser's downloads folder to find the apk file. Once you find it, tap on it and follow the instructions on the screen to install it. You might see a confirmation message that asks you if you want to install this app, just tap on Install and wait for the process to finish.</p>
87
- <h4>Launch and enjoy the game</h4>
88
- <p>The final step is to launch and enjoy the game. You can find the Geometry Dash Lite icon on your home screen or app drawer. Tap on it and start playing the game. You can adjust the settings, choose a level, customize your character, and have fun with the rhythm-based action platforming.</p>
89
- <h2>Conclusion</h2>
90
- <p>Geometry Dash Lite is a great game for anyone who loves music, action, and challenge. It is a free version of Geometry Dash, which has more features and content. However, Geometry Dash Lite still offers plenty of fun and excitement for casual and hardcore gamers alike. You can download and install Geometry Dash Lite apk on your Android device running version 2.3 or higher by following the steps we have explained in this article. We hope you enjoy playing Geometry Dash Lite and have a blast with the rhythm-based action platforming.</p>
91
- <h3>FAQs</h3>
92
- <ul>
93
- <li><b>What is the difference between Geometry Dash Lite and Geometry Dash?</b><br>
94
- Geometry Dash Lite is a free version of Geometry Dash, which is a paid game that costs $1.99. Geometry Dash Lite has fewer levels, soundtracks, achievements, and features than Geometry Dash. However, Geometry Dash Lite still offers plenty of fun and challenge for casual and hardcore gamers alike.</li>
95
- <li><b>Is Geometry Dash Lite safe to download and install?</b><br>
96
- Yes, Geometry Dash Lite is safe to download and install if you use a trusted and verified source like APKPure. However, you need to be careful and avoid any malicious or fake links that might harm your device or steal your data.</li>
97
- <li><b>Can I play Geometry Dash Lite offline?</b><br>
98
- Yes, you can play Geometry Dash Lite offline without an internet connection. However, you will need an internet connection to download additional data or access some online features like leaderboards or achievements.</li>
99
- <li><b>How can I unlock more icons and colors in Geometry Dash Lite?</b><br>
100
- You can unlock more icons and colors in Geometry Dash Lite by completing levels, collecting stars, completing achievements, or using secret coins. You can also use some codes in the vault to unlock some special icons.</li>
101
- <li><b>How can I create my own levels in Geometry Dash Lite?</b><br>
102
- You cannot create your own levels in Geometry Dash Lite as this feature is only available in Geometry Dash. However, you can play some user-created levels in challenge mode by tapping on the dice icon in the main menu.</li>
103
- </ul>
104
- <p></</p> 401be4b1e0<br />
105
- <br />
106
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Euphoria Season 1 Download Where to Find the Full Episodes Online.md DELETED
@@ -1,131 +0,0 @@
1
-
2
- <h1>Download Euphoria Season 1 Reddit: How to Watch the Hit HBO Series Online</h1>
3
- <p>If you are looking for a way to download Euphoria season 1 reddit, you are not alone. Euphoria is one of the most popular and acclaimed shows of recent years, and many people want to watch it online. But how can you download Euphoria season 1 reddit safely and legally? And what are the pros and cons of doing so? In this article, we will answer these questions and more.</p>
4
- <h2>download euphoria season 1 reddit</h2><br /><p><b><b>Download File</b> &#10042;&#10042;&#10042; <a href="https://jinyurl.com/2uNJMt">https://jinyurl.com/2uNJMt</a></b></p><br /><br />
5
- <h2>What is Euphoria and why should you watch it?</h2>
6
- <p>Euphoria is a drama series that follows a group of high-school students as they navigate a minefield of drugs, sex, identity, trauma, social media, love and friendship in today's increasingly unstable world. The show stars Zendaya as Rue, a 17-year-old drug addict who falls in love with Jules, a transgender girl played by Hunter Schafer. The show also features other talented actors such as Sydney Sweeney, Maude Apatow, Jacob Elordi, Alexa Demie, Barbie Ferreira, Algee Smith, Storm Reid, Angus Cloud, Eric Dane, Nika King and Colman Domingo.</p>
7
- <h3>A brief summary of Euphoria season 1</h3>
8
- <p>Euphoria season 1 consists of eight episodes that aired on HBO from June to August 2019. The season also has two special episodes that were released in December 2020 and January 2021. Here is a brief summary of what happens in each episode:</p>
9
- <ul>
10
- <li>Episode 1: Pilot. Rue returns home from rehab with no plans to stay clean. She meets Jules at a party where she is harassed by Nate, a local jock and bully.</li>
11
- <li>Episode 2: Stuntin' Like My Daddy. Rue tries to get clean for Jules but struggles to put the past behind her. Jules hooks up with an older man who turns out to be Nate's father.</li>
12
- <li>Episode 3: Made You Look. Kat starts camming; Jules falls for a boy online; Rue is confronted at NA; Cassie visits McKay at college.</li>
13
- <li>Episode 4: Shook Ones Pt. II. Rue and Jules attend a carnival where they encounter Nate and his father. Nate blackmails Jules with her nude pictures.</li>
14
- <li>Episode 5: '03 Bonnie and Clyde. Maddy and Nate must deal with a police investigation; Rue and Jules each reflect on their relationship.</li>
15
- <li>Episode 6: The Next Episode. On Halloween, Rue worries about her reliance on Jules when she starts acting strange.</li>
16
- <li>Episode 7: The Trials and Tribulations of Trying to Pee While Depressed. Rue gets depressed and watches 22 straight episodes of a British reality show. Jules visits an old friend.</li>
17
- <li>Episode 8: And Salt the Earth Behind You. In the season finale, Rue suggests to Jules that they run away together. Jules leaves but Rue stays behind and relapses.</li>
18
- <li <p>Episode 9: Trouble Don't Last Always. This is the first special episode that focuses on Rue as she celebrates Christmas with her family and talks to her sponsor Ali.</p>
19
- <p>Episode 10: F*ck Anyone Who's Not A Sea Blob. This is the second special episode that focuses on Jules as she reflects on her year and attends a therapy session.</p>
20
- <h3>The main characters and their stories</h3>
21
- <p>Euphoria features a diverse and complex cast of characters, each with their own struggles and secrets. Here are some of the main characters and their stories:</p>
22
- <ul>
23
- <li>Rue Bennett (Zendaya): The protagonist and narrator of the show, Rue is a troubled teenager who suffers from bipolar disorder, anxiety, depression and drug addiction. She is in love with Jules but also fears losing her.</li>
24
- <li>Jules Vaughn (Hunter Schafer): The deuteragonist and Rue's love interest, Jules is a transgender girl who moved to town with her father after her mother abandoned her. She is adventurous, optimistic and fearless, but also vulnerable and lonely.</li>
25
- <li>Nate Jacobs (Jacob Elordi): The main antagonist of the show, Nate is a violent and manipulative jock who has a toxic relationship with Maddy. He is also closeted and struggles with his sexuality and identity.</li>
26
- <li>Maddy Perez (Alexa Demie): Nate's girlfriend and Cassie's best friend, Maddy is a confident and outspoken cheerleader who likes to be the center of attention. She is often abused by Nate but still loves him.</li>
27
- <li>Kat Hernandez (Barbie Ferreira): Rue's childhood friend and Ethan's girlfriend, Kat is a smart and sarcastic girl who undergoes a transformation after losing her virginity. She becomes a cam girl and embraces her sexuality and body positivity.</li>
28
- <li>Cassie Howard (Sydney Sweeney): Maddy's best friend and McKay's girlfriend, Cassie is a sweet and naive girl who has a reputation for being promiscuous. She gets pregnant by McKay but decides to have an abortion.</li>
29
- <li>Christopher McKay (Algee Smith): Cassie's boyfriend and Nate's friend, McKay is a former football star who attends college on a scholarship. He is insecure about his future and his relationship with Cassie.</li>
30
- <li>Lexi Howard (Maude Apatow): Cassie's younger sister and Rue's friend, Lexi is a loyal and supportive girl who often feels overlooked by others. She has a crush on Rue but never acts on it.</li>
31
- <li>Fezco (Angus Cloud): Rue's drug dealer and friend, Fezco is a kind-hearted and protective guy who cares about Rue's well-being. He has a tense relationship with Nate and his father.</li>
32
- <li>Cal Jacobs (Eric Dane): Nate's father and Jules' lover, Cal is a successful businessman who leads a double life. He has sex with young trans women in motels while hiding his true self from his family.</li>
33
- </ul>
34
- <h3>The critical acclaim and awards</h3>
35
- <p>Euphoria has received widespread praise from critics and audiences alike for its realistic and unflinching portrayal of teenage life, its stunning cinematography and soundtrack, its diverse and talented cast, and its powerful performances by Zendaya and Hunter Schafer. The show has also won several awards, including:</p>
36
- <p>download euphoria season 1 episodes free<br />
37
- watch euphoria season 1 online without hbo max<br />
38
- euphoria season 1 google drive link<br />
39
- euphoria season 1 index of series<br />
40
- euphoria season 1 torrent download<br />
41
- euphoria season 1 streaming sites reddit<br />
42
- euphoria season 1 cuevana3<br />
43
- euphoria season 1 soap2day<br />
44
- euphoria season 1 flixtor<br />
45
- euphoria season 1 bflix.to<br />
46
- euphoria season 1 amazon prime<br />
47
- euphoria season 1 nowtv<br />
48
- euphoria season 1 crave on demand<br />
49
- euphoria season 1 filmlicious<br />
50
- euphoria season 1 yourmovies.xyz<br />
51
- euphoria season 1 justwatch.one<br />
52
- euphoria season 1 jistoooiekttes.com<br />
53
- euphoria season 1 movies2watch.tv<br />
54
- euphoria season 1 series.movie<br />
55
- download euphoria specials rue and jules<br />
56
- download euphoria season 2 reddit<br />
57
- download euphoria soundtrack reddit<br />
58
- download euphoria subtitles reddit<br />
59
- download euphoria scripts reddit<br />
60
- download euphoria behind the scenes reddit<br />
61
- download euphoria cast interviews reddit<br />
62
- download euphoria fan edits reddit<br />
63
- download euphoria fan art reddit<br />
64
- download euphoria fan fiction reddit<br />
65
- download euphoria memes reddit<br />
66
- download zendaya's performance in euphoria reddit<br />
67
- download hunter schafer's performance in euphoria reddit<br />
68
- download jacob elordi's performance in euphoria reddit<br />
69
- download barbie ferreira's performance in euphoria reddit<br />
70
- download sydney sweeney's performance in euphoria reddit<br />
71
- download maude apatow's performance in euphoria reddit<br />
72
- download angus cloud's performance in euphoria reddit<br />
73
- download alexa demie's performance in euphoria reddit<br />
74
- download algee smith's performance in euphoria reddit<br />
75
- download storm reid's performance in euphoria reddit</p>
76
- <ul>
77
- <li>The Primetime Emmy Award for Outstanding Lead Actress in a Drama Series for Zendaya in 2020</li>
78
- <li>The Satellite Award for Best Actress in a Drama / Genre Series for Zendaya in 2020</li>
79
- <li>The People's Choice Award for The Drama TV Star of 2020 for Zendaya in 2020</li>
80
- <li>The GLAAD Media Award for Outstanding Drama Series in 2020</li>
81
- <li>The Critics' Choice Television Award for Best Supporting Actress in a Drama Series for Hunter Schafer in 2021</li>
82
- </ul>
83
- <h2>How to download Euphoria season 1 reddit</h2>
84
- <p>If you want to watch Euphoria season 1 online, you have two options: the official ways or the unofficial ways. The official ways are the legal and authorized methods to stream the show from HBO or other platforms that have the rights to distribute it. The unofficial ways are the illegal or unauthorized methods to download the show from torrent sites, streaming sites or reddit links. Let's take a look at each option in more detail.</p>
85
- <h3>The official ways to stream Euphoria season 1 online</h3>
86
- <p>The official ways to stream Euphoria season 1 online are the safest and most reliable methods to watch the show without any hassle or risk. However, they may also require you to pay a subscription fee or have access to certain devices or regions. Here are some of the official ways to stream Euphoria season 1 online:</ <h4>HBO.com</h4>
87
- <p>The most obvious and direct way to stream Euphoria season 1 online is to visit the official website of HBO, the network that produces and airs the show. You can watch all the episodes of Euphoria season 1 on HBO.com, as well as the two special episodes and other bonus content. However, you will need to have an HBO subscription or a free trial to access the content. You can also use your HBO account to watch the show on other devices, such as your TV, smartphone, tablet or laptop, through the HBO app or HBO Max app.</p>
88
- <h4>Hulu</h4>
89
- <p>Another official way to stream Euphoria season 1 online is to use Hulu, a popular streaming service that offers a variety of TV shows and movies. You can watch all the episodes of Euphoria season 1 on Hulu, as well as the two special episodes and other bonus content. However, you will need to have a Hulu subscription or a free trial to access the content. You can also use your Hulu account to watch the show on other devices, such as your TV, smartphone, tablet or laptop, through the Hulu app.</p>
90
- <h4>JustWatch</h4>
91
- <p>A third official way to stream Euphoria season 1 online is to use JustWatch, a website that helps you find where to watch your favorite shows and movies online. You can search for Euphoria season 1 on JustWatch and see which platforms offer the show in your region. You can also compare the prices and features of each platform and choose the best option for you. JustWatch also provides links to the platforms where you can watch the show online.</p>
92
- <h3>The unofficial ways to download Euphoria season 1 reddit</h3>
93
- <p>The unofficial ways to download Euphoria season 1 reddit are the risky and illegal methods to watch the show without paying or following the rules. However, they may also provide you with free or cheap access to the show and allow you to watch it offline or share it with others. Here are some of the unofficial ways to download Euphoria season 1 reddit:</p> <h4>Torrent sites</h4>
94
- <p>One of the most common unofficial ways to download Euphoria season 1 reddit is to use torrent sites, such as The Pirate Bay, RARBG, 1337x, YTS, EZTV and others. Torrent sites are websites that allow users to share files through peer-to-peer networks. You can download Euphoria season 1 reddit by finding a torrent file that contains the episodes and using a torrent client, such as BitTorrent, uTorrent, qBittorrent or others, to download the file to your device. However, you should be aware that torrenting is illegal in many countries and can expose you to legal actions, fines or even jail time. You should also be careful of malware, viruses or fake files that can harm your device or steal your data.</p>
95
- <h4>Streaming sites</h4>
96
- <p>Another unofficial way to download Euphoria season 1 reddit is to use streaming sites, such as Putlocker, Fmovies, 123movies, Solarmovie, Gomovies and others. Streaming sites are websites that allow users to watch videos online without downloading them. You can watch Euphoria season 1 reddit by finding a streaming site that hosts the episodes and clicking on the play button. However, you should be aware that streaming is also illegal in many countries and can expose you to legal actions, fines or even jail time. You should also be careful of pop-up ads, redirects or phishing attempts that can annoy you or compromise your security.</p>
97
- <h4>Reddit links</h4>
98
- <p>A third unofficial way to download Euphoria season 1 reddit is to use reddit links, such as r/EuphoriaHBO, r/EuphoriaSeason1 or r/EuphoriaDownload. Reddit links are posts or comments on reddit that provide links to download or stream Euphoria season 1 reddit from other sources. You can download Euphoria season 1 reddit by finding a reddit link that has a working and reliable link and following the instructions. However, you should be aware that reddit links are also illegal in many countries and can expose you to legal actions, fines or even jail time. You should also be careful of broken links, low-quality videos or spam messages that can waste your time or mislead you.</p>
99
- <h2>The pros and cons of downloading Euphoria season 1 reddit</h2>
100
- <p>Downloading Euphoria season 1 reddit has its advantages and disadvantages. Here are some of the pros and cons of downloading Euphoria season 1 reddit:</p>
101
- <h3>The pros of downloading Euphoria season 1 reddit</h3>
102
- <p>Some of the benefits of downloading Euphoria season 1 reddit are:</p>
103
- <ul>
104
- <li>Free or cheap access to the show: You can watch Euphoria season 1 reddit without paying a subscription fee or buying a DVD. You can also find discounts or deals on some platforms.</li>
105
- <li>Offline viewing and sharing options: You can watch Euphoria season 1 reddit anytime and anywhere without an internet connection. You can also share the show with your friends or family.</li>
106
- <li>No ads or interruptions: You can watch Euphoria season 1 reddit without any annoying ads or interruptions that can ruin your experience.</li>
107
- </ul>
108
- <h3>The cons of downloading Euphoria season 1 reddit</h3>
109
- <p>Some of the drawbacks of downloading Euphoria season 1 reddit are:</p>
110
- <ul>
111
- <li>Legal and ethical issues: You can face legal actions, fines or even jail time for downloading Euphoria season 1 reddit illegally. You can also harm the creators and producers of the show by depriving them of their rightful income.</li>
112
- <li>Quality and security risks: You can encounter low-quality videos, malware, viruses or fake files that can damage your device or steal your data.</li>
113
- <li>Missing out on bonus content and updates: You can miss out on the bonus content and updates that are available on the official platforms, such as behind-the-scenes footage, interviews, trailers, teasers, news and more.</li>
114
- </ul>
115
- <h2>Conclusion: Download Euphoria season 1 reddit at your own risk</h2>
116
- <p>Euphoria is a captivating and compelling show that explores the dark and complex realities of teenage life in today's world. The show has received rave reviews from critics and audiences alike for its stunning visuals, powerful performances and gripping stories. If you want to watch Euphoria season 1 online, you have two options: the official ways or the unofficial ways. The official ways are the legal and authorized methods to stream the show from HBO or other platforms that have the rights to distribute it. The unofficial ways are the illegal or unauthorized methods to download the show from torrent sites, streaming sites or reddit links. Each option has its pros and cons that you should weigh carefully before making your decision. Downloading Euphoria season season 1 reddit can be tempting, but it also comes with many risks and challenges. You should download Euphoria season 1 reddit at your own risk and responsibility. <p>Here are some FAQs that you may have about downloading Euphoria season 1 reddit:</p>
117
- <h4>FAQs</h4>
118
- <ol>
119
- <li>Is Euphoria season 1 available on Netflix?</li>
120
- <p>No, Euphoria season 1 is not available on Netflix. The show is exclusive to HBO and its affiliated platforms.</p>
121
- <li>Is Euphoria season 1 worth watching?</li>
122
- <p>Yes, Euphoria season 1 is worth watching. The show is a captivating and compelling drama that explores the dark and complex realities of teenage life in today's world. The show has received rave reviews from critics and audiences alike for its stunning visuals, powerful performances and gripping stories.</p>
123
- <li>Is Euphoria season 1 suitable for all ages?</li>
124
- <p>No, Euphoria season 1 is not suitable for all ages. The show contains graphic scenes of violence, sex, nudity, drug use, language and other mature themes that may be disturbing or inappropriate for younger or sensitive viewers. The show is rated TV-MA for mature audiences only.</p>
125
- <li>When will Euphoria season 2 come out?</li>
126
- <p>Euphoria season 2 does not have a confirmed release date yet. The show was renewed for a second season in July 2019, but the production was delayed due to the COVID-19 pandemic. The show is expected to resume filming in early 2021 and release later in the year or in early 2022.</p>
127
- <li>How can I support the creators and producers of Euphoria?</li>
128
- <p>You can support the creators and producers of Euphoria by watching the show legally and ethically on the official platforms, such as HBO.com, Hulu or JustWatch. You can also buy the DVD or Blu-ray of the show, subscribe to the HBO newsletter or social media accounts, or donate to the charities or causes that the show supports.</p>
129
- </ol></p> 197e85843d<br />
130
- <br />
131
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1toTree/lora_test/ppdiffusers/pipelines/ddpm/pipeline_ddpm.py DELETED
@@ -1,108 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- from typing import List, Optional, Tuple, Union
17
-
18
- import paddle
19
-
20
- from ...configuration_utils import FrozenDict
21
- from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
22
- from ...utils import deprecate
23
-
24
-
25
- class DDPMPipeline(DiffusionPipeline):
26
- r"""
27
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
28
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
29
-
30
- Parameters:
31
- unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
32
- scheduler ([`SchedulerMixin`]):
33
- A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
34
- [`DDPMScheduler`], or [`DDIMScheduler`].
35
- """
36
-
37
- def __init__(self, unet, scheduler):
38
- super().__init__()
39
- self.register_modules(unet=unet, scheduler=scheduler)
40
-
41
- @paddle.no_grad()
42
- def __call__(
43
- self,
44
- batch_size: int = 1,
45
- generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
46
- num_inference_steps: int = 1000,
47
- output_type: Optional[str] = "pil",
48
- return_dict: bool = True,
49
- **kwargs,
50
- ) -> Union[ImagePipelineOutput, Tuple]:
51
- r"""
52
- Args:
53
- batch_size (`int`, *optional*, defaults to 1):
54
- The number of images to generate.
55
- generator (`paddle.Generator`, *optional*):
56
- One or a list of paddle generator(s) to make generation deterministic.
57
- num_inference_steps (`int`, *optional*, defaults to 1000):
58
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
59
- expense of slower inference.
60
- output_type (`str`, *optional*, defaults to `"pil"`):
61
- The output format of the generated image. Choose between
62
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
63
- return_dict (`bool`, *optional*, defaults to `True`):
64
- Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
65
-
66
- Returns:
67
- [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
68
- `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
69
- generated images.
70
- """
71
- message = (
72
- "Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler ="
73
- " DDPMScheduler.from_pretrained(<model_id>, prediction_type='epsilon')`."
74
- )
75
- predict_epsilon = deprecate("predict_epsilon", "0.13.0", message, take_from=kwargs)
76
-
77
- if predict_epsilon is not None:
78
- new_config = dict(self.scheduler.config)
79
- new_config["prediction_type"] = "epsilon" if predict_epsilon else "sample"
80
- self.scheduler._internal_dict = FrozenDict(new_config)
81
-
82
- # Sample gaussian noise to begin loop
83
- if isinstance(self.unet.sample_size, int):
84
- image_shape = (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size)
85
- else:
86
- image_shape = (batch_size, self.unet.in_channels, *self.unet.sample_size)
87
-
88
- image = paddle.randn(image_shape, generator=generator)
89
-
90
- # set step values
91
- self.scheduler.set_timesteps(num_inference_steps)
92
-
93
- for t in self.progress_bar(self.scheduler.timesteps):
94
- # 1. predict noise model_output
95
- model_output = self.unet(image, t).sample
96
-
97
- # 2. compute previous image: x_t -> x_t-1
98
- image = self.scheduler.step(model_output, t, image, generator=generator).prev_sample
99
-
100
- image = (image / 2 + 0.5).clip(0, 1)
101
- image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
102
- if output_type == "pil":
103
- image = self.numpy_to_pil(image)
104
-
105
- if not return_dict:
106
- return (image,)
107
-
108
- return ImagePipelineOutput(images=image)
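A minimal usage sketch for the pipeline deleted above, assuming the ppdiffusers package is installed and a compatible unconditional DDPM checkpoint is available (the model id below is a placeholder, not taken from this repo):

from ppdiffusers import DDPMPipeline

# Load a pretrained unconditional DDPM checkpoint (placeholder model id).
pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256")

# Run the full denoising loop and save the first generated PIL image.
output = pipe(batch_size=1, num_inference_steps=1000, output_type="pil")
output.images[0].save("ddpm_sample.png")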
spaces/44brabal/runwayml-stable-diffusion-v1-5/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Runwayml Stable Diffusion V1 5
3
- emoji: 🏃
4
- colorFrom: purple
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.45.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/A00001/bingothoo/src/components/ui/badge.tsx DELETED
@@ -1,36 +0,0 @@
1
- import * as React from 'react'
2
- import { cva, type VariantProps } from 'class-variance-authority'
3
-
4
- import { cn } from '@/lib/utils'
5
-
6
- const badgeVariants = cva(
7
- 'inline-flex items-center rounded-full border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2',
8
- {
9
- variants: {
10
- variant: {
11
- default:
12
- 'border-transparent bg-primary text-primary-foreground hover:bg-primary/80',
13
- secondary:
14
- 'border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80',
15
- destructive:
16
- 'border-transparent bg-destructive text-destructive-foreground hover:bg-destructive/80',
17
- outline: 'text-foreground'
18
- }
19
- },
20
- defaultVariants: {
21
- variant: 'default'
22
- }
23
- }
24
- )
25
-
26
- export interface BadgeProps
27
- extends React.HTMLAttributes<HTMLDivElement>,
28
- VariantProps<typeof badgeVariants> {}
29
-
30
- function Badge({ className, variant, ...props }: BadgeProps) {
31
- return (
32
- <div className={cn(badgeVariants({ variant }), className)} {...props} />
33
- )
34
- }
35
-
36
- export { Badge, badgeVariants }
spaces/AIZ2H/02-Gradio-Art-From-Text-And-Images/app.py DELETED
@@ -1,224 +0,0 @@
1
- import os
2
-
3
- os.system("git clone --recursive https://github.com/JD-P/cloob-latent-diffusion")
4
- os.system("cd cloob-latent-diffusion;pip install omegaconf pillow pytorch-lightning einops wandb ftfy regex ./CLIP")
5
-
6
- import argparse
7
- from functools import partial
8
- from pathlib import Path
9
- import sys
10
- sys.path.append('./cloob-latent-diffusion')
11
- sys.path.append('./cloob-latent-diffusion/cloob-training')
12
- sys.path.append('./cloob-latent-diffusion/latent-diffusion')
13
- sys.path.append('./cloob-latent-diffusion/taming-transformers')
14
- sys.path.append('./cloob-latent-diffusion/v-diffusion-pytorch')
15
- from omegaconf import OmegaConf
16
- from PIL import Image
17
- import torch
18
- from torch import nn
19
- from torch.nn import functional as F
20
- from torchvision import transforms
21
- from torchvision.transforms import functional as TF
22
- from tqdm import trange
23
- from CLIP import clip
24
- from cloob_training import model_pt, pretrained
25
- import ldm.models.autoencoder
26
- from diffusion import sampling, utils
27
- import train_latent_diffusion as train
28
- from huggingface_hub import hf_hub_url, cached_download
29
- import random
30
-
31
- # Download the model files
32
- checkpoint = cached_download(hf_hub_url("huggan/distill-ccld-wa", filename="model_student.ckpt"))
33
- ae_model_path = cached_download(hf_hub_url("huggan/ccld_wa", filename="ae_model.ckpt"))
34
- ae_config_path = cached_download(hf_hub_url("huggan/ccld_wa", filename="ae_model.yaml"))
35
-
36
- # Define a few utility functions
37
-
38
-
39
- def parse_prompt(prompt, default_weight=3.):
40
- if prompt.startswith('http://') or prompt.startswith('https://'):
41
- vals = prompt.rsplit(':', 2)
42
- vals = [vals[0] + ':' + vals[1], *vals[2:]]
43
- else:
44
- vals = prompt.rsplit(':', 1)
45
- vals = vals + ['', default_weight][len(vals):]
46
- return vals[0], float(vals[1])
47
-
48
-
49
- def resize_and_center_crop(image, size):
50
- fac = max(size[0] / image.size[0], size[1] / image.size[1])
51
- image = image.resize((int(fac * image.size[0]), int(fac * image.size[1])), Image.LANCZOS)
52
- return TF.center_crop(image, size[::-1])
53
-
54
-
55
- # Load the models
56
- device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
57
- print('Using device:', device)
58
- print('loading models')
59
-
60
- # autoencoder
61
- ae_config = OmegaConf.load(ae_config_path)
62
- ae_model = ldm.models.autoencoder.AutoencoderKL(**ae_config.model.params)
63
- ae_model.eval().requires_grad_(False).to(device)
64
- ae_model.load_state_dict(torch.load(ae_model_path))
65
- n_ch, side_y, side_x = 4, 32, 32
66
-
67
- # diffusion model
68
- model = train.DiffusionModel(192, [1,1,2,2], autoencoder_scale=torch.tensor(4.3084))
69
- model.load_state_dict(torch.load(checkpoint, map_location='cpu'))
70
- model = model.to(device).eval().requires_grad_(False)
71
-
72
- # CLOOB
73
- cloob_config = pretrained.get_config('cloob_laion_400m_vit_b_16_16_epochs')
74
- cloob = model_pt.get_pt_model(cloob_config)
75
- checkpoint = pretrained.download_checkpoint(cloob_config)
76
- cloob.load_state_dict(model_pt.get_pt_params(cloob_config, checkpoint))
77
- cloob.eval().requires_grad_(False).to(device)
78
-
79
-
80
- # The key function: returns a list of n PIL images
81
- def generate(n=1, prompts=['a red circle'], images=[], seed=42, steps=15,
82
- method='plms', eta=None):
83
- zero_embed = torch.zeros([1, cloob.config['d_embed']], device=device)
84
- target_embeds, weights = [zero_embed], []
85
-
86
- for prompt in prompts:
87
- txt, weight = parse_prompt(prompt)
88
- target_embeds.append(cloob.text_encoder(cloob.tokenize(txt).to(device)).float())
89
- weights.append(weight)
90
-
91
- for prompt in images:
92
- path, weight = parse_prompt(prompt)
93
- img = Image.open(utils.fetch(path)).convert('RGB')
94
- clip_size = cloob.config['image_encoder']['image_size']
95
- img = resize_and_center_crop(img, (clip_size, clip_size))
96
- batch = TF.to_tensor(img)[None].to(device)
97
- embed = F.normalize(cloob.image_encoder(cloob.normalize(batch)).float(), dim=-1)
98
- target_embeds.append(embed)
99
- weights.append(weight)
100
-
101
- weights = torch.tensor([1 - sum(weights), *weights], device=device)
102
-
103
- torch.manual_seed(seed)
104
-
105
- def cfg_model_fn(x, t):
106
- n = x.shape[0]
107
- n_conds = len(target_embeds)
108
- x_in = x.repeat([n_conds, 1, 1, 1])
109
- t_in = t.repeat([n_conds])
110
- clip_embed_in = torch.cat([*target_embeds]).repeat_interleave(n, 0)
111
- vs = model(x_in, t_in, clip_embed_in).view([n_conds, n, *x.shape[1:]])
112
- v = vs.mul(weights[:, None, None, None, None]).sum(0)
113
- return v
114
-
115
- def run(x, steps):
116
- if method == 'ddpm':
117
- return sampling.sample(cfg_model_fn, x, steps, 1., {})
118
- if method == 'ddim':
119
- return sampling.sample(cfg_model_fn, x, steps, eta, {})
120
- if method == 'prk':
121
- return sampling.prk_sample(cfg_model_fn, x, steps, {})
122
- if method == 'plms':
123
- return sampling.plms_sample(cfg_model_fn, x, steps, {})
124
- if method == 'pie':
125
- return sampling.pie_sample(cfg_model_fn, x, steps, {})
126
- if method == 'plms2':
127
- return sampling.plms2_sample(cfg_model_fn, x, steps, {})
128
- assert False
129
-
130
- batch_size = n
131
- x = torch.randn([n, n_ch, side_y, side_x], device=device)
132
- t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
133
- steps = utils.get_spliced_ddpm_cosine_schedule(t)
134
- pil_ims = []
135
- for i in trange(0, n, batch_size):
136
- cur_batch_size = min(n - i, batch_size)
137
- out_latents = run(x[i:i+cur_batch_size], steps)
138
- outs = ae_model.decode(out_latents * torch.tensor(2.55).to(device))
139
- for j, out in enumerate(outs):
140
- pil_ims.append(utils.to_pil_image(out))
141
-
142
- return pil_ims
143
-
144
-
145
- import gradio as gr
146
-
147
- def gen_ims(prompt, im_prompt=None, seed=None, n_steps=10, method='plms'):
148
- if seed == None :
149
- seed = random.randint(0, 10000)
150
- print( prompt, im_prompt, seed, n_steps)
151
- prompts = [prompt]
152
- im_prompts = []
153
- if im_prompt != None:
154
- im_prompts = [im_prompt]
155
- pil_ims = generate(n=1, prompts=prompts, images=im_prompts, seed=seed, steps=n_steps, method=method)
156
- return pil_ims[0]
157
-
158
- iface = gr.Interface(fn=gen_ims,
159
- inputs=[#gr.inputs.Slider(minimum=1, maximum=1, step=1, default=1,label="Number of images"),
160
- #gr.inputs.Slider(minimum=0, maximum=200, step=1, label='Random seed', default=0),
161
- gr.inputs.Textbox(label="Text prompt"),
162
- gr.inputs.Image(optional=True, label="Image prompt", type='filepath'),
163
- #gr.inputs.Slider(minimum=10, maximum=35, step=1, default=15,label="Number of steps")
164
- ],
165
- outputs=[gr.outputs.Image(type="pil", label="Generated Image")],
166
- examples=[
167
- ["Futurism, in the style of Wassily Kandinsky"],
168
- ["Art Nouveau, in the style of John Singer Sargent"],
169
- ["Surrealism, in the style of Edgar Degas"],
170
- ["Expressionism, in the style of Wassily Kandinsky"],
171
- ["Futurism, in the style of Egon Schiele"],
172
- ["Neoclassicism, in the style of Gustav Klimt"],
173
- ["Cubism, in the style of Gustav Klimt"],
174
- ["Op Art, in the style of Marc Chagall"],
175
- ["Romanticism, in the style of M.C. Escher"],
176
- ["Futurism, in the style of M.C. Escher"],
177
- ["Abstract Art, in the style of M.C. Escher"],
178
- ["Mannerism, in the style of Paul Klee"],
179
- ["Romanesque Art, in the style of Leonardo da Vinci"],
180
- ["High Renaissance, in the style of Rembrandt"],
181
- ["Magic Realism, in the style of Gustave Dore"],
182
- ["Realism, in the style of Jean-Michel Basquiat"],
183
- ["Art Nouveau, in the style of Paul Gauguin"],
184
- ["Avant-garde, in the style of Pierre-Auguste Renoir"],
185
- ["Baroque, in the style of Edward Hopper"],
186
- ["Post-Impressionism, in the style of Wassily Kandinsky"],
187
- ["Naturalism, in the style of Rene Magritte"],
188
- ["Constructivism, in the style of Paul Cezanne"],
189
- ["Abstract Expressionism, in the style of Henri Matisse"],
190
- ["Pop Art, in the style of Vincent van Gogh"],
191
- ["Futurism, in the style of Wassily Kandinsky"],
192
- ["Futurism, in the style of Zdzislaw Beksinski"],
193
- ['Surrealism, in the style of Salvador Dali'],
194
- ["Aaron Wacker, oil on canvas"],
195
- ["abstract"],
196
- ["landscape"],
197
- ["portrait"],
198
- ["sculpture"],
199
- ["genre painting"],
200
- ["installation"],
201
- ["photo"],
202
- ["figurative"],
203
- ["illustration"],
204
- ["still life"],
205
- ["history painting"],
206
- ["cityscape"],
207
- ["marina"],
208
- ["animal painting"],
209
- ["design"],
210
- ["calligraphy"],
211
- ["symbolic painting"],
212
- ["graffiti"],
213
- ["performance"],
214
- ["mythological painting"],
215
- ["battle painting"],
216
- ["self-portrait"],
217
- ["Impressionism, oil on canvas"]
218
- ],
219
- title='Art Generator and Style Mixer from 🧠 Cloob and 🎨 WikiArt - Visual Art Encyclopedia:',
220
- description="Trained on images from the [WikiArt](https://www.wikiart.org/) dataset, comprised of visual arts",
221
- article = 'Model used is: [model card](https://huggingface.co/huggan/distill-ccld-wa)..'
222
-
223
- )
224
- iface.launch(enable_queue=True) # , debug=True for colab debugging
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/helpers/you.py DELETED
@@ -1,79 +0,0 @@
1
- import sys
2
- import json
3
- import urllib.parse
4
-
5
- from curl_cffi import requests
6
-
7
- config = json.loads(sys.argv[1])
8
- messages = config['messages']
9
- prompt = ''
10
-
11
-
12
- def transform(messages: list) -> list:
13
- result = []
14
- i = 0
15
-
16
- while i < len(messages):
17
- if messages[i]['role'] == 'user':
18
- question = messages[i]['content']
19
- i += 1
20
-
21
- if i < len(messages) and messages[i]['role'] == 'assistant':
22
- answer = messages[i]['content']
23
- i += 1
24
- else:
25
- answer = ''
26
-
27
- result.append({'question': question, 'answer': answer})
28
-
29
- elif messages[i]['role'] == 'assistant':
30
- result.append({'question': '', 'answer': messages[i]['content']})
31
- i += 1
32
-
33
- elif messages[i]['role'] == 'system':
34
- result.append({'question': messages[i]['content'], 'answer': ''})
35
- i += 1
36
-
37
- return result
38
-
39
- headers = {
40
- 'Content-Type': 'application/x-www-form-urlencoded',
41
- 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
42
- 'Sec-Fetch-Site': 'same-origin',
43
- 'Accept-Language': 'en-GB,en;q=0.9',
44
- 'Sec-Fetch-Mode': 'navigate',
45
- 'Host': 'you.com',
46
- 'Origin': 'https://you.com',
47
- 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
48
- 'Referer': 'https://you.com/api/streamingSearch?q=nice&safeSearch=Moderate&onShoppingPage=false&mkt=&responseFilter=WebPages,Translations,TimeZone,Computation,RelatedSearches&domain=youchat&queryTraceId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&chat=%5B%7B%22question%22%3A%22hi%22%2C%22answer%22%3A%22Hello!%20How%20can%20I%20assist%20you%20today%3F%22%7D%5D&chatId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&__cf_chl_tk=ex2bw6vn5vbLsUm8J5rDYUC0Bjzc1XZqka6vUl6765A-1684108495-0-gaNycGzNDtA',
49
- 'Connection': 'keep-alive',
50
- 'Sec-Fetch-Dest': 'document',
51
- 'Priority': 'u=0, i',
52
- }
53
-
54
- if messages[-1]['role'] == 'user':
55
- prompt = messages[-1]['content']
56
- messages = messages[:-1]
57
-
58
- params = urllib.parse.urlencode({
59
- 'q': prompt,
60
- 'domain': 'youchat',
61
- 'chat': transform(messages)
62
- })
63
-
64
- def output(chunk):
65
- if b'"youChatToken"' in chunk:
66
- chunk_json = json.loads(chunk.decode().split('data: ')[1])
67
-
68
- print(chunk_json['youChatToken'], flush=True, end = '')
69
-
70
- while True:
71
- try:
72
- response = requests.get(f'https://you.com/api/streamingSearch?{params}',
73
- headers=headers, content_callback=output, impersonate='safari15_5')
74
-
75
- exit(0)
76
-
77
- except Exception as e:
78
- print('an error occurred, retrying... |', e, flush=True)
79
- continue
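To illustrate what the transform helper above produces, here is a small worked example under the same message format the script expects (the message contents are made up):

messages = [
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "hi"},
    {"role": "assistant", "content": "Hello!"},
]
# transform(messages) pairs each user turn with the assistant reply that follows it:
# [{'question': 'You are helpful.', 'answer': ''},
#  {'question': 'hi', 'answer': 'Hello!'}]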
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/toggleswitchshape.d.ts DELETED
@@ -1,2 +0,0 @@
1
- import ToggleSwitchShape from './gameobjects/shape/toggleswitch/ToggleSwitchShape';
2
- export default ToggleSwitchShape;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/puff/Factory.js DELETED
@@ -1,13 +0,0 @@
1
- import Puff from './Puff.js';
2
- import ObjectFactory from '../ObjectFactory.js';
3
- import SetValue from '../../../plugins/utils/object/SetValue.js';
4
-
5
- ObjectFactory.register('puff', function (config) {
6
- var gameObject = new Puff(this.scene, config);
7
- this.scene.add.existing(gameObject);
8
- return gameObject;
9
- });
10
-
11
- SetValue(window, 'RexPlugins.Spinner.Puff', Puff);
12
-
13
- export default Puff;
spaces/AiMimicry/sovits-models/hubert/hubert_model.py DELETED
@@ -1,222 +0,0 @@
1
- import copy
2
- import random
3
- from typing import Optional, Tuple
4
-
5
- import torch
6
- import torch.nn as nn
7
- import torch.nn.functional as t_func
8
- from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
9
-
10
-
11
- class Hubert(nn.Module):
12
- def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
13
- super().__init__()
14
- self._mask = mask
15
- self.feature_extractor = FeatureExtractor()
16
- self.feature_projection = FeatureProjection()
17
- self.positional_embedding = PositionalConvEmbedding()
18
- self.norm = nn.LayerNorm(768)
19
- self.dropout = nn.Dropout(0.1)
20
- self.encoder = TransformerEncoder(
21
- nn.TransformerEncoderLayer(
22
- 768, 12, 3072, activation="gelu", batch_first=True
23
- ),
24
- 12,
25
- )
26
- self.proj = nn.Linear(768, 256)
27
-
28
- self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
29
- self.label_embedding = nn.Embedding(num_label_embeddings, 256)
30
-
31
- def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
32
- mask = None
33
- if self.training and self._mask:
34
- mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
35
- x[mask] = self.masked_spec_embed.to(x.dtype)
36
- return x, mask
37
-
38
- def encode(
39
- self, x: torch.Tensor, layer: Optional[int] = None
40
- ) -> Tuple[torch.Tensor, torch.Tensor]:
41
- x = self.feature_extractor(x)
42
- x = self.feature_projection(x.transpose(1, 2))
43
- x, mask = self.mask(x)
44
- x = x + self.positional_embedding(x)
45
- x = self.dropout(self.norm(x))
46
- x = self.encoder(x, output_layer=layer)
47
- return x, mask
48
-
49
- def logits(self, x: torch.Tensor) -> torch.Tensor:
50
- logits = torch.cosine_similarity(
51
- x.unsqueeze(2),
52
- self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
53
- dim=-1,
54
- )
55
- return logits / 0.1
56
-
57
- def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
58
- x, mask = self.encode(x)
59
- x = self.proj(x)
60
- logits = self.logits(x)
61
- return logits, mask
62
-
63
-
64
- class HubertSoft(Hubert):
65
- def __init__(self):
66
- super().__init__()
67
-
68
- @torch.inference_mode()
69
- def units(self, wav: torch.Tensor) -> torch.Tensor:
70
- wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
71
- x, _ = self.encode(wav)
72
- return self.proj(x)
73
-
74
-
75
- class FeatureExtractor(nn.Module):
76
- def __init__(self):
77
- super().__init__()
78
- self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
79
- self.norm0 = nn.GroupNorm(512, 512)
80
- self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
81
- self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
82
- self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
83
- self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
84
- self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
85
- self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
86
-
87
- def forward(self, x: torch.Tensor) -> torch.Tensor:
88
- x = t_func.gelu(self.norm0(self.conv0(x)))
89
- x = t_func.gelu(self.conv1(x))
90
- x = t_func.gelu(self.conv2(x))
91
- x = t_func.gelu(self.conv3(x))
92
- x = t_func.gelu(self.conv4(x))
93
- x = t_func.gelu(self.conv5(x))
94
- x = t_func.gelu(self.conv6(x))
95
- return x
96
-
97
-
98
- class FeatureProjection(nn.Module):
99
- def __init__(self):
100
- super().__init__()
101
- self.norm = nn.LayerNorm(512)
102
- self.projection = nn.Linear(512, 768)
103
- self.dropout = nn.Dropout(0.1)
104
-
105
- def forward(self, x: torch.Tensor) -> torch.Tensor:
106
- x = self.norm(x)
107
- x = self.projection(x)
108
- x = self.dropout(x)
109
- return x
110
-
111
-
112
- class PositionalConvEmbedding(nn.Module):
113
- def __init__(self):
114
- super().__init__()
115
- self.conv = nn.Conv1d(
116
- 768,
117
- 768,
118
- kernel_size=128,
119
- padding=128 // 2,
120
- groups=16,
121
- )
122
- self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
123
-
124
- def forward(self, x: torch.Tensor) -> torch.Tensor:
125
- x = self.conv(x.transpose(1, 2))
126
- x = t_func.gelu(x[:, :, :-1])
127
- return x.transpose(1, 2)
128
-
129
-
130
- class TransformerEncoder(nn.Module):
131
- def __init__(
132
- self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
133
- ) -> None:
134
- super(TransformerEncoder, self).__init__()
135
- self.layers = nn.ModuleList(
136
- [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
137
- )
138
- self.num_layers = num_layers
139
-
140
- def forward(
141
- self,
142
- src: torch.Tensor,
143
- mask: torch.Tensor = None,
144
- src_key_padding_mask: torch.Tensor = None,
145
- output_layer: Optional[int] = None,
146
- ) -> torch.Tensor:
147
- output = src
148
- for layer in self.layers[:output_layer]:
149
- output = layer(
150
- output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
151
- )
152
- return output
153
-
154
-
155
- def _compute_mask(
156
- shape: Tuple[int, int],
157
- mask_prob: float,
158
- mask_length: int,
159
- device: torch.device,
160
- min_masks: int = 0,
161
- ) -> torch.Tensor:
162
- batch_size, sequence_length = shape
163
-
164
- if mask_length < 1:
165
- raise ValueError("`mask_length` has to be bigger than 0.")
166
-
167
- if mask_length > sequence_length:
168
- raise ValueError(
169
- f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
170
- )
171
-
172
- # compute number of masked spans in batch
173
- num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
174
- num_masked_spans = max(num_masked_spans, min_masks)
175
-
176
- # make sure num masked indices <= sequence_length
177
- if num_masked_spans * mask_length > sequence_length:
178
- num_masked_spans = sequence_length // mask_length
179
-
180
- # SpecAugment mask to fill
181
- mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
182
-
183
- # uniform distribution to sample from, make sure that offset samples are < sequence_length
184
- uniform_dist = torch.ones(
185
- (batch_size, sequence_length - (mask_length - 1)), device=device
186
- )
187
-
188
- # get random indices to mask
189
- mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
190
-
191
- # expand masked indices to masked spans
192
- mask_indices = (
193
- mask_indices.unsqueeze(dim=-1)
194
- .expand((batch_size, num_masked_spans, mask_length))
195
- .reshape(batch_size, num_masked_spans * mask_length)
196
- )
197
- offsets = (
198
- torch.arange(mask_length, device=device)[None, None, :]
199
- .expand((batch_size, num_masked_spans, mask_length))
200
- .reshape(batch_size, num_masked_spans * mask_length)
201
- )
202
- mask_idxs = mask_indices + offsets
203
-
204
- # scatter indices to mask
205
- mask = mask.scatter(1, mask_idxs, True)
206
-
207
- return mask
208
-
209
-
210
- def hubert_soft(
211
- path: str,
212
- ) -> HubertSoft:
213
- r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
214
- Args:
215
- path (str): path of a pretrained model
216
- """
217
- hubert = HubertSoft()
218
- checkpoint = torch.load(path)
219
- consume_prefix_in_state_dict_if_present(checkpoint, "module.")
220
- hubert.load_state_dict(checkpoint)
221
- hubert.eval()
222
- return hubert
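A short sketch of how the hubert_soft loader above is typically used to extract soft speech units, assuming a local checkpoint path and a 16 kHz mono waveform tensor (both are placeholders, not paths from this repo):

import torch
from hubert.hubert_model import hubert_soft

# Load the pretrained HuBERT-Soft weights (path is an assumption).
hubert = hubert_soft("checkpoints/hubert-soft.pt")

# The model expects a (batch, 1, samples) float tensor at 16 kHz.
wav = torch.randn(1, 1, 16000)
units = hubert.units(wav)  # (batch, frames, 256) soft units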
spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/monotonic_align/setup.py DELETED
@@ -1,9 +0,0 @@
1
- from distutils.core import setup
2
- from Cython.Build import cythonize
3
- import numpy
4
-
5
- setup(
6
- name = 'monotonic_align',
7
- ext_modules = cythonize("core.pyx"),
8
- include_dirs=[numpy.get_include()]
9
- )
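For reference, this extension is normally compiled in place before the surrounding code imports monotonic_align, typically with `python setup.py build_ext --inplace` run from the monotonic_align directory (standard Cython/distutils usage, not a command specific to this repo).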
spaces/Alfasign/HuggingGPT-Lite/awesome_chat.py DELETED
@@ -1,933 +0,0 @@
1
- import base64
2
- import copy
3
- import datetime
4
- from io import BytesIO
5
- import io
6
- import os
7
- import random
8
- import time
9
- import traceback
10
- import uuid
11
- import requests
12
- import re
13
- import json
14
- import logging
15
- import argparse
16
- import yaml
17
- from PIL import Image, ImageDraw
18
- from diffusers.utils import load_image
19
- from pydub import AudioSegment
20
- import threading
21
- from queue import Queue
22
- from get_token_ids import get_token_ids_for_task_parsing, get_token_ids_for_choose_model, count_tokens, get_max_context_length
23
- from huggingface_hub.inference_api import InferenceApi
24
- from huggingface_hub.inference_api import ALL_TASKS
25
- from models_server import models, status
26
- from functools import partial
27
- from huggingface_hub import Repository
28
-
29
- parser = argparse.ArgumentParser()
30
- parser.add_argument("--config", type=str, default="config.yaml.dev")
31
- parser.add_argument("--mode", type=str, default="cli")
32
- args = parser.parse_args()
33
-
34
- if __name__ != "__main__":
35
- args.config = "config.gradio.yaml"
36
-
37
- config = yaml.load(open(args.config, "r"), Loader=yaml.FullLoader)
38
-
39
- if not os.path.exists("logs"):
40
- os.mkdir("logs")
41
-
42
- now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
43
-
44
- DATASET_REPO_URL = "https://huggingface.co/datasets/tricktreat/HuggingGPT_logs"
45
- LOG_HF_TOKEN = os.environ.get("LOG_HF_TOKEN")
46
- if LOG_HF_TOKEN:
47
- repo = Repository(
48
- local_dir="logs", clone_from=DATASET_REPO_URL, use_auth_token=LOG_HF_TOKEN
49
- )
50
-
51
- logger = logging.getLogger(__name__)
52
- logger.setLevel(logging.CRITICAL)
53
-
54
- handler = logging.StreamHandler()
55
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
56
- handler.setFormatter(formatter)
57
- if not config["debug"]:
58
- handler.setLevel(logging.INFO)
59
- logger.addHandler(handler)
60
-
61
- log_file = config["log_file"]
62
- if log_file:
63
- log_file = log_file.replace("TIMESTAMP", now)
64
- filehandler = logging.FileHandler(log_file)
65
- filehandler.setLevel(logging.DEBUG)
66
- filehandler.setFormatter(formatter)
67
- logger.addHandler(filehandler)
68
-
69
- LLM = config["model"]
70
- use_completion = config["use_completion"]
71
-
72
- # consistent: wrong msra model name
73
- LLM_encoding = LLM
74
- if LLM == "gpt-3.5-turbo":
75
- LLM_encoding = "text-davinci-003"
76
- task_parsing_highlight_ids = get_token_ids_for_task_parsing(LLM_encoding)
77
- choose_model_highlight_ids = get_token_ids_for_choose_model(LLM_encoding)
78
-
79
- # ENDPOINT MODEL NAME
80
- # /v1/chat/completions gpt-4, gpt-4-0314, gpt-4-32k, gpt-4-32k-0314, gpt-3.5-turbo, gpt-3.5-turbo-0301
81
- # /v1/completions text-davinci-003, text-davinci-002, text-curie-001, text-babbage-001, text-ada-001, davinci, curie, babbage, ada
82
-
83
- if use_completion:
84
- api_name = "completions"
85
- else:
86
- api_name = "chat/completions"
87
-
88
- if not config["dev"]:
89
- if not config["openai"]["key"].startswith("sk-") and not config["openai"]["key"]=="gradio":
90
- raise ValueError("Incrorrect OpenAI key. Please check your config.yaml file.")
91
- OPENAI_KEY = config["openai"]["key"]
92
- endpoint = f"https://api.openai.com/v1/{api_name}"
93
- if OPENAI_KEY.startswith("sk-"):
94
- HEADER = {
95
- "Authorization": f"Bearer {OPENAI_KEY}"
96
- }
97
- else:
98
- HEADER = None
99
- else:
100
- endpoint = f"{config['local']['endpoint']}/v1/{api_name}"
101
- HEADER = None
102
-
103
- PROXY = None
104
- if config["proxy"]:
105
- PROXY = {
106
- "https": config["proxy"],
107
- }
108
-
109
- inference_mode = config["inference_mode"]
110
-
111
- parse_task_demos_or_presteps = open(config["demos_or_presteps"]["parse_task"], "r").read()
112
- choose_model_demos_or_presteps = open(config["demos_or_presteps"]["choose_model"], "r").read()
113
- response_results_demos_or_presteps = open(config["demos_or_presteps"]["response_results"], "r").read()
114
-
115
- parse_task_prompt = config["prompt"]["parse_task"]
116
- choose_model_prompt = config["prompt"]["choose_model"]
117
- response_results_prompt = config["prompt"]["response_results"]
118
-
119
- parse_task_tprompt = config["tprompt"]["parse_task"]
120
- choose_model_tprompt = config["tprompt"]["choose_model"]
121
- response_results_tprompt = config["tprompt"]["response_results"]
122
-
123
- MODELS = [json.loads(line) for line in open("data/p0_models.jsonl", "r").readlines()]
124
- MODELS_MAP = {}
125
- for model in MODELS:
126
- tag = model["task"]
127
- if tag not in MODELS_MAP:
128
- MODELS_MAP[tag] = []
129
- MODELS_MAP[tag].append(model)
130
- METADATAS = {}
131
- for model in MODELS:
132
- METADATAS[model["id"]] = model
133
-
134
- def convert_chat_to_completion(data):
135
- messages = data.pop('messages', [])
136
- tprompt = ""
137
- if messages[0]['role'] == "system":
138
- tprompt = messages[0]['content']
139
- messages = messages[1:]
140
- final_prompt = ""
141
- for message in messages:
142
- if message['role'] == "user":
143
- final_prompt += ("<im_start>"+ "user" + "\n" + message['content'] + "<im_end>\n")
144
- elif message['role'] == "assistant":
145
- final_prompt += ("<im_start>"+ "assistant" + "\n" + message['content'] + "<im_end>\n")
146
- else:
147
- final_prompt += ("<im_start>"+ "system" + "\n" + message['content'] + "<im_end>\n")
148
- final_prompt = tprompt + final_prompt
149
- final_prompt = final_prompt + "<im_start>assistant"
150
- data["prompt"] = final_prompt
151
- data['stop'] = data.get('stop', ["<im_end>"])
152
- data['max_tokens'] = data.get('max_tokens', max(get_max_context_length(LLM) - count_tokens(LLM_encoding, final_prompt), 1))
153
- return data
154
-
155
- def send_request(data):
156
- global HEADER
157
- openaikey = data.pop("openaikey")
158
- if use_completion:
159
- data = convert_chat_to_completion(data)
160
- if openaikey and openaikey.startswith("sk-"):
161
- HEADER = {
162
- "Authorization": f"Bearer {openaikey}"
163
- }
164
-
165
- response = requests.post(endpoint, json=data, headers=HEADER, proxies=PROXY)
166
- logger.debug(response.text.strip())
167
- if "choices" not in response.json():
168
- return response.json()
169
- if use_completion:
170
- return response.json()["choices"][0]["text"].strip()
171
- else:
172
- return response.json()["choices"][0]["message"]["content"].strip()
173
-
174
- def replace_slot(text, entries):
175
- for key, value in entries.items():
176
- if not isinstance(value, str):
177
- value = str(value)
178
- text = text.replace("{{" + key +"}}", value.replace('"', "'").replace('\n', ""))
179
- return text
180
-
181
- def find_json(s):
182
- s = s.replace("\'", "\"")
183
- start = s.find("{")
184
- end = s.rfind("}")
185
- res = s[start:end+1]
186
- res = res.replace("\n", "")
187
- return res
188
-
189
- def field_extract(s, field):
190
- try:
191
- field_rep = re.compile(f'{field}.*?:.*?"(.*?)"', re.IGNORECASE)
192
- extracted = field_rep.search(s).group(1).replace("\"", "\'")
193
- except:
194
- field_rep = re.compile(f'{field}:\ *"(.*?)"', re.IGNORECASE)
195
- extracted = field_rep.search(s).group(1).replace("\"", "\'")
196
- return extracted
197
-
198
- def get_id_reason(choose_str):
199
- reason = field_extract(choose_str, "reason")
200
- id = field_extract(choose_str, "id")
201
- choose = {"id": id, "reason": reason}
202
- return id.strip(), reason.strip(), choose
203
-
204
- def record_case(success, **args):
205
- if not success:
206
- return
207
- f = open(f"logs/log_success_{now}.jsonl", "a")
208
- log = args
209
- f.write(json.dumps(log) + "\n")
210
- f.close()
211
- if LOG_HF_TOKEN:
212
- commit_url = repo.push_to_hub(blocking=False)
213
-
214
- def image_to_bytes(img_url):
215
- img_byte = io.BytesIO()
216
- type = img_url.split(".")[-1]
217
- load_image(img_url).save(img_byte, format="png")
218
- img_data = img_byte.getvalue()
219
- return img_data
220
-
221
- def resource_has_dep(command):
222
- args = command["args"]
223
- for _, v in args.items():
224
- if "<GENERATED>" in v:
225
- return True
226
- return False
227
-
228
- def fix_dep(tasks):
229
- for task in tasks:
230
- args = task["args"]
231
- task["dep"] = []
232
- for k, v in args.items():
233
- if "<GENERATED>" in v:
234
- dep_task_id = int(v.split("-")[1])
235
- if dep_task_id not in task["dep"]:
236
- task["dep"].append(dep_task_id)
237
- if len(task["dep"]) == 0:
238
- task["dep"] = [-1]
239
- return tasks
240
-
241
- def unfold(tasks):
242
- flag_unfold_task = False
243
- try:
244
- for task in tasks:
245
- for key, value in task["args"].items():
246
- if "<GENERATED>" in value:
247
- generated_items = value.split(",")
248
- if len(generated_items) > 1:
249
- flag_unfold_task = True
250
- for item in generated_items:
251
- new_task = copy.deepcopy(task)
252
- dep_task_id = int(item.split("-")[1])
253
- new_task["dep"] = [dep_task_id]
254
- new_task["args"][key] = item
255
- tasks.append(new_task)
256
- tasks.remove(task)
257
- except Exception as e:
258
- print(e)
259
- traceback.print_exc()
260
- logger.debug("unfold task failed.")
261
-
262
- if flag_unfold_task:
263
- logger.debug(f"unfold tasks: {tasks}")
264
-
265
- return tasks
266
-
267
- def chitchat(messages, openaikey=None):
268
- data = {
269
- "model": LLM,
270
- "messages": messages,
271
- "openaikey": openaikey
272
- }
273
- return send_request(data)
274
-
275
- def parse_task(context, input, openaikey=None):
276
- demos_or_presteps = parse_task_demos_or_presteps
277
- messages = json.loads(demos_or_presteps)
278
- messages.insert(0, {"role": "system", "content": parse_task_tprompt})
279
-
280
- # cut chat logs
281
- start = 0
282
- while start <= len(context):
283
- history = context[start:]
284
- prompt = replace_slot(parse_task_prompt, {
285
- "input": input,
286
- "context": history
287
- })
288
- messages.append({"role": "user", "content": prompt})
289
- history_text = "<im_end>\nuser<im_start>".join([m["content"] for m in messages])
290
- num = count_tokens(LLM_encoding, history_text)
291
- if get_max_context_length(LLM) - num > 800:
292
- break
293
- messages.pop()
294
- start += 2
295
-
296
- logger.debug(messages)
297
- data = {
298
- "model": LLM,
299
- "messages": messages,
300
- "temperature": 0,
301
- "logit_bias": {item: config["logit_bias"]["parse_task"] for item in task_parsing_highlight_ids},
302
- "openaikey": openaikey
303
- }
304
- return send_request(data)
305
-
306
- def choose_model(input, task, metas, openaikey = None):
307
- prompt = replace_slot(choose_model_prompt, {
308
- "input": input,
309
- "task": task,
310
- "metas": metas,
311
- })
312
- demos_or_presteps = replace_slot(choose_model_demos_or_presteps, {
313
- "input": input,
314
- "task": task,
315
- "metas": metas
316
- })
317
- messages = json.loads(demos_or_presteps)
318
- messages.insert(0, {"role": "system", "content": choose_model_tprompt})
319
- messages.append({"role": "user", "content": prompt})
320
- logger.debug(messages)
321
- data = {
322
- "model": LLM,
323
- "messages": messages,
324
- "temperature": 0,
325
- "logit_bias": {item: config["logit_bias"]["choose_model"] for item in choose_model_highlight_ids}, # 5
326
- "openaikey": openaikey
327
- }
328
- return send_request(data)
329
-
330
-
331
- def response_results(input, results, openaikey=None):
332
- results = [v for k, v in sorted(results.items(), key=lambda item: item[0])]
333
- prompt = replace_slot(response_results_prompt, {
334
- "input": input,
335
- })
336
- demos_or_presteps = replace_slot(response_results_demos_or_presteps, {
337
- "input": input,
338
- "processes": results
339
- })
340
- messages = json.loads(demos_or_presteps)
341
- messages.insert(0, {"role": "system", "content": response_results_tprompt})
342
- messages.append({"role": "user", "content": prompt})
343
- logger.debug(messages)
344
- data = {
345
- "model": LLM,
346
- "messages": messages,
347
- "temperature": 0,
348
- "openaikey": openaikey
349
- }
350
- return send_request(data)
351
-
352
- def huggingface_model_inference(model_id, data, task, huggingfacetoken=None):
353
- if huggingfacetoken is None:
354
- HUGGINGFACE_HEADERS = {}
355
- else:
356
- HUGGINGFACE_HEADERS = {
357
- "Authorization": f"Bearer {huggingfacetoken}",
358
- }
359
- task_url = f"https://api-inference.huggingface.co/models/{model_id}" # InferenceApi does not yet support some tasks
360
- inference = InferenceApi(repo_id=model_id, token=huggingfacetoken)
361
-
362
- # NLP tasks
363
- if task == "question-answering":
364
- inputs = {"question": data["text"], "context": (data["context"] if "context" in data else "" )}
365
- result = inference(inputs)
366
- if task == "sentence-similarity":
367
- inputs = {"source_sentence": data["text1"], "target_sentence": data["text2"]}
368
- result = inference(inputs)
369
- if task in ["text-classification", "token-classification", "text2text-generation", "summarization", "translation", "conversational", "text-generation"]:
370
- inputs = data["text"]
371
- result = inference(inputs)
372
-
373
- # CV tasks
374
- if task == "visual-question-answering" or task == "document-question-answering":
375
- img_url = data["image"]
376
- text = data["text"]
377
- img_data = image_to_bytes(img_url)
378
- img_base64 = base64.b64encode(img_data).decode("utf-8")
379
- json_data = {}
380
- json_data["inputs"] = {}
381
- json_data["inputs"]["question"] = text
382
- json_data["inputs"]["image"] = img_base64
383
- result = requests.post(task_url, headers=HUGGINGFACE_HEADERS, json=json_data).json()
384
- # result = inference(inputs) # not support
385
-
386
- if task == "image-to-image":
387
- img_url = data["image"]
388
- img_data = image_to_bytes(img_url)
389
- # result = inference(data=img_data) # not support
390
- HUGGINGFACE_HEADERS["Content-Length"] = str(len(img_data))
391
- r = requests.post(task_url, headers=HUGGINGFACE_HEADERS, data=img_data)
392
- result = r.json()
393
- if "path" in result:
394
- result["generated image"] = result.pop("path")
395
-
396
- if task == "text-to-image":
397
- inputs = data["text"]
398
- img = inference(inputs)
399
- name = str(uuid.uuid4())[:4]
400
- img.save(f"public/images/{name}.png")
401
- result = {}
402
- result["generated image"] = f"/images/{name}.png"
403
-
404
- if task == "image-segmentation":
405
- img_url = data["image"]
406
- img_data = image_to_bytes(img_url)
407
- image = Image.open(BytesIO(img_data))
408
- predicted = inference(data=img_data)
409
- colors = []
410
- for i in range(len(predicted)):
411
- colors.append((random.randint(100, 255), random.randint(100, 255), random.randint(100, 255), 155))
412
- for i, pred in enumerate(predicted):
413
- label = pred["label"]
414
- mask = pred.pop("mask").encode("utf-8")
415
- mask = base64.b64decode(mask)
416
- mask = Image.open(BytesIO(mask), mode='r')
417
- mask = mask.convert('L')
418
-
419
- layer = Image.new('RGBA', mask.size, colors[i])
420
- image.paste(layer, (0, 0), mask)
421
- name = str(uuid.uuid4())[:4]
422
- image.save(f"public/images/{name}.jpg")
423
- result = {}
424
- result["generated image with segmentation mask"] = f"/images/{name}.jpg"
425
- result["predicted"] = predicted
426
-
427
- if task == "object-detection":
428
- img_url = data["image"]
429
- img_data = image_to_bytes(img_url)
430
- predicted = inference(data=img_data)
431
- image = Image.open(BytesIO(img_data))
432
- draw = ImageDraw.Draw(image)
433
- labels = list(item['label'] for item in predicted)
434
- color_map = {}
435
- for label in labels:
436
- if label not in color_map:
437
- color_map[label] = (random.randint(0, 255), random.randint(0, 100), random.randint(0, 255))
438
- for label in predicted:
439
- box = label["box"]
440
- draw.rectangle(((box["xmin"], box["ymin"]), (box["xmax"], box["ymax"])), outline=color_map[label["label"]], width=2)
441
- draw.text((box["xmin"]+5, box["ymin"]-15), label["label"], fill=color_map[label["label"]])
442
- name = str(uuid.uuid4())[:4]
443
- image.save(f"public/images/{name}.jpg")
444
- result = {}
445
- result["generated image with predicted box"] = f"/images/{name}.jpg"
446
- result["predicted"] = predicted
447
-
448
- if task in ["image-classification"]:
449
- img_url = data["image"]
450
- img_data = image_to_bytes(img_url)
451
- result = inference(data=img_data)
452
-
453
- if task == "image-to-text":
454
- img_url = data["image"]
455
- img_data = image_to_bytes(img_url)
456
- HUGGINGFACE_HEADERS["Content-Length"] = str(len(img_data))
457
- r = requests.post(task_url, headers=HUGGINGFACE_HEADERS, data=img_data)
458
- result = {}
459
- if "generated_text" in r.json()[0]:
460
- result["generated text"] = r.json()[0].pop("generated_text")
461
-
462
- # AUDIO tasks
463
- if task == "text-to-speech":
464
- inputs = data["text"]
465
- response = inference(inputs, raw_response=True)
466
- # response = requests.post(task_url, headers=HUGGINGFACE_HEADERS, json={"inputs": text})
467
- name = str(uuid.uuid4())[:4]
468
- with open(f"public/audios/{name}.flac", "wb") as f:
469
- f.write(response.content)
470
- result = {"generated audio": f"/audios/{name}.flac"}
471
- if task in ["automatic-speech-recognition", "audio-to-audio", "audio-classification"]:
472
- audio_url = data["audio"]
473
- audio_data = requests.get(audio_url, timeout=10).content
474
- response = inference(data=audio_data, raw_response=True)
475
- result = response.json()
476
- if task == "audio-to-audio":
477
- content = None
478
- type = None
479
- for k, v in result[0].items():
480
- if k == "blob":
481
- content = base64.b64decode(v.encode("utf-8"))
482
- if k == "content-type":
483
- type = "audio/flac".split("/")[-1]
484
- audio = AudioSegment.from_file(BytesIO(content))
485
- name = str(uuid.uuid4())[:4]
486
- audio.export(f"public/audios/{name}.{type}", format=type)
487
- result = {"generated audio": f"/audios/{name}.{type}"}
488
- return result
489
-
490
- def local_model_inference(model_id, data, task):
491
- inference = partial(models, model_id)
492
- # controlnet
493
- if model_id.startswith("lllyasviel/sd-controlnet-"):
494
- img_url = data["image"]
495
- text = data["text"]
496
- results = inference({"img_url": img_url, "text": text})
497
- if "path" in results:
498
- results["generated image"] = results.pop("path")
499
- return results
500
- if model_id.endswith("-control"):
501
- img_url = data["image"]
502
- results = inference({"img_url": img_url})
503
- if "path" in results:
504
- results["generated image"] = results.pop("path")
505
- return results
506
-
507
- if task == "text-to-video":
508
- results = inference(data)
509
- if "path" in results:
510
- results["generated video"] = results.pop("path")
511
- return results
512
-
513
- # NLP tasks
514
- if task == "question-answering" or task == "sentence-similarity":
515
- results = inference(json=data)
516
- return results
517
- if task in ["text-classification", "token-classification", "text2text-generation", "summarization", "translation", "conversational", "text-generation"]:
518
- results = inference(json=data)
519
- return results
520
-
521
- # CV tasks
522
- if task == "depth-estimation":
523
- img_url = data["image"]
524
- results = inference({"img_url": img_url})
525
- if "path" in results:
526
- results["generated depth image"] = results.pop("path")
527
- return results
528
- if task == "image-segmentation":
529
- img_url = data["image"]
530
- results = inference({"img_url": img_url})
531
- results["generated image with segmentation mask"] = results.pop("path")
532
- return results
533
- if task == "image-to-image":
534
- img_url = data["image"]
535
- results = inference({"img_url": img_url})
536
- if "path" in results:
537
- results["generated image"] = results.pop("path")
538
- return results
539
- if task == "text-to-image":
540
- results = inference(data)
541
- if "path" in results:
542
- results["generated image"] = results.pop("path")
543
- return results
544
- if task == "object-detection":
545
- img_url = data["image"]
546
- predicted = inference({"img_url": img_url})
547
- if "error" in predicted:
548
- return predicted
549
- image = load_image(img_url)
550
- draw = ImageDraw.Draw(image)
551
- labels = list(item['label'] for item in predicted)
552
- color_map = {}
553
- for label in labels:
554
- if label not in color_map:
555
- color_map[label] = (random.randint(0, 255), random.randint(0, 100), random.randint(0, 255))
556
- for label in predicted:
557
- box = label["box"]
558
- draw.rectangle(((box["xmin"], box["ymin"]), (box["xmax"], box["ymax"])), outline=color_map[label["label"]], width=2)
559
- draw.text((box["xmin"]+5, box["ymin"]-15), label["label"], fill=color_map[label["label"]])
560
- name = str(uuid.uuid4())[:4]
561
- image.save(f"public/images/{name}.jpg")
562
- results = {}
563
- results["generated image with predicted box"] = f"/images/{name}.jpg"
564
- results["predicted"] = predicted
565
- return results
566
- if task in ["image-classification", "image-to-text", "document-question-answering", "visual-question-answering"]:
567
- img_url = data["image"]
568
- text = None
569
- if "text" in data:
570
- text = data["text"]
571
- results = inference({"img_url": img_url, "text": text})
572
- return results
573
- # AUDIO tasks
574
- if task == "text-to-speech":
575
- results = inference(data)
576
- if "path" in results:
577
- results["generated audio"] = results.pop("path")
578
- return results
579
- if task in ["automatic-speech-recognition", "audio-to-audio", "audio-classification"]:
580
- audio_url = data["audio"]
581
- results = inference({"audio_url": audio_url})
582
- return results
583
-
584
-
585
- def model_inference(model_id, data, hosted_on, task, huggingfacetoken=None):
586
- if huggingfacetoken:
587
- HUGGINGFACE_HEADERS = {
588
- "Authorization": f"Bearer {huggingfacetoken}",
589
- }
590
- else:
591
- HUGGINGFACE_HEADERS = None
592
- if hosted_on == "unknown":
593
- r = status(model_id)
594
- logger.debug("Local Server Status: " + str(r))
595
- if "loaded" in r and r["loaded"]:
596
- hosted_on = "local"
597
- else:
598
- huggingfaceStatusUrl = f"https://api-inference.huggingface.co/status/{model_id}"
599
- r = requests.get(huggingfaceStatusUrl, headers=HUGGINGFACE_HEADERS, proxies=PROXY)
600
- logger.debug("Huggingface Status: " + str(r.json()))
601
- if "loaded" in r and r["loaded"]:
602
- hosted_on = "huggingface"
603
- try:
604
- if hosted_on == "local":
605
- inference_result = local_model_inference(model_id, data, task)
606
- elif hosted_on == "huggingface":
607
- inference_result = huggingface_model_inference(model_id, data, task, huggingfacetoken)
608
- except Exception as e:
609
- print(e)
610
- traceback.print_exc()
611
- inference_result = {"error":{"message": str(e)}}
612
- return inference_result
613
-
614
-
615
- def get_model_status(model_id, url, headers, queue = None):
616
- endpoint_type = "huggingface" if "huggingface" in url else "local"
617
- if "huggingface" in url:
618
- r = requests.get(url, headers=headers, proxies=PROXY)
619
- else:
620
- r = status(model_id)
621
- if "loaded" in r and r["loaded"]:
622
- if queue:
623
- queue.put((model_id, True, endpoint_type))
624
- return True
625
- else:
626
- if queue:
627
- queue.put((model_id, False, None))
628
- return False
629
-
630
- def get_avaliable_models(candidates, topk=10, huggingfacetoken = None):
631
- all_available_models = {"local": [], "huggingface": []}
632
- threads = []
633
- result_queue = Queue()
634
- HUGGINGFACE_HEADERS = {
635
- "Authorization": f"Bearer {huggingfacetoken}",
636
- }
637
- for candidate in candidates:
638
- model_id = candidate["id"]
639
-
640
- if inference_mode != "local":
641
- huggingfaceStatusUrl = f"https://api-inference.huggingface.co/status/{model_id}"
642
- thread = threading.Thread(target=get_model_status, args=(model_id, huggingfaceStatusUrl, HUGGINGFACE_HEADERS, result_queue))
643
- threads.append(thread)
644
- thread.start()
645
-
646
- if inference_mode != "huggingface" and config["local_deployment"] != "minimal":
647
- thread = threading.Thread(target=get_model_status, args=(model_id, "", {}, result_queue))
648
- threads.append(thread)
649
- thread.start()
650
-
651
- result_count = len(threads)
652
- while result_count:
653
- model_id, status, endpoint_type = result_queue.get()
654
- if status and model_id not in all_available_models:
655
- all_available_models[endpoint_type].append(model_id)
656
- if len(all_available_models["local"] + all_available_models["huggingface"]) >= topk:
657
- break
658
- result_count -= 1
659
-
660
- for thread in threads:
661
- thread.join()
662
-
663
- return all_available_models
664
-
665
- def collect_result(command, choose, inference_result):
666
- result = {"task": command}
667
- result["inference result"] = inference_result
668
- result["choose model result"] = choose
669
- logger.debug(f"inference result: {inference_result}")
670
- return result
671
-
672
-
673
- def run_task(input, command, results, openaikey = None, huggingfacetoken = None):
674
- id = command["id"]
675
- args = command["args"]
676
- task = command["task"]
677
- deps = command["dep"]
678
- if deps[0] != -1:
679
- dep_tasks = [results[dep] for dep in deps]
680
- else:
681
- dep_tasks = []
682
-
683
- logger.debug(f"Run task: {id} - {task}")
684
- logger.debug("Deps: " + json.dumps(dep_tasks))
685
-
686
- if deps[0] != -1:
687
- if "image" in args and "<GENERATED>-" in args["image"]:
688
- resource_id = int(args["image"].split("-")[1])
689
- if "generated image" in results[resource_id]["inference result"]:
690
- args["image"] = results[resource_id]["inference result"]["generated image"]
691
- if "audio" in args and "<GENERATED>-" in args["audio"]:
692
- resource_id = int(args["audio"].split("-")[1])
693
- if "generated audio" in results[resource_id]["inference result"]:
694
- args["audio"] = results[resource_id]["inference result"]["generated audio"]
695
- if "text" in args and "<GENERATED>-" in args["text"]:
696
- resource_id = int(args["text"].split("-")[1])
697
- if "generated text" in results[resource_id]["inference result"]:
698
- args["text"] = results[resource_id]["inference result"]["generated text"]
699
-
700
- text = image = audio = None
701
- for dep_task in dep_tasks:
702
- if "generated text" in dep_task["inference result"]:
703
- text = dep_task["inference result"]["generated text"]
704
- logger.debug("Detect the generated text of dependency task (from results):" + text)
705
- elif "text" in dep_task["task"]["args"]:
706
- text = dep_task["task"]["args"]["text"]
707
- logger.debug("Detect the text of dependency task (from args): " + text)
708
- if "generated image" in dep_task["inference result"]:
709
- image = dep_task["inference result"]["generated image"]
710
- logger.debug("Detect the generated image of dependency task (from results): " + image)
711
- elif "image" in dep_task["task"]["args"]:
712
- image = dep_task["task"]["args"]["image"]
713
- logger.debug("Detect the image of dependency task (from args): " + image)
714
- if "generated audio" in dep_task["inference result"]:
715
- audio = dep_task["inference result"]["generated audio"]
716
- logger.debug("Detect the generated audio of dependency task (from results): " + audio)
717
- elif "audio" in dep_task["task"]["args"]:
718
- audio = dep_task["task"]["args"]["audio"]
719
- logger.debug("Detect the audio of dependency task (from args): " + audio)
720
-
721
- if "image" in args and "<GENERATED>" in args["image"]:
722
- if image:
723
- args["image"] = image
724
- if "audio" in args and "<GENERATED>" in args["audio"]:
725
- if audio:
726
- args["audio"] = audio
727
- if "text" in args and "<GENERATED>" in args["text"]:
728
- if text:
729
- args["text"] = text
730
-
731
- for resource in ["image", "audio"]:
732
- if resource in args and not args[resource].startswith("public/") and len(args[resource]) > 0 and not args[resource].startswith("http"):
733
- args[resource] = f"public/{args[resource]}"
734
-
735
- if "-text-to-image" in command['task'] and "text" not in args:
736
- logger.debug("control-text-to-image task, but text is empty, so we use control-generation instead.")
737
- control = task.split("-")[0]
738
-
739
- if control == "seg":
740
- task = "image-segmentation"
741
- command['task'] = task
742
- elif control == "depth":
743
- task = "depth-estimation"
744
- command['task'] = task
745
- else:
746
- task = f"{control}-control"
747
-
748
- command["args"] = args
749
- logger.debug(f"parsed task: {command}")
750
-
751
- if task.endswith("-text-to-image") or task.endswith("-control"):
752
- if inference_mode != "huggingface":
753
- if task.endswith("-text-to-image"):
754
- control = task.split("-")[0]
755
- best_model_id = f"lllyasviel/sd-controlnet-{control}"
756
- else:
757
- best_model_id = task
758
- hosted_on = "local"
759
- reason = "ControlNet is the best model for this task."
760
- choose = {"id": best_model_id, "reason": reason}
761
- logger.debug(f"chosen model: {choose}")
762
- else:
763
- logger.warning(f"Task {command['task']} is not available. ControlNet need to be deployed locally.")
764
- record_case(success=False, **{"input": input, "task": command, "reason": f"Task {command['task']} is not available. ControlNet need to be deployed locally.", "op":"message"})
765
- inference_result = {"error": f"service related to ControlNet is not available."}
766
- results[id] = collect_result(command, "", inference_result)
767
- return False
768
- elif task in ["summarization", "translation", "conversational", "text-generation", "text2text-generation"]: # ChatGPT Can do
769
- best_model_id = "ChatGPT"
770
- reason = "ChatGPT performs well on some NLP tasks as well."
771
- choose = {"id": best_model_id, "reason": reason}
772
- messages = [{
773
- "role": "user",
774
- "content": f"[ {input} ] contains a task in JSON format {command}, 'task' indicates the task type and 'args' indicates the arguments required for the task. Don't explain the task to me, just help me do it and give me the result. The result must be in text form without any urls."
775
- }]
776
- response = chitchat(messages, openaikey)
777
- results[id] = collect_result(command, choose, {"response": response})
778
- return True
779
- else:
780
- if task not in MODELS_MAP:
781
- logger.warning(f"no available models on {task} task.")
782
- record_case(success=False, **{"input": input, "task": command, "reason": f"task not support: {command['task']}", "op":"message"})
783
- inference_result = {"error": f"{command['task']} not found in available tasks."}
784
- results[id] = collect_result(command, "", inference_result)
785
- return False
786
-
787
- candidates = MODELS_MAP[task][:20]
788
- all_avaliable_models = get_avaliable_models(candidates, config["num_candidate_models"], huggingfacetoken)
789
- all_avaliable_model_ids = all_avaliable_models["local"] + all_avaliable_models["huggingface"]
790
- logger.debug(f"avaliable models on {command['task']}: {all_avaliable_models}")
791
-
792
- if len(all_avaliable_model_ids) == 0:
793
- logger.warning(f"no available models on {command['task']}")
794
- record_case(success=False, **{"input": input, "task": command, "reason": f"no available models: {command['task']}", "op":"message"})
795
- inference_result = {"error": f"no available models on {command['task']} task."}
796
- results[id] = collect_result(command, "", inference_result)
797
- return False
798
-
799
- if len(all_avaliable_model_ids) == 1:
800
- best_model_id = all_avaliable_model_ids[0]
801
- hosted_on = "local" if best_model_id in all_avaliable_models["local"] else "huggingface"
802
- reason = "Only one model available."
803
- choose = {"id": best_model_id, "reason": reason}
804
- logger.debug(f"chosen model: {choose}")
805
- else:
806
- cand_models_info = [
807
- {
808
- "id": model["id"],
809
- "inference endpoint": all_avaliable_models.get(
810
- "local" if model["id"] in all_avaliable_models["local"] else "huggingface"
811
- ),
812
- "likes": model.get("likes"),
813
- "description": model.get("description", "")[:config["max_description_length"]],
814
- "language": model.get("language"),
815
- "tags": model.get("tags"),
816
- }
817
- for model in candidates
818
- if model["id"] in all_avaliable_model_ids
819
- ]
820
-
821
- choose_str = choose_model(input, command, cand_models_info, openaikey)
822
- logger.debug(f"chosen model: {choose_str}")
823
- try:
824
- choose = json.loads(choose_str)
825
- reason = choose["reason"]
826
- best_model_id = choose["id"]
827
- hosted_on = "local" if best_model_id in all_avaliable_models["local"] else "huggingface"
828
- except Exception as e:
829
- logger.warning(f"the response [ {choose_str} ] is not a valid JSON, try to find the model id and reason in the response.")
830
- choose_str = find_json(choose_str)
831
- best_model_id, reason, choose = get_id_reason(choose_str)
832
- hosted_on = "local" if best_model_id in all_avaliable_models["local"] else "huggingface"
833
- inference_result = model_inference(best_model_id, args, hosted_on, command['task'], huggingfacetoken)
834
-
835
- if "error" in inference_result:
836
- logger.warning(f"Inference error: {inference_result['error']}")
837
- record_case(success=False, **{"input": input, "task": command, "reason": f"inference error: {inference_result['error']}", "op":"message"})
838
- results[id] = collect_result(command, choose, inference_result)
839
- return False
840
-
841
- results[id] = collect_result(command, choose, inference_result)
842
- return True
843
-
844
- def chat_huggingface(messages, openaikey = None, huggingfacetoken = None, return_planning = False, return_results = False):
845
- start = time.time()
846
- context = messages[:-1]
847
- input = messages[-1]["content"]
848
- logger.info("*"*80)
849
- logger.info(f"input: {input}")
850
-
851
- task_str = parse_task(context, input, openaikey)
852
- logger.info(task_str)
853
-
854
- if "error" in task_str:
855
- return str(task_str), {}
856
- else:
857
- task_str = task_str.strip()
858
-
859
- try:
860
- tasks = json.loads(task_str)
861
- except Exception as e:
862
- logger.debug(e)
863
- response = chitchat(messages, openaikey)
864
- record_case(success=False, **{"input": input, "task": task_str, "reason": "task parsing fail", "op":"chitchat"})
865
- return response, {}
866
-
867
- if task_str == "[]": # using LLM response for empty task
868
- record_case(success=False, **{"input": input, "task": [], "reason": "task parsing fail: empty", "op": "chitchat"})
869
- response = chitchat(messages, openaikey)
870
- return response, {}
871
-
872
- if len(tasks)==1 and tasks[0]["task"] in ["summarization", "translation", "conversational", "text-generation", "text2text-generation"]:
873
- record_case(success=True, **{"input": input, "task": tasks, "reason": "task parsing fail: empty", "op": "chitchat"})
874
- response = chitchat(messages, openaikey)
875
- best_model_id = "ChatGPT"
876
- reason = "ChatGPT performs well on some NLP tasks as well."
877
- choose = {"id": best_model_id, "reason": reason}
878
- return response, collect_result(tasks[0], choose, {"response": response})
879
-
880
-
881
- tasks = unfold(tasks)
882
- tasks = fix_dep(tasks)
883
- logger.debug(tasks)
884
-
885
- if return_planning:
886
- return tasks
887
-
888
- results = {}
889
- threads = []
890
- tasks = tasks[:]
891
- d = dict()
892
- retry = 0
893
- while True:
894
- num_threads = len(threads)
895
- for task in tasks:
896
- dep = task["dep"]
897
- # logger.debug(f"d.keys(): {d.keys()}, dep: {dep}")
898
- for dep_id in dep:
899
- if dep_id >= task["id"]:
900
- task["dep"] = [-1]
901
- dep = [-1]
902
- break
903
- if len(list(set(dep).intersection(d.keys()))) == len(dep) or dep[0] == -1:
904
- tasks.remove(task)
905
- thread = threading.Thread(target=run_task, args=(input, task, d, openaikey, huggingfacetoken))
906
- thread.start()
907
- threads.append(thread)
908
- if num_threads == len(threads):
909
- time.sleep(0.5)
910
- retry += 1
911
- if retry > 80:
912
- logger.debug("User has waited too long, Loop break.")
913
- break
914
- if len(tasks) == 0:
915
- break
916
- for thread in threads:
917
- thread.join()
918
-
919
- results = d.copy()
920
-
921
- logger.debug(results)
922
- if return_results:
923
- return results
924
-
925
- response = response_results(input, results, openaikey).strip()
926
-
927
- end = time.time()
928
- during = end - start
929
-
930
- answer = {"message": response}
931
- record_case(success=True, **{"input": input, "task": task_str, "results": results, "response": response, "during": during, "op":"response"})
932
- logger.info(f"response: {response}")
933
- return response, results
 
spaces/Amrrs/DragGan-Inversion/PTI/criteria/localitly_regulizer.py DELETED
@@ -1,65 +0,0 @@
- import torch
- import numpy as np
- from PTI.criteria import l2_loss
- from PTI.configs import hyperparameters
- from PTI.configs import global_config
-
-
- class Space_Regulizer:
-     def __init__(self, original_G, lpips_net):
-         self.original_G = original_G
-         self.morphing_regulizer_alpha = hyperparameters.regulizer_alpha
-         self.lpips_loss = lpips_net
-
-     def get_morphed_w_code(self, new_w_code, fixed_w):
-         interpolation_direction = new_w_code - fixed_w
-         interpolation_direction_norm = torch.norm(interpolation_direction, p=2)
-         direction_to_move = hyperparameters.regulizer_alpha * \
-             interpolation_direction / interpolation_direction_norm
-         result_w = fixed_w + direction_to_move
-         self.morphing_regulizer_alpha * fixed_w + \
-             (1 - self.morphing_regulizer_alpha) * new_w_code
-
-         return result_w
-
-     def get_image_from_ws(self, w_codes, G):
-         return torch.cat([G.synthesis(w_code, noise_mode='none', force_fp32=True) for w_code in w_codes])
-
-     def ball_holder_loss_lazy(self, new_G, num_of_sampled_latents, w_batch, use_wandb=False):
-         loss = 0.0
-
-         z_samples = np.random.randn(
-             num_of_sampled_latents, self.original_G.z_dim)
-         w_samples = self.original_G.mapping(torch.from_numpy(z_samples).to(global_config.device), None,
-                                             truncation_psi=0.5)
-         territory_indicator_ws = [self.get_morphed_w_code(
-             w_code.unsqueeze(0), w_batch) for w_code in w_samples]
-
-         for w_code in territory_indicator_ws:
-             new_img = new_G.synthesis(
-                 w_code, noise_mode='none', force_fp32=True)
-             with torch.no_grad():
-                 old_img = self.original_G.synthesis(
-                     w_code, noise_mode='none', force_fp32=True)
-
-             if hyperparameters.regulizer_l2_lambda > 0:
-                 l2_loss_val = l2_loss.l2_loss(old_img, new_img)
-                 if use_wandb:
-                     wandb.log({f'space_regulizer_l2_loss_val': l2_loss_val.detach().cpu()},
-                               step=global_config.training_step)
-                 loss += l2_loss_val * hyperparameters.regulizer_l2_lambda
-
-             if hyperparameters.regulizer_lpips_lambda > 0:
-                 loss_lpips = self.lpips_loss(old_img, new_img)
-                 loss_lpips = torch.mean(torch.squeeze(loss_lpips))
-                 if use_wandb:
-                     wandb.log({f'space_regulizer_lpips_loss_val': loss_lpips.detach().cpu()},
-                               step=global_config.training_step)
-                 loss += loss_lpips * hyperparameters.regulizer_lpips_lambda
-
-         return loss / len(territory_indicator_ws)
-
-     def space_regulizer_loss(self, new_G, w_batch, use_wandb):
-         ret_val = self.ball_holder_loss_lazy(
-             new_G, hyperparameters.latent_ball_num_of_samples, w_batch, use_wandb)
-         return ret_val
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/filtered_lrelu.h DELETED
@@ -1,90 +0,0 @@
- // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- //
- // NVIDIA CORPORATION and its licensors retain all intellectual property
- // and proprietary rights in and to this software, related documentation
- // and any modifications thereto. Any use, reproduction, disclosure or
- // distribution of this software and related documentation without an express
- // license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- #include <cuda_runtime.h>
-
- //------------------------------------------------------------------------
- // CUDA kernel parameters.
-
- struct filtered_lrelu_kernel_params
- {
-     // These parameters decide which kernel to use.
-     int up;              // upsampling ratio (1, 2, 4)
-     int down;            // downsampling ratio (1, 2, 4)
-     int2 fuShape;        // [size, 1] | [size, size]
-     int2 fdShape;        // [size, 1] | [size, size]
-
-     int _dummy;          // Alignment.
-
-     // Rest of the parameters.
-     const void* x;       // Input tensor.
-     void* y;             // Output tensor.
-     const void* b;       // Bias tensor.
-     unsigned char* s;    // Sign tensor in/out. NULL if unused.
-     const float* fu;     // Upsampling filter.
-     const float* fd;     // Downsampling filter.
-
-     int2 pad0;           // Left/top padding.
-     float gain;          // Additional gain factor.
-     float slope;         // Leaky ReLU slope on negative side.
-     float clamp;         // Clamp after nonlinearity.
-     int flip;            // Filter kernel flip for gradient computation.
-
-     int tilesXdim;       // Original number of horizontal output tiles.
-     int tilesXrep;       // Number of horizontal tiles per CTA.
-     int blockZofs;       // Block z offset to support large minibatch, channel dimensions.
-
-     int4 xShape;         // [width, height, channel, batch]
-     int4 yShape;         // [width, height, channel, batch]
-     int2 sShape;         // [width, height] - width is in bytes. Contiguous. Zeros if unused.
-     int2 sOfs;           // [ofs_x, ofs_y] - offset between upsampled data and sign tensor.
-     int swLimit;         // Active width of sign tensor in bytes.
-
-     longlong4 xStride;   // Strides of all tensors except signs, same component order as shapes.
-     longlong4 yStride;   //
-     int64_t bStride;     //
-     longlong3 fuStride;  //
-     longlong3 fdStride;  //
- };
-
- struct filtered_lrelu_act_kernel_params
- {
-     void* x;             // Input/output, modified in-place.
-     unsigned char* s;    // Sign tensor in/out. NULL if unused.
-
-     float gain;          // Additional gain factor.
-     float slope;         // Leaky ReLU slope on negative side.
-     float clamp;         // Clamp after nonlinearity.
-
-     int4 xShape;         // [width, height, channel, batch]
-     longlong4 xStride;   // Input/output tensor strides, same order as in shape.
-     int2 sShape;         // [width, height] - width is in elements. Contiguous. Zeros if unused.
-     int2 sOfs;           // [ofs_x, ofs_y] - offset between upsampled data and sign tensor.
- };
-
- //------------------------------------------------------------------------
- // CUDA kernel specialization.
-
- struct filtered_lrelu_kernel_spec
- {
-     void* setup;         // Function for filter kernel setup.
-     void* exec;          // Function for main operation.
-     int2 tileOut;        // Width/height of launch tile.
-     int numWarps;        // Number of warps per thread block, determines launch block size.
-     int xrep;            // For processing multiple horizontal tiles per thread block.
-     int dynamicSharedKB; // How much dynamic shared memory the exec kernel wants.
- };
-
- //------------------------------------------------------------------------
- // CUDA kernel selection.
-
- template <class T, class index_t, bool signWrite, bool signRead> filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB);
- template <class T, bool signWrite, bool signRead> void* choose_filtered_lrelu_act_kernel(void);
- template <bool signWrite, bool signRead> cudaError_t copy_filters(cudaStream_t stream);
-
- //------------------------------------------------------------------------
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_controlnet_inpaint_img2img.py DELETED
@@ -1,1119 +0,0 @@
1
- # Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/
2
-
3
- import inspect
4
- from typing import Any, Callable, Dict, List, Optional, Union
5
-
6
- import numpy as np
7
- import PIL.Image
8
- import torch
9
- import torch.nn.functional as F
10
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
11
-
12
- from diffusers import AutoencoderKL, ControlNetModel, DiffusionPipeline, UNet2DConditionModel, logging
13
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
14
- from diffusers.schedulers import KarrasDiffusionSchedulers
15
- from diffusers.utils import (
16
- PIL_INTERPOLATION,
17
- is_accelerate_available,
18
- is_accelerate_version,
19
- randn_tensor,
20
- replace_example_docstring,
21
- )
22
-
23
-
24
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
25
-
26
- EXAMPLE_DOC_STRING = """
27
- Examples:
28
- ```py
29
- >>> import numpy as np
30
- >>> import torch
31
- >>> from PIL import Image
32
- >>> from stable_diffusion_controlnet_inpaint_img2img import StableDiffusionControlNetInpaintImg2ImgPipeline
33
-
34
- >>> from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
35
- >>> from diffusers import ControlNetModel, UniPCMultistepScheduler
36
- >>> from diffusers.utils import load_image
37
-
38
- >>> def ade_palette():
39
- return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
40
- [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
41
- [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
42
- [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
43
- [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
44
- [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
45
- [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
46
- [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
47
- [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
48
- [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
49
- [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
50
- [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
51
- [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
52
- [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
53
- [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
54
- [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
55
- [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
56
- [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
57
- [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
58
- [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
59
- [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
60
- [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
61
- [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
62
- [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
63
- [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
64
- [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
65
- [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
66
- [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
67
- [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
68
- [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
69
- [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
70
- [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
71
- [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
72
- [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
73
- [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
74
- [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
75
- [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
76
- [102, 255, 0], [92, 0, 255]]
77
-
78
- >>> image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small")
79
- >>> image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small")
80
-
81
- >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg", torch_dtype=torch.float16)
82
-
83
- >>> pipe = StableDiffusionControlNetInpaintImg2ImgPipeline.from_pretrained(
84
- "runwayml/stable-diffusion-inpainting", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
85
- )
86
-
87
- >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
88
- >>> pipe.enable_xformers_memory_efficient_attention()
89
- >>> pipe.enable_model_cpu_offload()
90
-
91
- >>> def image_to_seg(image):
92
- pixel_values = image_processor(image, return_tensors="pt").pixel_values
93
- with torch.no_grad():
94
- outputs = image_segmentor(pixel_values)
95
- seg = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
96
- color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) # height, width, 3
97
- palette = np.array(ade_palette())
98
- for label, color in enumerate(palette):
99
- color_seg[seg == label, :] = color
100
- color_seg = color_seg.astype(np.uint8)
101
- seg_image = Image.fromarray(color_seg)
102
- return seg_image
103
-
104
- >>> image = load_image(
105
- "https://github.com/CompVis/latent-diffusion/raw/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
106
- )
107
-
108
- >>> mask_image = load_image(
109
- "https://github.com/CompVis/latent-diffusion/raw/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
110
- )
111
-
112
- >>> controlnet_conditioning_image = image_to_seg(image)
113
-
114
- >>> image = pipe(
115
- "Face of a yellow cat, high resolution, sitting on a park bench",
116
- image,
117
- mask_image,
118
- controlnet_conditioning_image,
119
- num_inference_steps=20,
120
- ).images[0]
121
-
122
- >>> image.save("out.png")
123
- ```
124
- """
125
-
126
-
127
- def prepare_image(image):
128
- if isinstance(image, torch.Tensor):
129
- # Batch single image
130
- if image.ndim == 3:
131
- image = image.unsqueeze(0)
132
-
133
- image = image.to(dtype=torch.float32)
134
- else:
135
- # preprocess image
136
- if isinstance(image, (PIL.Image.Image, np.ndarray)):
137
- image = [image]
138
-
139
- if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
140
- image = [np.array(i.convert("RGB"))[None, :] for i in image]
141
- image = np.concatenate(image, axis=0)
142
- elif isinstance(image, list) and isinstance(image[0], np.ndarray):
143
- image = np.concatenate([i[None, :] for i in image], axis=0)
144
-
145
- image = image.transpose(0, 3, 1, 2)
146
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
147
-
148
- return image
149
-
150
-
151
- def prepare_mask_image(mask_image):
152
- if isinstance(mask_image, torch.Tensor):
153
- if mask_image.ndim == 2:
154
- # Batch and add channel dim for single mask
155
- mask_image = mask_image.unsqueeze(0).unsqueeze(0)
156
- elif mask_image.ndim == 3 and mask_image.shape[0] == 1:
157
- # Single mask, the 0'th dimension is considered to be
158
- # the existing batch size of 1
159
- mask_image = mask_image.unsqueeze(0)
160
- elif mask_image.ndim == 3 and mask_image.shape[0] != 1:
161
- # Batch of mask, the 0'th dimension is considered to be
162
- # the batching dimension
163
- mask_image = mask_image.unsqueeze(1)
164
-
165
- # Binarize mask
166
- mask_image[mask_image < 0.5] = 0
167
- mask_image[mask_image >= 0.5] = 1
168
- else:
169
- # preprocess mask
170
- if isinstance(mask_image, (PIL.Image.Image, np.ndarray)):
171
- mask_image = [mask_image]
172
-
173
- if isinstance(mask_image, list) and isinstance(mask_image[0], PIL.Image.Image):
174
- mask_image = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask_image], axis=0)
175
- mask_image = mask_image.astype(np.float32) / 255.0
176
- elif isinstance(mask_image, list) and isinstance(mask_image[0], np.ndarray):
177
- mask_image = np.concatenate([m[None, None, :] for m in mask_image], axis=0)
178
-
179
- mask_image[mask_image < 0.5] = 0
180
- mask_image[mask_image >= 0.5] = 1
181
- mask_image = torch.from_numpy(mask_image)
182
-
183
- return mask_image
184
-
185
-
186
- def prepare_controlnet_conditioning_image(
187
- controlnet_conditioning_image, width, height, batch_size, num_images_per_prompt, device, dtype
188
- ):
189
- if not isinstance(controlnet_conditioning_image, torch.Tensor):
190
- if isinstance(controlnet_conditioning_image, PIL.Image.Image):
191
- controlnet_conditioning_image = [controlnet_conditioning_image]
192
-
193
- if isinstance(controlnet_conditioning_image[0], PIL.Image.Image):
194
- controlnet_conditioning_image = [
195
- np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"]))[None, :]
196
- for i in controlnet_conditioning_image
197
- ]
198
- controlnet_conditioning_image = np.concatenate(controlnet_conditioning_image, axis=0)
199
- controlnet_conditioning_image = np.array(controlnet_conditioning_image).astype(np.float32) / 255.0
200
- controlnet_conditioning_image = controlnet_conditioning_image.transpose(0, 3, 1, 2)
201
- controlnet_conditioning_image = torch.from_numpy(controlnet_conditioning_image)
202
- elif isinstance(controlnet_conditioning_image[0], torch.Tensor):
203
- controlnet_conditioning_image = torch.cat(controlnet_conditioning_image, dim=0)
204
-
205
- image_batch_size = controlnet_conditioning_image.shape[0]
206
-
207
- if image_batch_size == 1:
208
- repeat_by = batch_size
209
- else:
210
- # image batch size is the same as prompt batch size
211
- repeat_by = num_images_per_prompt
212
-
213
- controlnet_conditioning_image = controlnet_conditioning_image.repeat_interleave(repeat_by, dim=0)
214
-
215
- controlnet_conditioning_image = controlnet_conditioning_image.to(device=device, dtype=dtype)
216
-
217
- return controlnet_conditioning_image
218
-
219
-
220
- class StableDiffusionControlNetInpaintImg2ImgPipeline(DiffusionPipeline):
221
- """
222
- Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/
223
- """
224
-
225
- _optional_components = ["safety_checker", "feature_extractor"]
226
-
227
- def __init__(
228
- self,
229
- vae: AutoencoderKL,
230
- text_encoder: CLIPTextModel,
231
- tokenizer: CLIPTokenizer,
232
- unet: UNet2DConditionModel,
233
- controlnet: ControlNetModel,
234
- scheduler: KarrasDiffusionSchedulers,
235
- safety_checker: StableDiffusionSafetyChecker,
236
- feature_extractor: CLIPImageProcessor,
237
- requires_safety_checker: bool = True,
238
- ):
239
- super().__init__()
240
-
241
- if safety_checker is None and requires_safety_checker:
242
- logger.warning(
243
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
244
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
245
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
246
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
247
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
248
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
249
- )
250
-
251
- if safety_checker is not None and feature_extractor is None:
252
- raise ValueError(
253
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
254
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
255
- )
256
-
257
- self.register_modules(
258
- vae=vae,
259
- text_encoder=text_encoder,
260
- tokenizer=tokenizer,
261
- unet=unet,
262
- controlnet=controlnet,
263
- scheduler=scheduler,
264
- safety_checker=safety_checker,
265
- feature_extractor=feature_extractor,
266
- )
267
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
268
- self.register_to_config(requires_safety_checker=requires_safety_checker)
269
-
270
- def enable_vae_slicing(self):
271
- r"""
272
- Enable sliced VAE decoding.
273
-
274
- When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
275
- steps. This is useful to save some memory and allow larger batch sizes.
276
- """
277
- self.vae.enable_slicing()
278
-
279
- def disable_vae_slicing(self):
280
- r"""
281
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
282
- computing decoding in one step.
283
- """
284
- self.vae.disable_slicing()
285
-
286
- def enable_sequential_cpu_offload(self, gpu_id=0):
287
- r"""
288
- Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
289
- text_encoder, vae, controlnet, and safety checker have their state dicts saved to CPU and then are moved to a
290
- `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.
291
- Note that offloading happens on a submodule basis. Memory savings are higher than with
292
- `enable_model_cpu_offload`, but performance is lower.
293
- """
294
- if is_accelerate_available():
295
- from accelerate import cpu_offload
296
- else:
297
- raise ImportError("Please install accelerate via `pip install accelerate`")
298
-
299
- device = torch.device(f"cuda:{gpu_id}")
300
-
301
- for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.controlnet]:
302
- cpu_offload(cpu_offloaded_model, device)
303
-
304
- if self.safety_checker is not None:
305
- cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
306
-
307
- def enable_model_cpu_offload(self, gpu_id=0):
308
- r"""
309
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
310
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
311
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
312
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
313
- """
314
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
315
- from accelerate import cpu_offload_with_hook
316
- else:
317
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
318
-
319
- device = torch.device(f"cuda:{gpu_id}")
320
-
321
- hook = None
322
- for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
323
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
324
-
325
- if self.safety_checker is not None:
326
- # the safety checker can offload the vae again
327
- _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
328
-
329
- # control net hook has be manually offloaded as it alternates with unet
330
- cpu_offload_with_hook(self.controlnet, device)
331
-
332
- # We'll offload the last model manually.
333
- self.final_offload_hook = hook
334
-
335
- @property
336
- def _execution_device(self):
337
- r"""
338
- Returns the device on which the pipeline's models will be executed. After calling
339
- `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
340
- hooks.
341
- """
342
- if not hasattr(self.unet, "_hf_hook"):
343
- return self.device
344
- for module in self.unet.modules():
345
- if (
346
- hasattr(module, "_hf_hook")
347
- and hasattr(module._hf_hook, "execution_device")
348
- and module._hf_hook.execution_device is not None
349
- ):
350
- return torch.device(module._hf_hook.execution_device)
351
- return self.device
352
-
353
- def _encode_prompt(
354
- self,
355
- prompt,
356
- device,
357
- num_images_per_prompt,
358
- do_classifier_free_guidance,
359
- negative_prompt=None,
360
- prompt_embeds: Optional[torch.FloatTensor] = None,
361
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
362
- ):
363
- r"""
364
- Encodes the prompt into text encoder hidden states.
365
-
366
- Args:
367
- prompt (`str` or `List[str]`, *optional*):
368
- prompt to be encoded
369
- device: (`torch.device`):
370
- torch device
371
- num_images_per_prompt (`int`):
372
- number of images that should be generated per prompt
373
- do_classifier_free_guidance (`bool`):
374
- whether to use classifier free guidance or not
375
- negative_prompt (`str` or `List[str]`, *optional*):
376
- The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead.
377
- Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
378
- prompt_embeds (`torch.FloatTensor`, *optional*):
379
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
380
- provided, text embeddings will be generated from `prompt` input argument.
381
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
382
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
383
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
384
- argument.
385
- """
386
- if prompt is not None and isinstance(prompt, str):
387
- batch_size = 1
388
- elif prompt is not None and isinstance(prompt, list):
389
- batch_size = len(prompt)
390
- else:
391
- batch_size = prompt_embeds.shape[0]
392
-
393
- if prompt_embeds is None:
394
- text_inputs = self.tokenizer(
395
- prompt,
396
- padding="max_length",
397
- max_length=self.tokenizer.model_max_length,
398
- truncation=True,
399
- return_tensors="pt",
400
- )
401
- text_input_ids = text_inputs.input_ids
402
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
403
-
404
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
405
- text_input_ids, untruncated_ids
406
- ):
407
- removed_text = self.tokenizer.batch_decode(
408
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
409
- )
410
- logger.warning(
411
- "The following part of your input was truncated because CLIP can only handle sequences up to"
412
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
413
- )
414
-
415
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
416
- attention_mask = text_inputs.attention_mask.to(device)
417
- else:
418
- attention_mask = None
419
-
420
- prompt_embeds = self.text_encoder(
421
- text_input_ids.to(device),
422
- attention_mask=attention_mask,
423
- )
424
- prompt_embeds = prompt_embeds[0]
425
-
426
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
427
-
428
- bs_embed, seq_len, _ = prompt_embeds.shape
429
- # duplicate text embeddings for each generation per prompt, using mps friendly method
430
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
431
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
432
-
433
- # get unconditional embeddings for classifier free guidance
434
- if do_classifier_free_guidance and negative_prompt_embeds is None:
435
- uncond_tokens: List[str]
436
- if negative_prompt is None:
437
- uncond_tokens = [""] * batch_size
438
- elif type(prompt) is not type(negative_prompt):
439
- raise TypeError(
440
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
441
- f" {type(prompt)}."
442
- )
443
- elif isinstance(negative_prompt, str):
444
- uncond_tokens = [negative_prompt]
445
- elif batch_size != len(negative_prompt):
446
- raise ValueError(
447
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
448
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
449
- " the batch size of `prompt`."
450
- )
451
- else:
452
- uncond_tokens = negative_prompt
453
-
454
- max_length = prompt_embeds.shape[1]
455
- uncond_input = self.tokenizer(
456
- uncond_tokens,
457
- padding="max_length",
458
- max_length=max_length,
459
- truncation=True,
460
- return_tensors="pt",
461
- )
462
-
463
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
464
- attention_mask = uncond_input.attention_mask.to(device)
465
- else:
466
- attention_mask = None
467
-
468
- negative_prompt_embeds = self.text_encoder(
469
- uncond_input.input_ids.to(device),
470
- attention_mask=attention_mask,
471
- )
472
- negative_prompt_embeds = negative_prompt_embeds[0]
473
-
474
- if do_classifier_free_guidance:
475
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
476
- seq_len = negative_prompt_embeds.shape[1]
477
-
478
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
479
-
480
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
481
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
482
-
483
- # For classifier free guidance, we need to do two forward passes.
484
- # Here we concatenate the unconditional and text embeddings into a single batch
485
- # to avoid doing two forward passes
486
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
487
-
488
- return prompt_embeds
489
-
490
- def run_safety_checker(self, image, device, dtype):
491
- if self.safety_checker is not None:
492
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
493
- image, has_nsfw_concept = self.safety_checker(
494
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
495
- )
496
- else:
497
- has_nsfw_concept = None
498
- return image, has_nsfw_concept
499
-
500
- def decode_latents(self, latents):
501
- latents = 1 / self.vae.config.scaling_factor * latents
502
- image = self.vae.decode(latents).sample
503
- image = (image / 2 + 0.5).clamp(0, 1)
504
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
505
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
506
- return image
507
-
508
- def prepare_extra_step_kwargs(self, generator, eta):
509
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
510
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
511
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
512
- # and should be between [0, 1]
513
-
514
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
515
- extra_step_kwargs = {}
516
- if accepts_eta:
517
- extra_step_kwargs["eta"] = eta
518
-
519
- # check if the scheduler accepts generator
520
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
521
- if accepts_generator:
522
- extra_step_kwargs["generator"] = generator
523
- return extra_step_kwargs
524
-
525
- def check_inputs(
526
- self,
527
- prompt,
528
- image,
529
- mask_image,
530
- controlnet_conditioning_image,
531
- height,
532
- width,
533
- callback_steps,
534
- negative_prompt=None,
535
- prompt_embeds=None,
536
- negative_prompt_embeds=None,
537
- strength=None,
538
- ):
539
- if height % 8 != 0 or width % 8 != 0:
540
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
541
-
542
- if (callback_steps is None) or (
543
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
544
- ):
545
- raise ValueError(
546
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
547
- f" {type(callback_steps)}."
548
- )
549
-
550
- if prompt is not None and prompt_embeds is not None:
551
- raise ValueError(
552
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
553
- " only forward one of the two."
554
- )
555
- elif prompt is None and prompt_embeds is None:
556
- raise ValueError(
557
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
558
- )
559
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
560
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
561
-
562
- if negative_prompt is not None and negative_prompt_embeds is not None:
563
- raise ValueError(
564
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
565
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
566
- )
567
-
568
- if prompt_embeds is not None and negative_prompt_embeds is not None:
569
- if prompt_embeds.shape != negative_prompt_embeds.shape:
570
- raise ValueError(
571
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
572
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
573
- f" {negative_prompt_embeds.shape}."
574
- )
575
-
576
- controlnet_cond_image_is_pil = isinstance(controlnet_conditioning_image, PIL.Image.Image)
577
- controlnet_cond_image_is_tensor = isinstance(controlnet_conditioning_image, torch.Tensor)
578
- controlnet_cond_image_is_pil_list = isinstance(controlnet_conditioning_image, list) and isinstance(
579
- controlnet_conditioning_image[0], PIL.Image.Image
580
- )
581
- controlnet_cond_image_is_tensor_list = isinstance(controlnet_conditioning_image, list) and isinstance(
582
- controlnet_conditioning_image[0], torch.Tensor
583
- )
584
-
585
- if (
586
- not controlnet_cond_image_is_pil
587
- and not controlnet_cond_image_is_tensor
588
- and not controlnet_cond_image_is_pil_list
589
- and not controlnet_cond_image_is_tensor_list
590
- ):
591
- raise TypeError(
592
- "image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors"
593
- )
594
-
595
- if controlnet_cond_image_is_pil:
596
- controlnet_cond_image_batch_size = 1
597
- elif controlnet_cond_image_is_tensor:
598
- controlnet_cond_image_batch_size = controlnet_conditioning_image.shape[0]
599
- elif controlnet_cond_image_is_pil_list:
600
- controlnet_cond_image_batch_size = len(controlnet_conditioning_image)
601
- elif controlnet_cond_image_is_tensor_list:
602
- controlnet_cond_image_batch_size = len(controlnet_conditioning_image)
603
-
604
- if prompt is not None and isinstance(prompt, str):
605
- prompt_batch_size = 1
606
- elif prompt is not None and isinstance(prompt, list):
607
- prompt_batch_size = len(prompt)
608
- elif prompt_embeds is not None:
609
- prompt_batch_size = prompt_embeds.shape[0]
610
-
611
- if controlnet_cond_image_batch_size != 1 and controlnet_cond_image_batch_size != prompt_batch_size:
612
- raise ValueError(
613
- f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {controlnet_cond_image_batch_size}, prompt batch size: {prompt_batch_size}"
614
- )
615
-
616
- if isinstance(image, torch.Tensor) and not isinstance(mask_image, torch.Tensor):
617
- raise TypeError("if `image` is a tensor, `mask_image` must also be a tensor")
618
-
619
- if isinstance(image, PIL.Image.Image) and not isinstance(mask_image, PIL.Image.Image):
620
- raise TypeError("if `image` is a PIL image, `mask_image` must also be a PIL image")
621
-
622
- if isinstance(image, torch.Tensor):
623
- if image.ndim != 3 and image.ndim != 4:
624
- raise ValueError("`image` must have 3 or 4 dimensions")
625
-
626
- if mask_image.ndim != 2 and mask_image.ndim != 3 and mask_image.ndim != 4:
627
- raise ValueError("`mask_image` must have 2, 3, or 4 dimensions")
628
-
629
- if image.ndim == 3:
630
- image_batch_size = 1
631
- image_channels, image_height, image_width = image.shape
632
- elif image.ndim == 4:
633
- image_batch_size, image_channels, image_height, image_width = image.shape
634
-
635
- if mask_image.ndim == 2:
636
- mask_image_batch_size = 1
637
- mask_image_channels = 1
638
- mask_image_height, mask_image_width = mask_image.shape
639
- elif mask_image.ndim == 3:
640
- mask_image_channels = 1
641
- mask_image_batch_size, mask_image_height, mask_image_width = mask_image.shape
642
- elif mask_image.ndim == 4:
643
- mask_image_batch_size, mask_image_channels, mask_image_height, mask_image_width = mask_image.shape
644
-
645
- if image_channels != 3:
646
- raise ValueError("`image` must have 3 channels")
647
-
648
- if mask_image_channels != 1:
649
- raise ValueError("`mask_image` must have 1 channel")
650
-
651
- if image_batch_size != mask_image_batch_size:
652
- raise ValueError("`image` and `mask_image` mush have the same batch sizes")
653
-
654
- if image_height != mask_image_height or image_width != mask_image_width:
655
- raise ValueError("`image` and `mask_image` must have the same height and width dimensions")
656
-
657
- if image.min() < -1 or image.max() > 1:
658
- raise ValueError("`image` should be in range [-1, 1]")
659
-
660
- if mask_image.min() < 0 or mask_image.max() > 1:
661
- raise ValueError("`mask_image` should be in range [0, 1]")
662
- else:
663
- mask_image_channels = 1
664
- image_channels = 3
665
-
666
- single_image_latent_channels = self.vae.config.latent_channels
667
-
668
- total_latent_channels = single_image_latent_channels * 2 + mask_image_channels
669
-
670
- if total_latent_channels != self.unet.config.in_channels:
671
- raise ValueError(
672
- f"The config of `pipeline.unet` expects {self.unet.config.in_channels} but received"
673
- f" non inpainting latent channels: {single_image_latent_channels},"
674
- f" mask channels: {mask_image_channels}, and masked image channels: {single_image_latent_channels}."
675
- f" Please verify the config of `pipeline.unet` and the `mask_image` and `image` inputs."
676
- )
677
-
678
- if strength < 0 or strength > 1:
679
- raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
680
-
681
- def get_timesteps(self, num_inference_steps, strength, device):
682
- # get the original timestep using init_timestep
683
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
684
-
685
- t_start = max(num_inference_steps - init_timestep, 0)
686
- timesteps = self.scheduler.timesteps[t_start:]
687
-
688
- return timesteps, num_inference_steps - t_start
689
-
690
- def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
691
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
692
- raise ValueError(
693
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
694
- )
695
-
696
- image = image.to(device=device, dtype=dtype)
697
-
698
- batch_size = batch_size * num_images_per_prompt
699
- if isinstance(generator, list) and len(generator) != batch_size:
700
- raise ValueError(
701
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
702
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
703
- )
704
-
705
- if isinstance(generator, list):
706
- init_latents = [
707
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
708
- ]
709
- init_latents = torch.cat(init_latents, dim=0)
710
- else:
711
- init_latents = self.vae.encode(image).latent_dist.sample(generator)
712
-
713
- init_latents = self.vae.config.scaling_factor * init_latents
714
-
715
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
716
- raise ValueError(
717
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
718
- )
719
- else:
720
- init_latents = torch.cat([init_latents], dim=0)
721
-
722
- shape = init_latents.shape
723
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
724
-
725
- # get latents
726
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
727
- latents = init_latents
728
-
729
- return latents
730
-
731
- def prepare_mask_latents(self, mask_image, batch_size, height, width, dtype, device, do_classifier_free_guidance):
732
- # resize the mask to latents shape as we concatenate the mask to the latents
733
- # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
734
- # and half precision
735
- mask_image = F.interpolate(mask_image, size=(height // self.vae_scale_factor, width // self.vae_scale_factor))
736
- mask_image = mask_image.to(device=device, dtype=dtype)
737
-
738
- # duplicate mask for each generation per prompt, using mps friendly method
739
- if mask_image.shape[0] < batch_size:
740
- if not batch_size % mask_image.shape[0] == 0:
741
- raise ValueError(
742
- "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
743
- f" a total batch size of {batch_size}, but {mask_image.shape[0]} masks were passed. Make sure the number"
744
- " of masks that you pass is divisible by the total requested batch size."
745
- )
746
- mask_image = mask_image.repeat(batch_size // mask_image.shape[0], 1, 1, 1)
747
-
748
- mask_image = torch.cat([mask_image] * 2) if do_classifier_free_guidance else mask_image
749
-
750
- mask_image_latents = mask_image
751
-
752
- return mask_image_latents
753
-
754
- def prepare_masked_image_latents(
755
- self, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
756
- ):
757
- masked_image = masked_image.to(device=device, dtype=dtype)
758
-
759
- # encode the mask image into latents space so we can concatenate it to the latents
760
- if isinstance(generator, list):
761
- masked_image_latents = [
762
- self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(generator=generator[i])
763
- for i in range(batch_size)
764
- ]
765
- masked_image_latents = torch.cat(masked_image_latents, dim=0)
766
- else:
767
- masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
768
- masked_image_latents = self.vae.config.scaling_factor * masked_image_latents
769
-
770
- # duplicate masked_image_latents for each generation per prompt, using mps friendly method
771
- if masked_image_latents.shape[0] < batch_size:
772
- if not batch_size % masked_image_latents.shape[0] == 0:
773
- raise ValueError(
774
- "The passed images and the required batch size don't match. Images are supposed to be duplicated"
775
- f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
776
- " Make sure the number of images that you pass is divisible by the total requested batch size."
777
- )
778
- masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
779
-
780
- masked_image_latents = (
781
- torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
782
- )
783
-
784
- # aligning device to prevent device errors when concating it with the latent model input
785
- masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
786
- return masked_image_latents
787
-
788
- def _default_height_width(self, height, width, image):
789
- if isinstance(image, list):
790
- image = image[0]
791
-
792
- if height is None:
793
- if isinstance(image, PIL.Image.Image):
794
- height = image.height
795
- elif isinstance(image, torch.Tensor):
796
- height = image.shape[3]
797
-
798
- height = (height // 8) * 8 # round down to nearest multiple of 8
799
-
800
- if width is None:
801
- if isinstance(image, PIL.Image.Image):
802
- width = image.width
803
- elif isinstance(image, torch.Tensor):
804
- width = image.shape[2]
805
-
806
- width = (width // 8) * 8 # round down to nearest multiple of 8
807
-
808
- return height, width
809
-
810
- @torch.no_grad()
811
- @replace_example_docstring(EXAMPLE_DOC_STRING)
812
- def __call__(
813
- self,
814
- prompt: Union[str, List[str]] = None,
815
- image: Union[torch.Tensor, PIL.Image.Image] = None,
816
- mask_image: Union[torch.Tensor, PIL.Image.Image] = None,
817
- controlnet_conditioning_image: Union[
818
- torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]
819
- ] = None,
820
- strength: float = 0.8,
821
- height: Optional[int] = None,
822
- width: Optional[int] = None,
823
- num_inference_steps: int = 50,
824
- guidance_scale: float = 7.5,
825
- negative_prompt: Optional[Union[str, List[str]]] = None,
826
- num_images_per_prompt: Optional[int] = 1,
827
- eta: float = 0.0,
828
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
829
- latents: Optional[torch.FloatTensor] = None,
830
- prompt_embeds: Optional[torch.FloatTensor] = None,
831
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
832
- output_type: Optional[str] = "pil",
833
- return_dict: bool = True,
834
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
835
- callback_steps: int = 1,
836
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
837
- controlnet_conditioning_scale: float = 1.0,
838
- ):
839
- r"""
840
- Function invoked when calling the pipeline for generation.
841
-
842
- Args:
843
- prompt (`str` or `List[str]`, *optional*):
844
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
845
- instead.
846
- image (`torch.Tensor` or `PIL.Image.Image`):
847
- `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
848
- be masked out with `mask_image` and repainted according to `prompt`.
849
- mask_image (`torch.Tensor` or `PIL.Image.Image`):
850
- `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
851
- repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
852
- to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
853
- instead of 3, so the expected shape would be `(B, H, W, 1)`.
854
- controlnet_conditioning_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]`):
855
- The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
856
- the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. PIL.Image.Image` can
857
- also be accepted as an image. The control image is automatically resized to fit the output image.
858
- strength (`float`, *optional*):
859
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
860
- will be used as a starting point, adding more noise to it the larger the `strength`. The number of
861
- denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
862
- be maximum and the denoising process will run for the full number of iterations specified in
863
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
864
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
865
- The height in pixels of the generated image.
866
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
867
- The width in pixels of the generated image.
868
- num_inference_steps (`int`, *optional*, defaults to 50):
869
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
870
- expense of slower inference.
871
- guidance_scale (`float`, *optional*, defaults to 7.5):
872
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
873
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
874
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
875
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
876
- usually at the expense of lower image quality.
877
- negative_prompt (`str` or `List[str]`, *optional*):
878
- The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead.
879
- Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
880
- num_images_per_prompt (`int`, *optional*, defaults to 1):
881
- The number of images to generate per prompt.
882
- eta (`float`, *optional*, defaults to 0.0):
883
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
884
- [`schedulers.DDIMScheduler`], will be ignored for others.
885
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
886
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
887
- to make generation deterministic.
888
- latents (`torch.FloatTensor`, *optional*):
889
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
890
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
891
- tensor will ge generated by sampling using the supplied random `generator`.
892
- prompt_embeds (`torch.FloatTensor`, *optional*):
893
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
894
- provided, text embeddings will be generated from `prompt` input argument.
895
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
896
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
897
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
898
- argument.
899
- output_type (`str`, *optional*, defaults to `"pil"`):
900
- The output format of the generate image. Choose between
901
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
902
- return_dict (`bool`, *optional*, defaults to `True`):
903
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
904
- plain tuple.
905
- callback (`Callable`, *optional*):
906
- A function that will be called every `callback_steps` steps during inference. The function will be
907
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
908
- callback_steps (`int`, *optional*, defaults to 1):
909
- The frequency at which the `callback` function will be called. If not specified, the callback will be
910
- called at every step.
911
- cross_attention_kwargs (`dict`, *optional*):
912
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
913
- `self.processor` in
914
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
915
- controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0):
916
- The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
917
- to the residual in the original unet.
918
-
919
- Examples:
920
-
921
- Returns:
922
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
923
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
924
- When returning a tuple, the first element is a list with the generated images, and the second element is a
925
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
926
- (nsfw) content, according to the `safety_checker`.
927
- """
928
- # 0. Default height and width to unet
929
- height, width = self._default_height_width(height, width, controlnet_conditioning_image)
930
-
931
- # 1. Check inputs. Raise error if not correct
932
- self.check_inputs(
933
- prompt,
934
- image,
935
- mask_image,
936
- controlnet_conditioning_image,
937
- height,
938
- width,
939
- callback_steps,
940
- negative_prompt,
941
- prompt_embeds,
942
- negative_prompt_embeds,
943
- strength,
944
- )
945
-
946
- # 2. Define call parameters
947
- if prompt is not None and isinstance(prompt, str):
948
- batch_size = 1
949
- elif prompt is not None and isinstance(prompt, list):
950
- batch_size = len(prompt)
951
- else:
952
- batch_size = prompt_embeds.shape[0]
953
-
954
- device = self._execution_device
955
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
956
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
957
- # corresponds to doing no classifier free guidance.
958
- do_classifier_free_guidance = guidance_scale > 1.0
959
-
960
- # 3. Encode input prompt
961
- prompt_embeds = self._encode_prompt(
962
- prompt,
963
- device,
964
- num_images_per_prompt,
965
- do_classifier_free_guidance,
966
- negative_prompt,
967
- prompt_embeds=prompt_embeds,
968
- negative_prompt_embeds=negative_prompt_embeds,
969
- )
970
-
971
- # 4. Prepare mask, image, and controlnet_conditioning_image
972
- image = prepare_image(image)
973
-
974
- mask_image = prepare_mask_image(mask_image)
975
-
976
- controlnet_conditioning_image = prepare_controlnet_conditioning_image(
977
- controlnet_conditioning_image,
978
- width,
979
- height,
980
- batch_size * num_images_per_prompt,
981
- num_images_per_prompt,
982
- device,
983
- self.controlnet.dtype,
984
- )
985
-
986
- masked_image = image * (mask_image < 0.5)
987
-
988
- # 5. Prepare timesteps
989
- self.scheduler.set_timesteps(num_inference_steps, device=device)
990
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
991
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
992
-
993
- # 6. Prepare latent variables
994
- latents = self.prepare_latents(
995
- image,
996
- latent_timestep,
997
- batch_size,
998
- num_images_per_prompt,
999
- prompt_embeds.dtype,
1000
- device,
1001
- generator,
1002
- )
1003
-
1004
- mask_image_latents = self.prepare_mask_latents(
1005
- mask_image,
1006
- batch_size * num_images_per_prompt,
1007
- height,
1008
- width,
1009
- prompt_embeds.dtype,
1010
- device,
1011
- do_classifier_free_guidance,
1012
- )
1013
-
1014
- masked_image_latents = self.prepare_masked_image_latents(
1015
- masked_image,
1016
- batch_size * num_images_per_prompt,
1017
- height,
1018
- width,
1019
- prompt_embeds.dtype,
1020
- device,
1021
- generator,
1022
- do_classifier_free_guidance,
1023
- )
1024
-
1025
- if do_classifier_free_guidance:
1026
- controlnet_conditioning_image = torch.cat([controlnet_conditioning_image] * 2)
1027
-
1028
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1029
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1030
-
1031
- # 8. Denoising loop
1032
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1033
- with self.progress_bar(total=num_inference_steps) as progress_bar:
1034
- for i, t in enumerate(timesteps):
1035
- # expand the latents if we are doing classifier free guidance
1036
- non_inpainting_latent_model_input = (
1037
- torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1038
- )
1039
-
1040
- non_inpainting_latent_model_input = self.scheduler.scale_model_input(
1041
- non_inpainting_latent_model_input, t
1042
- )
1043
-
1044
- inpainting_latent_model_input = torch.cat(
1045
- [non_inpainting_latent_model_input, mask_image_latents, masked_image_latents], dim=1
1046
- )
1047
-
1048
- down_block_res_samples, mid_block_res_sample = self.controlnet(
1049
- non_inpainting_latent_model_input,
1050
- t,
1051
- encoder_hidden_states=prompt_embeds,
1052
- controlnet_cond=controlnet_conditioning_image,
1053
- return_dict=False,
1054
- )
1055
-
1056
- down_block_res_samples = [
1057
- down_block_res_sample * controlnet_conditioning_scale
1058
- for down_block_res_sample in down_block_res_samples
1059
- ]
1060
- mid_block_res_sample *= controlnet_conditioning_scale
1061
-
1062
- # predict the noise residual
1063
- noise_pred = self.unet(
1064
- inpainting_latent_model_input,
1065
- t,
1066
- encoder_hidden_states=prompt_embeds,
1067
- cross_attention_kwargs=cross_attention_kwargs,
1068
- down_block_additional_residuals=down_block_res_samples,
1069
- mid_block_additional_residual=mid_block_res_sample,
1070
- ).sample
1071
-
1072
- # perform guidance
1073
- if do_classifier_free_guidance:
1074
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1075
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1076
-
1077
- # compute the previous noisy sample x_t -> x_t-1
1078
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
1079
-
1080
- # call the callback, if provided
1081
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1082
- progress_bar.update()
1083
- if callback is not None and i % callback_steps == 0:
1084
- callback(i, t, latents)
1085
-
1086
- # If we do sequential model offloading, let's offload unet and controlnet
1087
- # manually for max memory savings
1088
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1089
- self.unet.to("cpu")
1090
- self.controlnet.to("cpu")
1091
- torch.cuda.empty_cache()
1092
-
1093
- if output_type == "latent":
1094
- image = latents
1095
- has_nsfw_concept = None
1096
- elif output_type == "pil":
1097
- # 8. Post-processing
1098
- image = self.decode_latents(latents)
1099
-
1100
- # 9. Run safety checker
1101
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1102
-
1103
- # 10. Convert to PIL
1104
- image = self.numpy_to_pil(image)
1105
- else:
1106
- # 8. Post-processing
1107
- image = self.decode_latents(latents)
1108
-
1109
- # 9. Run safety checker
1110
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1111
-
1112
- # Offload last model to CPU
1113
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1114
- self.final_offload_hook.offload()
1115
-
1116
- if not return_dict:
1117
- return (image, has_nsfw_concept)
1118
-
1119
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
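
The `__call__` signature and docstring above map directly onto a typical invocation of this community pipeline. The following is a minimal usage sketch and not part of the repository: the checkpoint ids, the `custom_pipeline` identifier, and the input file names are assumptions, while the keyword arguments mirror the signature shown in the diff.

```python
import torch
from diffusers import ControlNetModel, DiffusionPipeline
from diffusers.utils import load_image

# Assumed checkpoints; any SD inpainting model plus a matching ControlNet should work.
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
)
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    controlnet=controlnet,
    custom_pipeline="stable_diffusion_controlnet_inpaint_img2img",  # assumed community id
    torch_dtype=torch.float16,
).to("cuda")

# Hypothetical inputs: the init image, a white-on-black mask, and a Canny edge map.
image = load_image("init.png")
mask_image = load_image("mask.png")
control_image = load_image("canny.png")

result = pipe(
    prompt="a red brick fireplace",
    image=image,
    mask_image=mask_image,
    controlnet_conditioning_image=control_image,
    strength=0.8,
    num_inference_steps=50,
    guidance_scale=7.5,
    controlnet_conditioning_scale=1.0,
).images[0]
result.save("out.png")
```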
spaces/Andy1621/uniformer_image_detection/configs/fast_rcnn/README.md DELETED
@@ -1,16 +0,0 @@
- # Fast R-CNN
-
- ## Introduction
-
- [ALGORITHM]
-
- ```latex
- @inproceedings{girshick2015fast,
-   title={Fast r-cnn},
-   author={Girshick, Ross},
-   booktitle={Proceedings of the IEEE international conference on computer vision},
-   year={2015}
- }
- ```
-
- ## Results and models
spaces/Andy1621/uniformer_image_detection/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py DELETED
@@ -1,4 +0,0 @@
- _base_ = './mask_rcnn_hrnetv2p_w40_1x_coco.py'
- # learning policy
- lr_config = dict(step=[16, 22])
- runner = dict(type='EpochBasedRunner', max_epochs=24)
spaces/Andy1621/uniformer_image_detection/configs/wider_face/ssd300_wider_face.py DELETED
@@ -1,18 +0,0 @@
- _base_ = [
-     '../_base_/models/ssd300.py', '../_base_/datasets/wider_face.py',
-     '../_base_/default_runtime.py'
- ]
- model = dict(bbox_head=dict(num_classes=1))
- # optimizer
- optimizer = dict(type='SGD', lr=0.012, momentum=0.9, weight_decay=5e-4)
- optimizer_config = dict()
- # learning policy
- lr_config = dict(
-     policy='step',
-     warmup='linear',
-     warmup_iters=1000,
-     warmup_ratio=0.001,
-     step=[16, 20])
- # runtime settings
- runner = dict(type='EpochBasedRunner', max_epochs=24)
- log_config = dict(interval=1)
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/fast_rcnn.py DELETED
@@ -1,52 +0,0 @@
- from ..builder import DETECTORS
- from .two_stage import TwoStageDetector
-
-
- @DETECTORS.register_module()
- class FastRCNN(TwoStageDetector):
-     """Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_"""
-
-     def __init__(self,
-                  backbone,
-                  roi_head,
-                  train_cfg,
-                  test_cfg,
-                  neck=None,
-                  pretrained=None):
-         super(FastRCNN, self).__init__(
-             backbone=backbone,
-             neck=neck,
-             roi_head=roi_head,
-             train_cfg=train_cfg,
-             test_cfg=test_cfg,
-             pretrained=pretrained)
-
-     def forward_test(self, imgs, img_metas, proposals, **kwargs):
-         """
-         Args:
-             imgs (List[Tensor]): the outer list indicates test-time
-                 augmentations and inner Tensor should have a shape NxCxHxW,
-                 which contains all images in the batch.
-             img_metas (List[List[dict]]): the outer list indicates test-time
-                 augs (multiscale, flip, etc.) and the inner list indicates
-                 images in a batch.
-             proposals (List[List[Tensor]]): the outer list indicates test-time
-                 augs (multiscale, flip, etc.) and the inner list indicates
-                 images in a batch. The Tensor should have a shape Px4, where
-                 P is the number of proposals.
-         """
-         for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
-             if not isinstance(var, list):
-                 raise TypeError(f'{name} must be a list, but got {type(var)}')
-
-         num_augs = len(imgs)
-         if num_augs != len(img_metas):
-             raise ValueError(f'num of augmentations ({len(imgs)}) '
-                              f'!= num of image meta ({len(img_metas)})')
-
-         if num_augs == 1:
-             return self.simple_test(imgs[0], img_metas[0], proposals[0],
-                                     **kwargs)
-         else:
-             # TODO: support test-time augmentation
-             assert NotImplementedError
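
The nesting documented in `forward_test` is easy to get wrong, so here is a small, self-contained sketch of how the `proposals` argument is expected to be shaped for a single image without test-time augmentation. The tensor values are random placeholders; only the list structure and the Px4 shape come from the docstring above.

```python
import torch

# One test-time augmentation (i.e. no TTA), one image in the batch.
num_proposals = 100
proposals_per_image = torch.rand(num_proposals, 4)   # Px4 boxes, as documented
proposals = [[proposals_per_image]]                  # outer list: TTA, inner list: images

imgs = [torch.rand(1, 3, 800, 1333)]                 # NxCxHxW per augmentation
img_metas = [[dict(img_shape=(800, 1333, 3), scale_factor=1.0, flip=False)]]

# With a constructed FastRCNN detector, inference would then be:
# results = detector.forward_test(imgs, img_metas, proposals)
assert len(imgs) == len(img_metas) == len(proposals) == 1
```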
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/pisa_roi_head.py DELETED
@@ -1,159 +0,0 @@
1
- from mmdet.core import bbox2roi
2
- from ..builder import HEADS
3
- from ..losses.pisa_loss import carl_loss, isr_p
4
- from .standard_roi_head import StandardRoIHead
5
-
6
-
7
- @HEADS.register_module()
8
- class PISARoIHead(StandardRoIHead):
9
- r"""The RoI head for `Prime Sample Attention in Object Detection
10
- <https://arxiv.org/abs/1904.04821>`_."""
11
-
12
- def forward_train(self,
13
- x,
14
- img_metas,
15
- proposal_list,
16
- gt_bboxes,
17
- gt_labels,
18
- gt_bboxes_ignore=None,
19
- gt_masks=None):
20
- """Forward function for training.
21
-
22
- Args:
23
- x (list[Tensor]): List of multi-level img features.
24
- img_metas (list[dict]): List of image info dict where each dict
25
- has: 'img_shape', 'scale_factor', 'flip', and may also contain
26
- 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
27
- For details on the values of these keys see
28
- `mmdet/datasets/pipelines/formatting.py:Collect`.
29
- proposals (list[Tensors]): List of region proposals.
30
- gt_bboxes (list[Tensor]): Each item are the truth boxes for each
31
- image in [tl_x, tl_y, br_x, br_y] format.
32
- gt_labels (list[Tensor]): Class indices corresponding to each box
33
- gt_bboxes_ignore (list[Tensor], optional): Specify which bounding
34
- boxes can be ignored when computing the loss.
35
- gt_masks (None | Tensor) : True segmentation masks for each box
36
- used if the architecture supports a segmentation task.
37
-
38
- Returns:
39
- dict[str, Tensor]: a dictionary of loss components
40
- """
41
- # assign gts and sample proposals
42
- if self.with_bbox or self.with_mask:
43
- num_imgs = len(img_metas)
44
- if gt_bboxes_ignore is None:
45
- gt_bboxes_ignore = [None for _ in range(num_imgs)]
46
- sampling_results = []
47
- neg_label_weights = []
48
- for i in range(num_imgs):
49
- assign_result = self.bbox_assigner.assign(
50
- proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
51
- gt_labels[i])
52
- sampling_result = self.bbox_sampler.sample(
53
- assign_result,
54
- proposal_list[i],
55
- gt_bboxes[i],
56
- gt_labels[i],
57
- feats=[lvl_feat[i][None] for lvl_feat in x])
58
- # neg label weight is obtained by sampling when using ISR-N
59
- neg_label_weight = None
60
- if isinstance(sampling_result, tuple):
61
- sampling_result, neg_label_weight = sampling_result
62
- sampling_results.append(sampling_result)
63
- neg_label_weights.append(neg_label_weight)
64
-
65
- losses = dict()
66
- # bbox head forward and loss
67
- if self.with_bbox:
68
- bbox_results = self._bbox_forward_train(
69
- x,
70
- sampling_results,
71
- gt_bboxes,
72
- gt_labels,
73
- img_metas,
74
- neg_label_weights=neg_label_weights)
75
- losses.update(bbox_results['loss_bbox'])
76
-
77
- # mask head forward and loss
78
- if self.with_mask:
79
- mask_results = self._mask_forward_train(x, sampling_results,
80
- bbox_results['bbox_feats'],
81
- gt_masks, img_metas)
82
- losses.update(mask_results['loss_mask'])
83
-
84
- return losses
85
-
86
- def _bbox_forward(self, x, rois):
87
- """Box forward function used in both training and testing."""
88
- # TODO: a more flexible way to decide which feature maps to use
89
- bbox_feats = self.bbox_roi_extractor(
90
- x[:self.bbox_roi_extractor.num_inputs], rois)
91
- if self.with_shared_head:
92
- bbox_feats = self.shared_head(bbox_feats)
93
- cls_score, bbox_pred = self.bbox_head(bbox_feats)
94
-
95
- bbox_results = dict(
96
- cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
97
- return bbox_results
98
-
99
- def _bbox_forward_train(self,
100
- x,
101
- sampling_results,
102
- gt_bboxes,
103
- gt_labels,
104
- img_metas,
105
- neg_label_weights=None):
106
- """Run forward function and calculate loss for box head in training."""
107
- rois = bbox2roi([res.bboxes for res in sampling_results])
108
-
109
- bbox_results = self._bbox_forward(x, rois)
110
-
111
- bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
112
- gt_labels, self.train_cfg)
113
-
114
- # neg_label_weights obtained by sampler is image-wise, mapping back to
115
- # the corresponding location in label weights
116
- if neg_label_weights[0] is not None:
117
- label_weights = bbox_targets[1]
118
- cur_num_rois = 0
119
- for i in range(len(sampling_results)):
120
- num_pos = sampling_results[i].pos_inds.size(0)
121
- num_neg = sampling_results[i].neg_inds.size(0)
122
- label_weights[cur_num_rois + num_pos:cur_num_rois + num_pos +
123
- num_neg] = neg_label_weights[i]
124
- cur_num_rois += num_pos + num_neg
125
-
126
- cls_score = bbox_results['cls_score']
127
- bbox_pred = bbox_results['bbox_pred']
128
-
129
- # Apply ISR-P
130
- isr_cfg = self.train_cfg.get('isr', None)
131
- if isr_cfg is not None:
132
- bbox_targets = isr_p(
133
- cls_score,
134
- bbox_pred,
135
- bbox_targets,
136
- rois,
137
- sampling_results,
138
- self.bbox_head.loss_cls,
139
- self.bbox_head.bbox_coder,
140
- **isr_cfg,
141
- num_class=self.bbox_head.num_classes)
142
- loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois,
143
- *bbox_targets)
144
-
145
- # Add CARL Loss
146
- carl_cfg = self.train_cfg.get('carl', None)
147
- if carl_cfg is not None:
148
- loss_carl = carl_loss(
149
- cls_score,
150
- bbox_targets[0],
151
- bbox_pred,
152
- bbox_targets[2],
153
- self.bbox_head.loss_bbox,
154
- **carl_cfg,
155
- num_class=self.bbox_head.num_classes)
156
- loss_bbox.update(loss_carl)
157
-
158
- bbox_results.update(loss_bbox=loss_bbox)
159
- return bbox_results
spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/encnet_r50-d8.py DELETED
@@ -1,48 +0,0 @@
- # model settings
- norm_cfg = dict(type='SyncBN', requires_grad=True)
- model = dict(
-     type='EncoderDecoder',
-     pretrained='open-mmlab://resnet50_v1c',
-     backbone=dict(
-         type='ResNetV1c',
-         depth=50,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         dilations=(1, 1, 2, 4),
-         strides=(1, 2, 1, 1),
-         norm_cfg=norm_cfg,
-         norm_eval=False,
-         style='pytorch',
-         contract_dilation=True),
-     decode_head=dict(
-         type='EncHead',
-         in_channels=[512, 1024, 2048],
-         in_index=(1, 2, 3),
-         channels=512,
-         num_codes=32,
-         use_se_loss=True,
-         add_lateral=False,
-         dropout_ratio=0.1,
-         num_classes=19,
-         norm_cfg=norm_cfg,
-         align_corners=False,
-         loss_decode=dict(
-             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
-         loss_se_decode=dict(
-             type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)),
-     auxiliary_head=dict(
-         type='FCNHead',
-         in_channels=1024,
-         in_index=2,
-         channels=256,
-         num_convs=1,
-         concat_input=False,
-         dropout_ratio=0.1,
-         num_classes=19,
-         norm_cfg=norm_cfg,
-         align_corners=False,
-         loss_decode=dict(
-             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
-     # model training and testing settings
-     train_cfg=dict(),
-     test_cfg=dict(mode='whole'))
spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py DELETED
@@ -1,10 +0,0 @@
- _base_ = './fcn_hr18_512x1024_80k_cityscapes.py'
- model = dict(
-     pretrained='open-mmlab://msra/hrnetv2_w48',
-     backbone=dict(
-         extra=dict(
-             stage2=dict(num_channels=(48, 96)),
-             stage3=dict(num_channels=(48, 96, 192)),
-             stage4=dict(num_channels=(48, 96, 192, 384)))),
-     decode_head=dict(
-         in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384])))
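
Configs like the one above are consumed by mmsegmentation's high-level APIs rather than run directly. A minimal inference sketch, assuming an mmsegmentation 0.x installation, a downloaded checkpoint file, and a local test image (all three are assumptions, not part of this repository):

```python
from mmseg.apis import init_segmentor, inference_segmentor

config_file = 'configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py'
checkpoint_file = 'fcn_hr48_512x1024_80k_cityscapes.pth'  # hypothetical local path

# Build the model from this config and load trained weights.
model = init_segmentor(config_file, checkpoint_file, device='cuda:0')

# Run inference on a single image; the result is a per-pixel class map.
result = inference_segmentor(model, 'demo.png')
```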
spaces/Aniquel/WizApp/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: WizApp
- emoji: ⚡
- colorFrom: yellow
- colorTo: red
- sdk: gradio
- sdk_version: 3.23.0
- app_file: app.py
- pinned: false
- license: gpl-3.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/RWKV-model.md DELETED
@@ -1,72 +0,0 @@
- > RWKV: RNN with Transformer-level LLM Performance
- >
- > It combines the best of RNN and transformer - great performance, fast inference, saves VRAM, fast training, "infinite" ctx_len, and free sentence embedding (using the final hidden state).
-
- https://github.com/BlinkDL/RWKV-LM
-
- https://github.com/BlinkDL/ChatRWKV
-
- ## Using RWKV in the web UI
-
- ### Hugging Face weights
-
- Simply download the weights from https://huggingface.co/RWKV and load them as you would for any other model.
-
- There is a bug in transformers==4.29.2 that prevents RWKV from being loaded in 8-bit mode. You can install the dev branch to solve this bug: `pip install git+https://github.com/huggingface/transformers`
-
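
As a concrete illustration of the transformers route described above, the snippet below is a rough sketch that is not part of the original document; the exact repository id under https://huggingface.co/RWKV is an assumption, and the generation settings are arbitrary.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "RWKV/rwkv-4-169m-pile"  # assumed repo id under the RWKV organization
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Tokenize a prompt and sample a short continuation.
inputs = tokenizer("In a shocking finding, ", return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(output[0]))
```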
- ### Original .pth weights
-
- The instructions below are from before RWKV was supported in transformers, and they are kept for legacy purposes. The old implementation is possibly faster, but it lacks the full range of samplers that the transformers library offers.
-
- #### 0. Install the RWKV library
-
- ```
- pip install rwkv
- ```
-
- `0.7.3` was the last version that I tested. If you experience any issues, try ```pip install rwkv==0.7.3```.
-
- #### 1. Download the model
-
- It is available in different sizes:
-
- * https://huggingface.co/BlinkDL/rwkv-4-pile-3b/
- * https://huggingface.co/BlinkDL/rwkv-4-pile-7b/
- * https://huggingface.co/BlinkDL/rwkv-4-pile-14b/
-
- There are also older releases with smaller sizes like:
-
- * https://huggingface.co/BlinkDL/rwkv-4-pile-169m/resolve/main/RWKV-4-Pile-169M-20220807-8023.pth
-
- Download the chosen `.pth` and put it directly in the `models` folder.
-
- #### 2. Download the tokenizer
-
- [20B_tokenizer.json](https://raw.githubusercontent.com/BlinkDL/ChatRWKV/main/v2/20B_tokenizer.json)
-
- Also put it directly in the `models` folder. Make sure to not rename it. It should be called `20B_tokenizer.json`.
-
- #### 3. Launch the web UI
-
- No additional steps are required. Just launch it as you would with any other model.
-
- ```
- python server.py --listen --no-stream --model RWKV-4-Pile-169M-20220807-8023.pth
- ```
-
- #### Setting a custom strategy
-
- It is possible to have very fine control over the offloading and precision for the model with the `--rwkv-strategy` flag. Possible values include:
-
- ```
- "cpu fp32" # CPU mode
- "cuda fp16" # GPU mode with float16 precision
- "cuda fp16 *30 -> cpu fp32" # GPU+CPU offloading. The higher the number after *, the higher the GPU allocation.
- "cuda fp16i8" # GPU mode with 8-bit precision
- ```
-
- See the README for the PyPI package for more details: https://pypi.org/project/rwkv/
-
- #### Compiling the CUDA kernel
-
- You can compile the CUDA kernel for the model with `--rwkv-cuda-on`. This should improve the performance a lot but I haven't been able to get it to work yet.
spaces/AnonAndDesu/Desu_Proxy/Dockerfile DELETED
@@ -1,11 +0,0 @@
- FROM node:18-bullseye-slim
- RUN apt-get update && \
-     apt-get install -y git
- RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
- WORKDIR /app
- RUN npm install
- COPY Dockerfile greeting.md* .env* ./
- RUN npm run build
- EXPOSE 7860
- ENV NODE_ENV=production
- CMD [ "npm", "start" ]
spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/geometry.py DELETED
@@ -1,96 +0,0 @@
- import torch
- import torch.nn.functional as F
-
-
- def coords_grid(b, h, w, homogeneous=False, device=None):
-     y, x = torch.meshgrid(torch.arange(h), torch.arange(w))  # [H, W]
-
-     stacks = [x, y]
-
-     if homogeneous:
-         ones = torch.ones_like(x)  # [H, W]
-         stacks.append(ones)
-
-     grid = torch.stack(stacks, dim=0).float()  # [2, H, W] or [3, H, W]
-
-     grid = grid[None].repeat(b, 1, 1, 1)  # [B, 2, H, W] or [B, 3, H, W]
-
-     if device is not None:
-         grid = grid.to(device)
-
-     return grid
-
-
- def generate_window_grid(h_min, h_max, w_min, w_max, len_h, len_w, device=None):
-     assert device is not None
-
-     x, y = torch.meshgrid([torch.linspace(w_min, w_max, len_w, device=device),
-                            torch.linspace(h_min, h_max, len_h, device=device)],
-                           )
-     grid = torch.stack((x, y), -1).transpose(0, 1).float()  # [H, W, 2]
-
-     return grid
-
-
- def normalize_coords(coords, h, w):
-     # coords: [B, H, W, 2]
-     c = torch.Tensor([(w - 1) / 2., (h - 1) / 2.]).float().to(coords.device)
-     return (coords - c) / c  # [-1, 1]
-
-
- def bilinear_sample(img, sample_coords, mode='bilinear', padding_mode='zeros', return_mask=False):
-     # img: [B, C, H, W]
-     # sample_coords: [B, 2, H, W] in image scale
-     if sample_coords.size(1) != 2:  # [B, H, W, 2]
-         sample_coords = sample_coords.permute(0, 3, 1, 2)
-
-     b, _, h, w = sample_coords.shape
-
-     # Normalize to [-1, 1]
-     x_grid = 2 * sample_coords[:, 0] / (w - 1) - 1
-     y_grid = 2 * sample_coords[:, 1] / (h - 1) - 1
-
-     grid = torch.stack([x_grid, y_grid], dim=-1)  # [B, H, W, 2]
-
-     img = F.grid_sample(img, grid, mode=mode, padding_mode=padding_mode, align_corners=True)
-
-     if return_mask:
-         mask = (x_grid >= -1) & (y_grid >= -1) & (x_grid <= 1) & (y_grid <= 1)  # [B, H, W]
-
-         return img, mask
-
-     return img
-
-
- def flow_warp(feature, flow, mask=False, padding_mode='zeros'):
-     b, c, h, w = feature.size()
-     assert flow.size(1) == 2
-
-     grid = coords_grid(b, h, w).to(flow.device) + flow  # [B, 2, H, W]
-
-     return bilinear_sample(feature, grid, padding_mode=padding_mode,
-                            return_mask=mask)
-
-
- def forward_backward_consistency_check(fwd_flow, bwd_flow,
-                                        alpha=0.01,
-                                        beta=0.5
-                                        ):
-     # fwd_flow, bwd_flow: [B, 2, H, W]
-     # alpha and beta values are following UnFlow (https://arxiv.org/abs/1711.07837)
-     assert fwd_flow.dim() == 4 and bwd_flow.dim() == 4
-     assert fwd_flow.size(1) == 2 and bwd_flow.size(1) == 2
-     flow_mag = torch.norm(fwd_flow, dim=1) + torch.norm(bwd_flow, dim=1)  # [B, H, W]
-
-     warped_bwd_flow = flow_warp(bwd_flow, fwd_flow)  # [B, 2, H, W]
-     warped_fwd_flow = flow_warp(fwd_flow, bwd_flow)  # [B, 2, H, W]
-
-     diff_fwd = torch.norm(fwd_flow + warped_bwd_flow, dim=1)  # [B, H, W]
-     diff_bwd = torch.norm(bwd_flow + warped_fwd_flow, dim=1)
-
-     threshold = alpha * flow_mag + beta
-
-     fwd_occ = (diff_fwd > threshold).float()  # [B, H, W]
-     bwd_occ = (diff_bwd > threshold).float()
-
-     return fwd_occ, bwd_occ
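
A short sketch of how these helpers compose for occlusion masking; the shapes and random flow values are illustrative only, and the import path from `gmflow.geometry` is an assumption about how the package is laid out.

```python
import torch
# assumed import path within the gmflow package
from gmflow.geometry import flow_warp, forward_backward_consistency_check

b, c, h, w = 1, 3, 64, 64
feature = torch.rand(b, c, h, w)
fwd_flow = torch.randn(b, 2, h, w)   # frame1 -> frame2 flow
bwd_flow = torch.randn(b, 2, h, w)   # frame2 -> frame1 flow

# Warp a frame2 feature map back into frame1 coordinates using the forward flow.
warped = flow_warp(feature, fwd_flow)            # [B, C, H, W]

# Pixels where forward and backward flow disagree are flagged as occluded.
fwd_occ, bwd_occ = forward_backward_consistency_check(fwd_flow, bwd_flow)
print(warped.shape, fwd_occ.shape)               # (1, 3, 64, 64) and (1, 64, 64)
```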
spaces/Ariharasudhan/YoloV5/utils/aws/resume.py DELETED
@@ -1,40 +0,0 @@
- # Resume all interrupted trainings in yolov5/ dir including DDP trainings
- # Usage: $ python utils/aws/resume.py
-
- import os
- import sys
- from pathlib import Path
-
- import torch
- import yaml
-
- FILE = Path(__file__).resolve()
- ROOT = FILE.parents[2]  # YOLOv5 root directory
- if str(ROOT) not in sys.path:
-     sys.path.append(str(ROOT))  # add ROOT to PATH
-
- port = 0  # --master_port
- path = Path('').resolve()
- for last in path.rglob('*/**/last.pt'):
-     ckpt = torch.load(last)
-     if ckpt['optimizer'] is None:
-         continue
-
-     # Load opt.yaml
-     with open(last.parent.parent / 'opt.yaml', errors='ignore') as f:
-         opt = yaml.safe_load(f)
-
-     # Get device count
-     d = opt['device'].split(',')  # devices
-     nd = len(d)  # number of devices
-     ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1)  # distributed data parallel
-
-     if ddp:  # multi-GPU
-         port += 1
-         cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
-     else:  # single-GPU
-         cmd = f'python train.py --resume {last}'
-
-     cmd += ' > /dev/null 2>&1 &'  # redirect output to dev/null and run in daemon thread
-     print(cmd)
-     os.system(cmd)
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel.py DELETED
@@ -1,37 +0,0 @@
- import logging
- import os
- from typing import Optional
-
- from pip._vendor.pyproject_hooks import BuildBackendHookCaller
-
- from pip._internal.utils.subprocess import runner_with_spinner_message
-
- logger = logging.getLogger(__name__)
-
-
- def build_wheel_pep517(
-     name: str,
-     backend: BuildBackendHookCaller,
-     metadata_directory: str,
-     tempd: str,
- ) -> Optional[str]:
-     """Build one InstallRequirement using the PEP 517 build process.
-
-     Returns path to wheel if successfully built. Otherwise, returns None.
-     """
-     assert metadata_directory is not None
-     try:
-         logger.debug("Destination directory: %s", tempd)
-
-         runner = runner_with_spinner_message(
-             f"Building wheel for {name} (pyproject.toml)"
-         )
-         with backend.subprocess_runner(runner):
-             wheel_name = backend.build_wheel(
-                 tempd,
-                 metadata_directory=metadata_directory,
-             )
-     except Exception:
-         logger.error("Failed building wheel for %s", name)
-         return None
-     return os.path.join(tempd, wheel_name)
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__init__.py DELETED
@@ -1,142 +0,0 @@
1
- """
2
- pygments.formatters
3
- ~~~~~~~~~~~~~~~~~~~
4
-
5
- Pygments formatters.
6
-
7
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
8
- :license: BSD, see LICENSE for details.
9
- """
10
-
11
- import sys
12
- import types
13
- from fnmatch import fnmatch
14
- from os.path import basename
15
-
16
- from pip._vendor.pygments.formatters._mapping import FORMATTERS
17
- from pip._vendor.pygments.plugin import find_plugin_formatters
18
- from pip._vendor.pygments.util import ClassNotFound
19
-
20
- __all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
21
- 'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS)
22
-
23
- _formatter_cache = {} # classes by name
24
-
25
- def _load_formatters(module_name):
26
- """Load a formatter (and all others in the module too)."""
27
- mod = __import__(module_name, None, None, ['__all__'])
28
- for formatter_name in mod.__all__:
29
- cls = getattr(mod, formatter_name)
30
- _formatter_cache[cls.name] = cls
31
-
32
-
33
- def get_all_formatters():
34
- """Return a generator for all formatter classes."""
35
- # NB: this returns formatter classes, not info like get_all_lexers().
36
- for info in FORMATTERS.values():
37
- if info[1] not in _formatter_cache:
38
- _load_formatters(info[0])
39
- yield _formatter_cache[info[1]]
40
- for _, formatter in find_plugin_formatters():
41
- yield formatter
42
-
43
-
44
- def find_formatter_class(alias):
45
- """Lookup a formatter by alias.
46
-
47
- Returns None if not found.
48
- """
49
- for module_name, name, aliases, _, _ in FORMATTERS.values():
50
- if alias in aliases:
51
- if name not in _formatter_cache:
52
- _load_formatters(module_name)
53
- return _formatter_cache[name]
54
- for _, cls in find_plugin_formatters():
55
- if alias in cls.aliases:
56
- return cls
57
-
58
-
59
- def get_formatter_by_name(_alias, **options):
60
- """Lookup and instantiate a formatter by alias.
61
-
62
- Raises ClassNotFound if not found.
63
- """
64
- cls = find_formatter_class(_alias)
65
- if cls is None:
66
- raise ClassNotFound("no formatter found for name %r" % _alias)
67
- return cls(**options)
68
-
69
-
70
- def load_formatter_from_file(filename, formattername="CustomFormatter",
71
- **options):
72
- """Load a formatter from a file.
73
-
74
- This method expects a file located relative to the current working
75
- directory, which contains a class named CustomFormatter. By default,
76
- it expects the Formatter to be named CustomFormatter; you can specify
77
- your own class name as the second argument to this function.
78
-
79
- Users should be very careful with the input, because this method
80
- is equivalent to running eval on the input file.
81
-
82
- Raises ClassNotFound if there are any problems importing the Formatter.
83
-
84
- .. versionadded:: 2.2
85
- """
86
- try:
87
- # This empty dict will contain the namespace for the exec'd file
88
- custom_namespace = {}
89
- with open(filename, 'rb') as f:
90
- exec(f.read(), custom_namespace)
91
- # Retrieve the class `formattername` from that namespace
92
- if formattername not in custom_namespace:
93
- raise ClassNotFound('no valid %s class found in %s' %
94
- (formattername, filename))
95
- formatter_class = custom_namespace[formattername]
96
- # And finally instantiate it with the options
97
- return formatter_class(**options)
98
- except OSError as err:
99
- raise ClassNotFound('cannot read %s: %s' % (filename, err))
100
- except ClassNotFound:
101
- raise
102
- except Exception as err:
103
- raise ClassNotFound('error when loading custom formatter: %s' % err)
104
-
105
-
106
- def get_formatter_for_filename(fn, **options):
107
- """Lookup and instantiate a formatter by filename pattern.
108
-
109
- Raises ClassNotFound if not found.
110
- """
111
- fn = basename(fn)
112
- for modname, name, _, filenames, _ in FORMATTERS.values():
113
- for filename in filenames:
114
- if fnmatch(fn, filename):
115
- if name not in _formatter_cache:
116
- _load_formatters(modname)
117
- return _formatter_cache[name](**options)
118
- for cls in find_plugin_formatters():
119
- for filename in cls.filenames:
120
- if fnmatch(fn, filename):
121
- return cls(**options)
122
- raise ClassNotFound("no formatter found for file name %r" % fn)
123
-
124
-
125
- class _automodule(types.ModuleType):
126
- """Automatically import formatters."""
127
-
128
- def __getattr__(self, name):
129
- info = FORMATTERS.get(name)
130
- if info:
131
- _load_formatters(info[0])
132
- cls = _formatter_cache[info[1]]
133
- setattr(self, name, cls)
134
- return cls
135
- raise AttributeError(name)
136
-
137
-
138
- oldmod = sys.modules[__name__]
139
- newmod = _automodule(__name__)
140
- newmod.__dict__.update(oldmod.__dict__)
141
- sys.modules[__name__] = newmod
142
- del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
spaces/AvaterClasher/Food_Classifier_Moni/model.py DELETED
@@ -1,36 +0,0 @@
- import torch
- import torchvision
-
- from torch import nn
-
-
- def create_effnetb2_model(num_classes: int = 3,
-                           seed: int = 42):
-     """Creates an EfficientNetB2 feature extractor model and transforms.
-
-     Args:
-         num_classes (int, optional): number of classes in the classifier head.
-             Defaults to 3.
-         seed (int, optional): random seed value. Defaults to 42.
-
-     Returns:
-         model (torch.nn.Module): EffNetB2 feature extractor model.
-         transforms (torchvision.transforms): EffNetB2 image transforms.
-     """
-     # Create EffNetB2 pretrained weights, transforms and model
-     weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
-     transforms = weights.transforms()
-     model = torchvision.models.efficientnet_b2(weights=weights)
-
-     # Freeze all layers in base model
-     for param in model.parameters():
-         param.requires_grad = False
-
-     # Change classifier head with random seed for reproducibility
-     torch.manual_seed(seed)
-     model.classifier = nn.Sequential(
-         nn.Dropout(p=0.3, inplace=True),
-         nn.Linear(in_features=1408, out_features=num_classes),
-     )
-
-     return model, transforms
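
A brief usage sketch for the factory above; the image path is a placeholder and the class names are arbitrary examples, not taken from the Space itself.

```python
import torch
from PIL import Image

# model.py is assumed to be importable from the Space's working directory.
from model import create_effnetb2_model

class_names = ["pizza", "steak", "sushi"]  # example labels for a 3-way food classifier
model, transforms = create_effnetb2_model(num_classes=len(class_names))
model.eval()

img = Image.open("example.jpg")            # placeholder image path
with torch.inference_mode():
    logits = model(transforms(img).unsqueeze(0))   # add batch dimension
    probs = torch.softmax(logits, dim=1)
print(class_names[int(probs.argmax())], float(probs.max()))
```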
spaces/Awesimo/jojogan/e4e/criteria/moco_loss.py DELETED
@@ -1,71 +0,0 @@
- import torch
- from torch import nn
- import torch.nn.functional as F
-
- from configs.paths_config import model_paths
-
-
- class MocoLoss(nn.Module):
-
-     def __init__(self, opts):
-         super(MocoLoss, self).__init__()
-         print("Loading MOCO model from path: {}".format(model_paths["moco"]))
-         self.model = self.__load_model()
-         self.model.eval()
-         for param in self.model.parameters():
-             param.requires_grad = False
-
-     @staticmethod
-     def __load_model():
-         import torchvision.models as models
-         model = models.__dict__["resnet50"]()
-         # freeze all layers but the last fc
-         for name, param in model.named_parameters():
-             if name not in ['fc.weight', 'fc.bias']:
-                 param.requires_grad = False
-         checkpoint = torch.load(model_paths['moco'], map_location="cpu")
-         state_dict = checkpoint['state_dict']
-         # rename moco pre-trained keys
-         for k in list(state_dict.keys()):
-             # retain only encoder_q up to before the embedding layer
-             if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):
-                 # remove prefix
-                 state_dict[k[len("module.encoder_q."):]] = state_dict[k]
-             # delete renamed or unused k
-             del state_dict[k]
-         msg = model.load_state_dict(state_dict, strict=False)
-         assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}
-         # remove output layer
-         model = nn.Sequential(*list(model.children())[:-1]).cuda()
-         return model
-
-     def extract_feats(self, x):
-         x = F.interpolate(x, size=224)
-         x_feats = self.model(x)
-         x_feats = nn.functional.normalize(x_feats, dim=1)
-         x_feats = x_feats.squeeze()
-         return x_feats
-
-     def forward(self, y_hat, y, x):
-         n_samples = x.shape[0]
-         x_feats = self.extract_feats(x)
-         y_feats = self.extract_feats(y)
-         y_hat_feats = self.extract_feats(y_hat)
-         y_feats = y_feats.detach()
-         loss = 0
-         sim_improvement = 0
-         sim_logs = []
-         count = 0
-         for i in range(n_samples):
-             diff_target = y_hat_feats[i].dot(y_feats[i])
-             diff_input = y_hat_feats[i].dot(x_feats[i])
-             diff_views = y_feats[i].dot(x_feats[i])
-             sim_logs.append({'diff_target': float(diff_target),
-                              'diff_input': float(diff_input),
-                              'diff_views': float(diff_views)})
-             loss += 1 - diff_target
-             sim_diff = float(diff_target) - float(diff_views)
-             sim_improvement += sim_diff
-             count += 1
-
-         return loss / count, sim_improvement / count, sim_logs
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/evaluation/rotated_coco_evaluation.py DELETED
@@ -1,207 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import itertools
3
- import json
4
- import numpy as np
5
- import os
6
- import torch
7
- from pycocotools.cocoeval import COCOeval, maskUtils
8
-
9
- from detectron2.structures import BoxMode, RotatedBoxes, pairwise_iou_rotated
10
- from detectron2.utils.file_io import PathManager
11
-
12
- from .coco_evaluation import COCOEvaluator
13
-
14
-
15
- class RotatedCOCOeval(COCOeval):
16
- @staticmethod
17
- def is_rotated(box_list):
18
- if type(box_list) == np.ndarray:
19
- return box_list.shape[1] == 5
20
- elif type(box_list) == list:
21
- if box_list == []: # cannot decide the box_dim
22
- return False
23
- return np.all(
24
- np.array(
25
- [
26
- (len(obj) == 5) and ((type(obj) == list) or (type(obj) == np.ndarray))
27
- for obj in box_list
28
- ]
29
- )
30
- )
31
- return False
32
-
33
- @staticmethod
34
- def boxlist_to_tensor(boxlist, output_box_dim):
35
- if type(boxlist) == np.ndarray:
36
- box_tensor = torch.from_numpy(boxlist)
37
- elif type(boxlist) == list:
38
- if boxlist == []:
39
- return torch.zeros((0, output_box_dim), dtype=torch.float32)
40
- else:
41
- box_tensor = torch.FloatTensor(boxlist)
42
- else:
43
- raise Exception("Unrecognized boxlist type")
44
-
45
- input_box_dim = box_tensor.shape[1]
46
- if input_box_dim != output_box_dim:
47
- if input_box_dim == 4 and output_box_dim == 5:
48
- box_tensor = BoxMode.convert(box_tensor, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS)
49
- else:
50
- raise Exception(
51
- "Unable to convert from {}-dim box to {}-dim box".format(
52
- input_box_dim, output_box_dim
53
- )
54
- )
55
- return box_tensor
56
-
57
- def compute_iou_dt_gt(self, dt, gt, is_crowd):
58
- if self.is_rotated(dt) or self.is_rotated(gt):
59
- # TODO: take is_crowd into consideration
60
- assert all(c == 0 for c in is_crowd)
61
- dt = RotatedBoxes(self.boxlist_to_tensor(dt, output_box_dim=5))
62
- gt = RotatedBoxes(self.boxlist_to_tensor(gt, output_box_dim=5))
63
- return pairwise_iou_rotated(dt, gt)
64
- else:
65
- # This is the same as the classical COCO evaluation
66
- return maskUtils.iou(dt, gt, is_crowd)
67
-
68
- def computeIoU(self, imgId, catId):
69
- p = self.params
70
- if p.useCats:
71
- gt = self._gts[imgId, catId]
72
- dt = self._dts[imgId, catId]
73
- else:
74
- gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
75
- dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
76
- if len(gt) == 0 and len(dt) == 0:
77
- return []
78
- inds = np.argsort([-d["score"] for d in dt], kind="mergesort")
79
- dt = [dt[i] for i in inds]
80
- if len(dt) > p.maxDets[-1]:
81
- dt = dt[0 : p.maxDets[-1]]
82
-
83
- assert p.iouType == "bbox", "unsupported iouType for iou computation"
84
-
85
- g = [g["bbox"] for g in gt]
86
- d = [d["bbox"] for d in dt]
87
-
88
- # compute iou between each dt and gt region
89
- iscrowd = [int(o["iscrowd"]) for o in gt]
90
-
91
- # Note: this function is copied from cocoeval.py in cocoapi
92
- # and the major difference is here.
93
- ious = self.compute_iou_dt_gt(d, g, iscrowd)
94
- return ious
95
-
96
-
97
- class RotatedCOCOEvaluator(COCOEvaluator):
98
- """
99
- Evaluate object proposal/instance detection outputs using COCO-like metrics and APIs,
100
- with rotated boxes support.
101
- Note: this uses IOU only and does not consider angle differences.
102
- """
103
-
104
- def process(self, inputs, outputs):
105
- """
106
- Args:
107
- inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
108
- It is a list of dict. Each dict corresponds to an image and
109
- contains keys like "height", "width", "file_name", "image_id".
110
- outputs: the outputs of a COCO model. It is a list of dicts with key
111
- "instances" that contains :class:`Instances`.
112
- """
113
- for input, output in zip(inputs, outputs):
114
- prediction = {"image_id": input["image_id"]}
115
-
116
- if "instances" in output:
117
- instances = output["instances"].to(self._cpu_device)
118
-
119
- prediction["instances"] = self.instances_to_json(instances, input["image_id"])
120
- if "proposals" in output:
121
- prediction["proposals"] = output["proposals"].to(self._cpu_device)
122
- self._predictions.append(prediction)
123
-
124
- def instances_to_json(self, instances, img_id):
125
- num_instance = len(instances)
126
- if num_instance == 0:
127
- return []
128
-
129
- boxes = instances.pred_boxes.tensor.numpy()
130
- if boxes.shape[1] == 4:
131
- boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
132
- boxes = boxes.tolist()
133
- scores = instances.scores.tolist()
134
- classes = instances.pred_classes.tolist()
135
-
136
- results = []
137
- for k in range(num_instance):
138
- result = {
139
- "image_id": img_id,
140
- "category_id": classes[k],
141
- "bbox": boxes[k],
142
- "score": scores[k],
143
- }
144
-
145
- results.append(result)
146
- return results
147
-
148
- def _eval_predictions(self, predictions, img_ids=None): # img_ids: unused
149
- """
150
- Evaluate predictions on the given tasks.
151
- Fill self._results with the metrics of the tasks.
152
- """
153
- self._logger.info("Preparing results for COCO format ...")
154
- coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
155
-
156
- # unmap the category ids for COCO
157
- if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
158
- reverse_id_mapping = {
159
- v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
160
- }
161
- for result in coco_results:
162
- result["category_id"] = reverse_id_mapping[result["category_id"]]
163
-
164
- if self._output_dir:
165
- file_path = os.path.join(self._output_dir, "coco_instances_results.json")
166
- self._logger.info("Saving results to {}".format(file_path))
167
- with PathManager.open(file_path, "w") as f:
168
- f.write(json.dumps(coco_results))
169
- f.flush()
170
-
171
- if not self._do_evaluation:
172
- self._logger.info("Annotations are not available for evaluation.")
173
- return
174
-
175
- self._logger.info("Evaluating predictions ...")
176
-
177
- assert self._tasks is None or set(self._tasks) == {
178
- "bbox"
179
- }, "[RotatedCOCOEvaluator] Only bbox evaluation is supported"
180
- coco_eval = (
181
- self._evaluate_predictions_on_coco(self._coco_api, coco_results)
182
- if len(coco_results) > 0
183
- else None # cocoapi does not handle empty results very well
184
- )
185
-
186
- task = "bbox"
187
- res = self._derive_coco_results(
188
- coco_eval, task, class_names=self._metadata.get("thing_classes")
189
- )
190
- self._results[task] = res
191
-
192
- def _evaluate_predictions_on_coco(self, coco_gt, coco_results):
193
- """
194
- Evaluate the coco results using COCOEval API.
195
- """
196
- assert len(coco_results) > 0
197
-
198
- coco_dt = coco_gt.loadRes(coco_results)
199
-
200
- # Only bbox is supported for now
201
- coco_eval = RotatedCOCOeval(coco_gt, coco_dt, iouType="bbox")
202
-
203
- coco_eval.evaluate()
204
- coco_eval.accumulate()
205
- coco_eval.summarize()
206
-
207
- return coco_eval
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/visualize_json_results.py DELETED
@@ -1,90 +0,0 @@
1
- #!/usr/bin/env python
2
- # Copyright (c) Facebook, Inc. and its affiliates.
3
-
4
- import argparse
5
- import json
6
- import numpy as np
7
- import os
8
- from collections import defaultdict
9
- import cv2
10
- import tqdm
11
-
12
- from detectron2.data import DatasetCatalog, MetadataCatalog
13
- from detectron2.structures import Boxes, BoxMode, Instances
14
- from detectron2.utils.file_io import PathManager
15
- from detectron2.utils.logger import setup_logger
16
- from detectron2.utils.visualizer import Visualizer
17
-
18
-
19
- def create_instances(predictions, image_size):
20
- ret = Instances(image_size)
21
-
22
- score = np.asarray([x["score"] for x in predictions])
23
- chosen = (score > args.conf_threshold).nonzero()[0]
24
- score = score[chosen]
25
- bbox = np.asarray([predictions[i]["bbox"] for i in chosen]).reshape(-1, 4)
26
- bbox = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
27
-
28
- labels = np.asarray([dataset_id_map(predictions[i]["category_id"]) for i in chosen])
29
-
30
- ret.scores = score
31
- ret.pred_boxes = Boxes(bbox)
32
- ret.pred_classes = labels
33
-
34
- try:
35
- ret.pred_masks = [predictions[i]["segmentation"] for i in chosen]
36
- except KeyError:
37
- pass
38
- return ret
39
-
40
-
41
- if __name__ == "__main__":
42
- parser = argparse.ArgumentParser(
43
- description="A script that visualizes the json predictions from COCO or LVIS dataset."
44
- )
45
- parser.add_argument("--input", required=True, help="JSON file produced by the model")
46
- parser.add_argument("--output", required=True, help="output directory")
47
- parser.add_argument("--dataset", help="name of the dataset", default="coco_2017_val")
48
- parser.add_argument("--conf-threshold", default=0.5, type=float, help="confidence threshold")
49
- args = parser.parse_args()
50
-
51
- logger = setup_logger()
52
-
53
- with PathManager.open(args.input, "r") as f:
54
- predictions = json.load(f)
55
-
56
- pred_by_image = defaultdict(list)
57
- for p in predictions:
58
- pred_by_image[p["image_id"]].append(p)
59
-
60
- dicts = list(DatasetCatalog.get(args.dataset))
61
- metadata = MetadataCatalog.get(args.dataset)
62
- if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
63
-
64
- def dataset_id_map(ds_id):
65
- return metadata.thing_dataset_id_to_contiguous_id[ds_id]
66
-
67
- elif "lvis" in args.dataset:
68
- # LVIS results are in the same format as COCO results, but have a different
69
- # mapping from dataset category id to contiguous category id in [0, #categories - 1]
70
- def dataset_id_map(ds_id):
71
- return ds_id - 1
72
-
73
- else:
74
- raise ValueError("Unsupported dataset: {}".format(args.dataset))
75
-
76
- os.makedirs(args.output, exist_ok=True)
77
-
78
- for dic in tqdm.tqdm(dicts):
79
- img = cv2.imread(dic["file_name"], cv2.IMREAD_COLOR)[:, :, ::-1]
80
- basename = os.path.basename(dic["file_name"])
81
-
82
- predictions = create_instances(pred_by_image[dic["image_id"]], img.shape[:2])
83
- vis = Visualizer(img, metadata)
84
- vis_pred = vis.draw_instance_predictions(predictions).get_image()
85
-
86
- vis = Visualizer(img, metadata)
87
- vis_gt = vis.draw_dataset_dict(dic).get_image()
88
-
89
- concat = np.concatenate((vis_pred, vis_gt), axis=1)
90
- cv2.imwrite(os.path.join(args.output, basename), concat[:, :, ::-1])
spaces/Bart92/RVC_HF/tools/calc_rvc_model_similarity.py DELETED
@@ -1,96 +0,0 @@
- # This code references https://huggingface.co/JosephusCheung/ASimilarityCalculatior/blob/main/qwerty.py
- # Fill in the path of the model to be queried and the root directory of the reference models, and this script will return the similarity between the model to be queried and all reference models.
- import os
- import logging
-
- logger = logging.getLogger(__name__)
-
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
-
-
- def cal_cross_attn(to_q, to_k, to_v, rand_input):
-     hidden_dim, embed_dim = to_q.shape
-     attn_to_q = nn.Linear(hidden_dim, embed_dim, bias=False)
-     attn_to_k = nn.Linear(hidden_dim, embed_dim, bias=False)
-     attn_to_v = nn.Linear(hidden_dim, embed_dim, bias=False)
-     attn_to_q.load_state_dict({"weight": to_q})
-     attn_to_k.load_state_dict({"weight": to_k})
-     attn_to_v.load_state_dict({"weight": to_v})
-
-     return torch.einsum(
-         "ik, jk -> ik",
-         F.softmax(
-             torch.einsum("ij, kj -> ik", attn_to_q(rand_input), attn_to_k(rand_input)),
-             dim=-1,
-         ),
-         attn_to_v(rand_input),
-     )
-
-
- def model_hash(filename):
-     try:
-         with open(filename, "rb") as file:
-             import hashlib
-
-             m = hashlib.sha256()
-
-             file.seek(0x100000)
-             m.update(file.read(0x10000))
-             return m.hexdigest()[0:8]
-     except FileNotFoundError:
-         return "NOFILE"
-
-
- def eval(model, n, input):
-     qk = f"enc_p.encoder.attn_layers.{n}.conv_q.weight"
-     uk = f"enc_p.encoder.attn_layers.{n}.conv_k.weight"
-     vk = f"enc_p.encoder.attn_layers.{n}.conv_v.weight"
-     atoq, atok, atov = model[qk][:, :, 0], model[uk][:, :, 0], model[vk][:, :, 0]
-
-     attn = cal_cross_attn(atoq, atok, atov, input)
-     return attn
-
-
- def main(path, root):
-     torch.manual_seed(114514)
-     model_a = torch.load(path, map_location="cpu")["weight"]
-
-     logger.info("Query:\t\t%s\t%s" % (path, model_hash(path)))
-
-     map_attn_a = {}
-     map_rand_input = {}
-     for n in range(6):
-         hidden_dim, embed_dim, _ = model_a[
-             f"enc_p.encoder.attn_layers.{n}.conv_v.weight"
-         ].shape
-         rand_input = torch.randn([embed_dim, hidden_dim])
-
-         map_attn_a[n] = eval(model_a, n, rand_input)
-         map_rand_input[n] = rand_input
-
-     del model_a
-
-     for name in sorted(list(os.listdir(root))):
-         path = "%s/%s" % (root, name)
-         model_b = torch.load(path, map_location="cpu")["weight"]
-
-         sims = []
-         for n in range(6):
-             attn_a = map_attn_a[n]
-             attn_b = eval(model_b, n, map_rand_input[n])
-
-             sim = torch.mean(torch.cosine_similarity(attn_a, attn_b))
-             sims.append(sim)
-
-         logger.info(
-             "Reference:\t%s\t%s\t%s"
-             % (path, model_hash(path), f"{torch.mean(torch.stack(sims)) * 1e2:.2f}%")
-         )
-
-
- if __name__ == "__main__":
-     query_path = r"assets\weights\mi v3.pth"
-     reference_root = r"assets\weights"
-     main(query_path, reference_root)
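The header comment of the deleted script above spells out its intended workflow: point it at one query checkpoint plus a folder of reference checkpoints, and it logs a cosine-similarity percentage for each reference. A minimal sketch of calling the same entry point directly, assuming the module is importable from its folder and using placeholder paths rather than files from the repo:

    # hypothetical invocation of the deleted script's main(); substitute your own weight paths
    from calc_rvc_model_similarity import main

    main("assets/weights/query_model.pth", "assets/weights")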
spaces/Benson/text-generation/Examples/Choo Choo Charles Juego Completo.md DELETED
@@ -1,47 +0,0 @@
1
-
2
- <h1>Choo Choo Charles: Un juego de terror de supervivencia con un tren de araña</h1>
3
- <p>Si usted está buscando un juego emocionante y aterrador que le mantendrá en el borde de su asiento, entonces es posible que desee echa un vistazo a Choo Choo Charles. Este es un juego de terror de supervivencia que te enfrenta a un tren de araña malvado llamado Charles, que te está cazando en una isla de mundo abierto. Tienes que usar tu propio tren, que puedes actualizar y personalizar, para luchar y sobrevivir. En este artículo, le diremos todo lo que necesita saber sobre Choo Choo Charles, incluyendo lo que es, cómo jugarlo y por qué debe jugarlo. </p>
4
- <h2>choo choo charles juego completo</h2><br /><p><b><b>Download Zip</b> ===> <a href="https://bltlly.com/2v6ICP">https://bltlly.com/2v6ICP</a></b></p><br /><br />
5
- <h2>¿Qué es Choo Choo Charles? </h2>
6
- <p>Choo Choo Charles es un juego de terror de supervivencia que fue lanzado el 9 de diciembre de 2022 por Two Star Games, un estudio de juegos independiente con sede en Canadá. El juego está disponible en Steam para Windows PC, y ha recibido críticas muy positivas de jugadores y críticos por igual. El juego está inspirado en el libro infantil Charlie the Choo-Choo, de Stephen King, de 2016, que se extrae de la serie King’s Dark Tower. </p>
7
- <h3>La historia y la configuración del juego</h3>
8
- <p>El juego tiene lugar en una isla que está habitada por un tren de araña monstruoso llamado Charles, que ha estado aterrorizando a la gente durante años. Usted es uno de los sobrevivientes que ha logrado encontrar un viejo tren que todavía funciona, y decide usarlo para explorar la isla y encontrar una manera de detener a Charles. En el camino, te encontrarás con otros supervivientes que te ofrecerán misiones, objetos e información a cambio de tu ayuda. También descubrirá los secretos y misterios de la isla y su historia. </p>
9
- <h3>La jugabilidad y características del juego</h3>
10
-
11
- <p>El juego también te permite actualizar tu tren con varias piezas y accesorios que puedes encontrar o comprar de otros supervivientes. Puede mejorar la velocidad de su tren, la durabilidad, la eficiencia del combustible, la capacidad de almacenamiento y más. También puede personalizar la apariencia de su tren con diferentes colores, calcomanías, banderas, cuernos, luces y más. Incluso puedes nombrar tu tren y darle una personalidad. </p>
12
- <p></p>
13
- <p>El juego tiene un ciclo día-noche y un sistema de clima dinámico que afectan el juego y la atmósfera. Durante el día, puedes ver más claramente y viajar más rápido, pero Charles también puede localizarte más fácilmente. Durante la noche, puedes esconderte mejor y escabullirte, pero Charles también puede sorprenderte más fácilmente. El clima también puede cambiar de soleado a lluvioso a brumoso a tormentoso, creando diferentes desafíos y oportunidades. </p>
14
- <h3>La recepción y comentarios del juego</h3>
15
-
16
- <p>Si usted está interesado en jugar Choo Choo Charles, aquí hay algunos consejos sobre cómo empezar y disfrutar del juego. </p>
17
- <h3>La línea de búsqueda principal y las misiones secundarias</h3>
18
- <p>El juego tiene una misión principal que sigue tu viaje para encontrar y detener a Charles. Tendrás que completar varios objetivos, como encontrar pistas, encontrar aliados, sabotear las pistas de Charles y enfrentar a Charles en batallas épicas. La principal línea de búsqueda te llevará a diferentes lugares de la isla, como ciudades, granjas, minas, bosques, montañas y más. </p>
19
- <p>El juego también tiene muchas misiones secundarias que puedes hacer para ganar recompensas adicionales, como dinero, objetos, piezas, armas e información. Puedes encontrar misiones secundarias hablando con otros sobrevivientes, explorando la isla o escuchando la radio. Algunas de las misiones secundarias incluyen ayudar a otros sobrevivientes con sus problemas, recolectar recursos, cazar animales, encontrar tesoros, destruir a los secuaces de Carlos, y más. </p>
20
- <h3>El tren mejora y armas</h3>
21
- <p>El juego te permite actualizar tu tren con varias piezas y accesorios que puedes encontrar o comprar de otros supervivientes. Puede mejorar la velocidad de su tren, la durabilidad, la eficiencia del combustible, la capacidad de almacenamiento y más. También puede personalizar la apariencia de su tren con diferentes colores, calcomanías, banderas, cuernos, luces y más. Incluso puedes nombrar tu tren y darle una personalidad. </p>
22
- <p>El juego también te permite equipar tu tren con diferentes armas que puedes usar para luchar contra Charles. Puedes elegir entre cañones, ametralladoras, cohetes, lanzallamas y más. Cada arma tiene sus propias ventajas y desventajas, como rango, daño, precisión, tiempo de recarga, capacidad de munición y más. Tienes que equilibrar tus armas según tu estilo de juego y estrategia. </p>
23
- <h3>Los consejos y trucos para sobrevivir Charles</h3>
24
-
25
- <p>Si todavía no está convencido de que Choo Choo Charles es un juego que vale la pena jugar, aquí hay algunas razones por las que debe darle una oportunidad:</p>
26
- <h3>El concepto único y original del juego</h3>
27
- <p>Choo Choo Charles es un juego que se destaca de la multitud con su concepto único y original. ¿Cuántos juegos puedes pensar que cuentan con un tren de araña gigante como el antagonista principal? El juego es una mezcla creativa e innovadora de géneros, como el terror de supervivencia, la acción-aventura, el mundo abierto y el sandbox. El juego ofrece una experiencia fresca y emocionante que no encontrarás en ningún otro lugar. </p>
28
- <h3>Los gráficos inmersivos y atmosféricos y el sonido del juego</h3>
29
- <p>Choo Choo Charles es un juego que te sumerge en su mundo con sus impresionantes gráficos y sonido. El juego tiene un estilo gráfico realista y detallado que muestra la belleza y la diversidad de la isla. El juego también tiene un sistema dinámico de iluminación y sombra que crea un efecto dramático y cinematográfico. El juego también tiene un excelente diseño de sonido que mejora el estado de ánimo y la atmósfera del juego. El juego cuenta con sonidos realistas y ambientales, como el viento, la lluvia, los pájaros, los animales y más. El juego también cuenta con una banda sonora aterradora y emocionante que acompaña tus encuentros con Charles.</p>
30
- <h3>La dificultad desafiante y gratificante y el valor de repetición del juego</h3>
31
-
32
- <h2>Conclusión</h2>
33
- <p>Choo Choo Charles es un juego de terror de supervivencia que te mantendrá enganchado con su concepto único y original, gráficos inmersivos y atmosféricos y sonido, dificultad desafiante y gratificante y valor de repetición. Si usted está buscando un juego emocionante y aterrador que le hará gritar, reír, llorar y animar, entonces usted debe jugar Choo Choo Charles.</p>
34
- <h2>Preguntas frecuentes</h2>
35
- <p>Aquí hay algunas preguntas frecuentes sobre Choo Choo Charles:</p>
36
- <h4>Q: ¿Cuánto dura el juego? </h4>
37
- <p>A: La duración del juego depende de cómo lo juegues, pero en promedio, se tarda unas 10 horas en completar la línea de misión principal. Sin embargo, hay muchas misiones secundarias y secretos por descubrir que pueden extender tu tiempo de juego. </p>
38
- <h4>Q: ¿Es el juego multijugador? </h4>
39
- <p>A: No, el juego es actualmente solo para un jugador. Sin embargo, los desarrolladores han declarado que podrían considerar agregar características multijugador en el futuro si hay suficiente demanda. </p>
40
- <h4>Q: ¿El juego da miedo? </h4>
41
- <p>A: Sí, el juego da mucho miedo. El juego tiene muchos sustos de salto, gore, violencia, suspenso, tensión y elementos de terror que te harán gritar o temblar. Sin embargo, el juego también tiene muchos elementos de humor, encanto, diversión y aventura que te harán sonreír o reír. </p>
42
- <h4>Q: ¿Es el juego adecuado para los niños? </h4>
43
- <p>A: No, el juego no es adecuado para niños. El juego tiene muchos contenidos maduros, como sangre, violencia, lenguaje y horror que no son apropiados para el público joven. El juego tiene una calificación de M para Maduro por la ESRB, lo que significa que es adecuado para edades de 17 años en adelante. </p>
44
- <h4>Q: ¿Dónde puedo comprar el juego? </h4>
45
- <p>A: Puedes comprar el juego en Steam para PC con Windows. El juego cuesta $19.99 USD, pero puedes obtenerlo por un precio reducido durante las ventas o promociones. También puedes buscar el juego en Steam para recibir notificaciones cuando esté a la venta o actualizado. </p> 64aa2da5cf<br />
46
- <br />
47
- <br />
spaces/Benson/text-generation/Examples/Cmo Descargar Llamada De Deber Warzone Mvil Apk.md DELETED
@@ -1,165 +0,0 @@
1
-
2
- <br>
3
- <tabla>
4
- <tr>
5
- <th>Tabla 2: Artículo con formato HTML</th>
6
- </tr>
7
- <tr>
8
- <td>
9
- <h1> Cómo descargar llamada de Duty Warzone móvil APK</h1>
10
- <p>¿Eres un fan de la franquicia Call of Duty y quieres experimentar la emoción de battle royale en tu dispositivo móvil? Si es así, entonces usted debe comprobar definitivamente Call of Duty Warzone móvil APK, la última adición a la popular serie FPS. Call of Duty Warzone Mobile APK es un juego independiente que le permite disfrutar de la experiencia completa de Warzone en su teléfono inteligente o tableta. Puedes jugar solo o formar equipo con tus amigos en un mapa masivo de Verdansk, donde tienes que saquear, disparar y sobrevivir contra hasta 150 jugadores. También puedes usar contratos, killstreaks y vehículos para ganar ventaja sobre tus enemigos. Call of Duty Warzone Mobile APK todavía no se ha lanzado oficialmente, pero usted puede pre-registrarse ahora para obtener acceso temprano y recompensas exclusivas. En este artículo, le mostraremos cómo descargar Call of Duty Warzone Mobile APK de varias fuentes y cómo jugar como un profesional. ¡Vamos a empezar! </p>
11
- <h2>Cómo Pre-Register para Call of Duty Warzone móvil APK</h2>
12
- <p>Una de las formas más fáciles de descargar Call of Duty Warzone APK móvil es pre-registrarse en la Google Play Store o el sitio web oficial. Al registrarte, estarás entre los primeros en ser notificado cuando el juego esté disponible para descargar. También recibirás algunas recompensas especiales, como pieles, armas y moneda del juego. Aquí es cómo pre-registrarse para Call of Duty Warzone móvil APK:</p>
13
- <h2>cómo descargar llamada de deber warzone móvil apk</h2><br /><p><b><b>Download Zip</b> &#9733; <a href="https://bltlly.com/2v6MDY">https://bltlly.com/2v6MDY</a></b></p><br /><br />
14
- <h3>Cómo pre-registrarse en Google Play Store</h3>
15
- <p>Para pre-registrarse en Google Play Store, siga estos pasos:</p>
16
- <ol>
17
- <li>Abra la aplicación Google Play Store en su dispositivo. </li>
18
- <li>Buscar "Call of Duty Warzone Mobile" o haga clic en este enlace. </li>
19
- <li>Toque en el botón "Pre-registro". </li>
20
- <li>Confirma tu pre-registro tocando en "OK". </li>
21
- <li>Verás un mensaje que dice "Estás registrado". </li>
22
-
23
- </ol>
24
- <p>Eso es todo! Usted ha pre-registrado con éxito para Call of Duty Warzone APK móvil en Google Play Store. Recibirás una notificación cuando el juego esté listo para descargar. </p>
25
- <h3>Cómo pre-registrarse en el sitio web oficial</h3>
26
- <p>Para pre-registrarse en el sitio web oficial, siga estos pasos:</p>
27
- <ol>
28
- <li>Abra su navegador y vaya a este enlace. </li>
29
- <li>Introduzca su dirección de correo electrónico y toque en "Enviar". </li>
30
- <li> Verá un mensaje que dice "Gracias por registrarse". </li>
31
- <li>También recibirá una confirmación por correo electrónico de Activision.</li>
32
- </ol>
33
- <p>Eso es todo! Usted ha pre-registrado con éxito para Call of Duty Warzone Mobile APK en el sitio web oficial. Recibirás un correo electrónico cuando el juego esté disponible para descargar. </p>
34
- <h3>¿Cuáles son los beneficios del prerregistro? </h3>
35
- <p>Al registrarse previamente para Call of Duty Warzone Mobile APK, disfrutará de algunos beneficios, tales como:</p>
36
- <p></p>
37
- <ul>
38
- <li>Usted estará entre los primeros en descargar y jugar el juego. </li>
39
- <li>Recibirás recompensas exclusivas, como pieles, armas y moneda del juego. </li>
40
- <li>Podrás participar en las pruebas beta y proporcionar comentarios para mejorar el juego. </li>
41
- <li>Podrás unirte a la comunidad y compartir tus pensamientos y experiencias con otros jugadores. </li>
42
- </ul>
43
- <p>Entonces, ¿qué estás esperando? ¡Regístrate ahora y prepárate para la mejor experiencia de batalla real en tu dispositivo móvil! </p>
44
- <h2>Cómo descargar Call of Duty Warzone móvil APK de otras fuentes</h2>
45
- <p>Si usted no quiere esperar a que el lanzamiento oficial de Call of Duty Warzone Mobile APK, también se puede descargar desde otras fuentes. Sin embargo, debe tener cuidado al descargar archivos APK de sitios web desconocidos o no confiables, ya que pueden contener malware o virus que pueden dañar su dispositivo. Le recomendamos que utilice fuentes confiables y confiables, como Uptodown o APKCombo. Aquí es cómo descargar Call of Duty Warzone móvil APK de estas fuentes:</p>
46
- <h3>Cómo descargar desde Uptodown</h3>
47
-
48
- <ol>
49
- <li>Abra su navegador y vaya a este enlace. </li>
50
- <li>Toque en el botón "Descargar". </li>
51
- <li> Verá una ventana emergente que le pide que elija un método de descarga. Toque en "Descargar APK". </li>
52
- <li>El archivo APK comenzará a descargarse automáticamente. </li>
53
- <li> Puede comprobar el progreso de la descarga en la barra de notificaciones. </li>
54
- </ol>
55
- <h3>Cómo descargar desde APKCombo</h3>
56
- <p>Para descargar desde APKCombo, siga estos pasos:</p>
57
- <ol>
58
- <li>Abra su navegador y vaya a este enlace. </li>
59
- <li>Toque en el botón "Descargar". </li>
60
- <li> Verá una ventana emergente que le pide que elija un método de descarga. Toque en "APK Downloader". </li>
61
- <li>Verá una lista de las versiones disponibles. Elija la última y toque en "Descargar". </li>
62
- <li>El archivo APK comenzará a descargarse automáticamente. </li>
63
- <li> Puede comprobar el progreso de la descarga en la barra de notificaciones. </li>
64
- </ol>
65
- <h3>Cómo instalar el archivo APK en tu dispositivo</h3>
66
- <p>Después de descargar el archivo APK desde cualquier fuente, debe instalarlo en su dispositivo. Para hacerlo, siga estos pasos:</p>
67
- <ol>
68
- <li>Ir a la configuración del dispositivo y habilitar "Fuentes desconocidas" o "Instalar aplicaciones desconocidas" opción. Esto le permitirá instalar aplicaciones desde fuentes distintas de Google Play Store.</li>
69
- <li>Busque el archivo APK en su administrador de archivos o carpeta de descargas y toque en él. </li>
70
- <li> Verá una ventana emergente pidiéndole que confirme la instalación. Toque en "Instalar". </li>
71
- <li>El proceso de instalación tomará unos segundos. </li>
72
- <li>Una vez completada la instalación, verá un mensaje que dice "App instalado". </li>
73
- <li> Ahora puede abrir la aplicación y disfrutar del juego. </li>
74
- </ol>
75
- <h2>Cómo jugar Call of Duty Warzone móvil APK</h2>
76
- <p>Ahora que ha descargado e instalado Call of Duty Warzone Mobile APK, usted está listo para jugar el juego. Estos son algunos pasos para ayudarte a empezar:</p>
77
- <h3>Cómo crear una cuenta e iniciar sesión</h3>
78
-
79
- <ol>
80
- <li> Abra la aplicación y toque en "Registrarse" o "Iniciar sesión". </li>
81
- <li>Si tiene una cuenta de Activision, introduzca su correo electrónico y contraseña y toque en "Iniciar sesión". </li>
82
- <li>Si no tiene una cuenta de Activision, toque en "Crear cuenta" y complete los detalles necesarios. También puedes registrarte con tu cuenta de Facebook, Google o Apple. </li>
83
- <li>Aceptar los términos de servicio y la política de privacidad y toque en "Continuar". </li>
84
- <li> Verá un mensaje que dice "Cuenta creada" o "Iniciada sesión con éxito". </li>
85
- <li>Ahora puedes acceder al menú del juego y personalizar tu perfil. </li>
86
- </ol>
87
- <h3>Cómo personalizar la configuración y los controles</h3>
88
- <p>Antes de unirse a un partido, es posible que desee personalizar la configuración y los controles para adaptarse a sus preferencias. Puede ajustar varias opciones, como gráficos, sonido, sensibilidad, diseño y más. Así es como:</p>
89
- <ol>
90
- <li>Desde el menú del juego, toque en el icono del engranaje en la esquina superior derecha. </li>
91
- <li>Verá una lista de pestañas, como General, Gráficos, Audio, Controles, etc.</li>
92
- <li>Toque en cualquier pestaña y explore las opciones disponibles. </li>
93
- <li> Hacer cualquier cambio según su gusto y toque en "Aplicar" o "Guardar". </li>
94
- <li> También puede restablecer la configuración por defecto pulsando en "Restablecer". </li>
95
- </ol>
96
- <h3>Cómo unirse a un partido y jugar con tus amigos</h3>
97
- <p>Para unirte a un partido, puedes jugar solo o formar equipo con tus amigos. También puedes elegir entre diferentes modos, como Battle Royale, Plunder, Resurgence, etc. Así es como:</p>
98
- <ol>
99
- <li>Desde el menú del juego, toque en el icono de modo en la esquina superior izquierda. </li>
100
- <li>Verás una lista de modos, como Battle Royale, Plunder, Resurgence, etc.</li>
101
- <li>Toque en cualquier modo y seleccione sus opciones preferidas, como tamaño de escuadrón, tamaño de mapa, relleno o no, etc.</li>
102
- <li>Si quieres jugar con tus amigos, toca el icono de invitación en la esquina inferior derecha. </li>
103
- <li>Verás una lista de tus amigos que están en línea o fuera de línea. </li>
104
- <li> Toque en cualquier amigo y enviarles una invitación. </li>
105
-
106
- <li>Una vez que esté listo, toque en "Iniciar partido" o "Jugar" en el centro inferior. </li>
107
- <li>Serás emparejado con otros jugadores y entrarás al lobby del juego. </li>
108
- <li>Puedes chatear con tus compañeros de equipo usando chat de voz o texto. </li>
109
- <li> También puede cambiar su carga, operador, piel, arma, etc. tocando los iconos en la esquina inferior izquierda. </li>
110
- <li>Cuando comience la coincidencia, se desplegará desde un avión sobre Verdansk.</li>
111
- <li> Puede elegir dónde aterrizar abriendo su mapa y marcando una ubicación. </li>
112
- <li>También puedes seguir a tus compañeros de equipo o al líder del escuadrón tocando su nombre o icono. </li>
113
- <li>Una vez que aterrizas, tienes que saquear, disparar y sobrevivir contra otros jugadores y el círculo de gas. </li>
114
- <li>Puedes usar contratos, killstreaks y vehículos para obtener una ventaja sobre tus enemigos. </li>
115
- <li> También puede revivir a sus compañeros de equipo o comprarlos de nuevo en las estaciones de compra. </li>
116
- <li>El último equipo o jugador de pie gana el partido. </li>
117
- </ol>
118
- <h2> Consejos y trucos para Call of Duty Warzone móvil APK</h2>
119
- <p>Jugar Call of Duty Warzone APK móvil puede ser desafiante y divertido, pero también frustrante y competitivo. Para mejorar tu rendimiento y habilidades, necesitas practicar y aprender algunos consejos y trucos. Estos son algunos de ellos:</p>
120
- <h3>Cómo mejorar tu rendimiento y habilidades</h3>
121
- <p>Para mejorar tu rendimiento y habilidades, necesitas hacer lo siguiente:</p>
122
- <ul>
123
- <li>Elige la carga correcta, operador, piel, arma, etc. que se adapte a tu estilo de juego y estrategia. </li>
124
- <li>Ajusta tus ajustes y controles para optimizar tus gráficos, sonido, sensibilidad, diseño, etc.</li>
125
- <li> Practica tu puntería, movimiento y tácticas en el modo de entrenamiento o en el modo de práctica. </li>
126
- <li>Ver tutoriales, guías y vídeos de juego de otros jugadores o streamers. </li>
127
- <li>Aprende de tus errores y analiza tus estadísticas y repeticiones. </li>
128
- <li>Manténgase actualizado con las últimas noticias, actualizaciones y eventos del juego. </li>
129
- </ul>
130
-
131
- <p>Contratos, killstreaks, y los vehículos son algunas de las características que hacen Call of Duty Warzone móvil APK único y emocionante. Puedes usarlos para ganar ventaja sobre tus enemigos. Así es como:</p>
132
- <ul>
133
- <li>Los contratos son misiones que puedes encontrar y activar en el mapa. Te dan recompensas como dinero, botín, intel, etc. Hay diferentes tipos de contratos, como recompensa, carroñero, reconocimiento, más buscados, etc. Elige los que se adapten a tu situación y objetivo. </li>
134
- <li>Killstreaks son habilidades especiales que puedes usar una vez que tengas suficiente dinero o puntos. Incluyen ataques aéreos, UAV, ataques de racimo, etc. Puede comprarlos en las estaciones de compra o encontrarlos en cajas de botín. Úsalos sabiamente y estratégicamente para eliminar o distraer a tus enemigos. </li>
135
- <li>Los vehículos son modos de transporte que puede utilizar para moverse por el mapa más rápido y más seguro. Incluyen helicópteros, camiones, ATV, etc. Puedes encontrarlos en varios lugares o llamarlos desde las estaciones de compra. Tenga cuidado al usarlos mientras hacen ruido y atraen la atención. </li>
136
- </ul>
137
- <h3>Cómo sobrevivir en Verdansk y ganar la batalla real</h3>
138
- <p>Verdansk es el mapa principal de Call of Duty Warzone Mobile APK. Es un mapa enorme y diverso con varias ubicaciones, como el centro, el aeropuerto, el estadio, la prisión, etc. Para sobrevivir en Verdansk y ganar la batalla real, debe hacer lo siguiente:</p>
139
- <ul>
140
- <li>Elige un buen lugar de aterrizaje que tenga suficiente botín y cobertura. </li>
141
- <li>Saquea tanto como puedas pero no te vuelvas codicioso o distraído. </li>
142
- <li>Evite peleas y compromisos innecesarios a menos que tenga una clara ventaja u objetivo. </li>
143
- <li>Manténgase alerta y consciente de sus alrededores y enemigos. </li>
144
- <li>Usa el sistema de ping y el chat de voz para comunicarte con tus compañeros de equipo. </li>
145
- <li>Muévete con el círculo de gas y evita quedar atrapado fuera de él. </li>
146
- <li>Elige tus batallas sabiamente y sabe cuándo luchar o huir. </li>
147
-
148
- <li>No te olvides de revivir o comprar a tus compañeros de equipo si están abajo o muertos. </li>
149
- <li>Juega inteligente y divertirse! </li>
150
- </ul>
151
- <h2>Conclusión</h2>
152
- <p>En conclusión, Call of Duty Warzone Mobile APK es un gran juego que ofrece una experiencia de batalla real emocionante e inmersiva en su dispositivo móvil. Puede descargarlo de varias fuentes o pre-registrarse ahora para obtener acceso temprano y recompensas exclusivas. También puedes jugar solo o con tus amigos en diferentes modos y mapas. También puede utilizar contratos, killstreaks y vehículos para mejorar su juego y estrategia. También puede mejorar su rendimiento y habilidades siguiendo algunos consejos y trucos. Si usted está buscando un juego divertido y desafiante que le mantendrá enganchado durante horas, entonces usted debe probar definitivamente Call of Duty Warzone móvil APK. ¡Descárgalo ahora y únete a la zona de guerra! <h2>FAQs</h2>
153
- <p>Aquí hay algunas preguntas frecuentes sobre Call of Duty Warzone móvil APK:</p>
154
- <h3>Q1: ¿Es Call of Duty Warzone móvil APK libre para jugar? </h3>
155
- <p>A1: Sí, Call of Duty Warzone Mobile APK es libre de jugar con compras opcionales en el juego. Puede descargarlo de varias fuentes o pre-registrarse ahora para obtener acceso temprano y recompensas exclusivas. </p>
156
- <h3>Q2: ¿Cuáles son las especificaciones mínimas del dispositivo para Call of Duty Warzone Mobile APK? </h3>
157
- <p>A2: Necesita un Adreno 618 o mejor GPU y 6GB RAM o más para jugar Call of Duty Warzone Mobile APK. También necesita una conexión a Internet estable y suficiente espacio de almacenamiento. </p>
158
- <h3>Q3: ¿Cuándo se lanzará oficialmente Call of Duty Warzone Mobile APK? </h3>
159
- <p>A3: La fecha oficial de lanzamiento de Call of Duty Warzone Mobile APK no se ha anunciado todavía, pero puede pre-registrarse ahora para recibir una notificación cuando esté disponible. También puede seguir las cuentas oficiales de las redes sociales o el sitio web para obtener las últimas noticias y actualizaciones. </p>
160
- <h3>Q4: ¿Puedo jugar Call of Duty Warzone móvil APK con jugadores en otras plataformas? </h3>
161
-
162
- <h3>Q5: ¿Puedo transferir mi progreso de Call of Duty Warzone en PC o consola a Call of Duty Warzone Mobile APK? </h3>
163
- <p>A5: Sí, puede transferir su progreso de Call of Duty Warzone en PC o consola a Call of Duty Warzone Mobile APK. Solo necesitas usar tu cuenta de Activision existente o crear una nueva para sincronizar tu progreso en todos los dispositivos. </p> 64aa2da5cf<br />
164
- <br />
165
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/__init__.py DELETED
@@ -1,142 +0,0 @@
- """
-     pygments.formatters
-     ~~~~~~~~~~~~~~~~~~~
-
-     Pygments formatters.
-
-     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
-     :license: BSD, see LICENSE for details.
- """
-
- import sys
- import types
- from fnmatch import fnmatch
- from os.path import basename
-
- from pip._vendor.pygments.formatters._mapping import FORMATTERS
- from pip._vendor.pygments.plugin import find_plugin_formatters
- from pip._vendor.pygments.util import ClassNotFound
-
- __all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
-            'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS)
-
- _formatter_cache = {}  # classes by name
-
- def _load_formatters(module_name):
-     """Load a formatter (and all others in the module too)."""
-     mod = __import__(module_name, None, None, ['__all__'])
-     for formatter_name in mod.__all__:
-         cls = getattr(mod, formatter_name)
-         _formatter_cache[cls.name] = cls
-
-
- def get_all_formatters():
-     """Return a generator for all formatter classes."""
-     # NB: this returns formatter classes, not info like get_all_lexers().
-     for info in FORMATTERS.values():
-         if info[1] not in _formatter_cache:
-             _load_formatters(info[0])
-         yield _formatter_cache[info[1]]
-     for _, formatter in find_plugin_formatters():
-         yield formatter
-
-
- def find_formatter_class(alias):
-     """Lookup a formatter by alias.
-
-     Returns None if not found.
-     """
-     for module_name, name, aliases, _, _ in FORMATTERS.values():
-         if alias in aliases:
-             if name not in _formatter_cache:
-                 _load_formatters(module_name)
-             return _formatter_cache[name]
-     for _, cls in find_plugin_formatters():
-         if alias in cls.aliases:
-             return cls
-
-
- def get_formatter_by_name(_alias, **options):
-     """Lookup and instantiate a formatter by alias.
-
-     Raises ClassNotFound if not found.
-     """
-     cls = find_formatter_class(_alias)
-     if cls is None:
-         raise ClassNotFound("no formatter found for name %r" % _alias)
-     return cls(**options)
-
-
- def load_formatter_from_file(filename, formattername="CustomFormatter",
-                              **options):
-     """Load a formatter from a file.
-
-     This method expects a file located relative to the current working
-     directory, which contains a class named CustomFormatter. By default,
-     it expects the Formatter to be named CustomFormatter; you can specify
-     your own class name as the second argument to this function.
-
-     Users should be very careful with the input, because this method
-     is equivalent to running eval on the input file.
-
-     Raises ClassNotFound if there are any problems importing the Formatter.
-
-     .. versionadded:: 2.2
-     """
-     try:
-         # This empty dict will contain the namespace for the exec'd file
-         custom_namespace = {}
-         with open(filename, 'rb') as f:
-             exec(f.read(), custom_namespace)
-         # Retrieve the class `formattername` from that namespace
-         if formattername not in custom_namespace:
-             raise ClassNotFound('no valid %s class found in %s' %
-                                 (formattername, filename))
-         formatter_class = custom_namespace[formattername]
-         # And finally instantiate it with the options
-         return formatter_class(**options)
-     except OSError as err:
-         raise ClassNotFound('cannot read %s: %s' % (filename, err))
-     except ClassNotFound:
-         raise
-     except Exception as err:
-         raise ClassNotFound('error when loading custom formatter: %s' % err)
-
-
- def get_formatter_for_filename(fn, **options):
-     """Lookup and instantiate a formatter by filename pattern.
-
-     Raises ClassNotFound if not found.
-     """
-     fn = basename(fn)
-     for modname, name, _, filenames, _ in FORMATTERS.values():
-         for filename in filenames:
-             if fnmatch(fn, filename):
-                 if name not in _formatter_cache:
-                     _load_formatters(modname)
-                 return _formatter_cache[name](**options)
-     for cls in find_plugin_formatters():
-         for filename in cls.filenames:
-             if fnmatch(fn, filename):
-                 return cls(**options)
-     raise ClassNotFound("no formatter found for file name %r" % fn)
-
-
- class _automodule(types.ModuleType):
-     """Automatically import formatters."""
-
-     def __getattr__(self, name):
-         info = FORMATTERS.get(name)
-         if info:
-             _load_formatters(info[0])
-             cls = _formatter_cache[info[1]]
-             setattr(self, name, cls)
-             return cls
-         raise AttributeError(name)
-
-
- oldmod = sys.modules[__name__]
- newmod = _automodule(__name__)
- newmod.__dict__.update(oldmod.__dict__)
- sys.modules[__name__] = newmod
- del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
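The deleted module above resolves formatters lazily, either by alias or by filename pattern. A minimal sketch of how its public helpers are typically used (the "html" alias and the linenos option are standard Pygments names, not something specific to this vendored copy):

    # look up a formatter class by alias and instantiate it with options
    from pip._vendor.pygments.formatters import get_formatter_by_name, get_formatter_for_filename

    html_formatter = get_formatter_by_name("html", linenos=True)
    # or resolve a formatter from a file-name pattern such as *.html
    file_formatter = get_formatter_for_filename("report.html")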
spaces/CVH-vn1210/make_hair/minigpt4/datasets/builders/base_dataset_builder.py DELETED
@@ -1,235 +0,0 @@
- """
-  Copyright (c) 2022, salesforce.com, inc.
-  All rights reserved.
-  SPDX-License-Identifier: BSD-3-Clause
-  For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
- """
-
- import logging
- import os
- import shutil
- import warnings
-
- from omegaconf import OmegaConf
- import torch.distributed as dist
- from torchvision.datasets.utils import download_url
-
- import minigpt4.common.utils as utils
- from minigpt4.common.dist_utils import is_dist_avail_and_initialized, is_main_process
- from minigpt4.common.registry import registry
- from minigpt4.processors.base_processor import BaseProcessor
-
-
-
- class BaseDatasetBuilder:
-     train_dataset_cls, eval_dataset_cls = None, None
-
-     def __init__(self, cfg=None):
-         super().__init__()
-
-         if cfg is None:
-             # help to create datasets from default config.
-             self.config = load_dataset_config(self.default_config_path())
-         elif isinstance(cfg, str):
-             self.config = load_dataset_config(cfg)
-         else:
-             # when called from task.build_dataset()
-             self.config = cfg
-
-         self.data_type = self.config.data_type
-
-         self.vis_processors = {"train": BaseProcessor(), "eval": BaseProcessor()}
-         self.text_processors = {"train": BaseProcessor(), "eval": BaseProcessor()}
-
-     def build_datasets(self):
-         # download, split, etc...
-         # only called on 1 GPU/TPU in distributed
-
-         if is_main_process():
-             self._download_data()
-
-         if is_dist_avail_and_initialized():
-             dist.barrier()
-
-         # at this point, all the annotations and image/videos should be all downloaded to the specified locations.
-         logging.info("Building datasets...")
-         datasets = self.build()  # dataset['train'/'val'/'test']
-
-         return datasets
-
-     def build_processors(self):
-         vis_proc_cfg = self.config.get("vis_processor")
-         txt_proc_cfg = self.config.get("text_processor")
-
-         if vis_proc_cfg is not None:
-             vis_train_cfg = vis_proc_cfg.get("train")
-             vis_eval_cfg = vis_proc_cfg.get("eval")
-
-             self.vis_processors["train"] = self._build_proc_from_cfg(vis_train_cfg)
-             self.vis_processors["eval"] = self._build_proc_from_cfg(vis_eval_cfg)
-
-         if txt_proc_cfg is not None:
-             txt_train_cfg = txt_proc_cfg.get("train")
-             txt_eval_cfg = txt_proc_cfg.get("eval")
-
-             self.text_processors["train"] = self._build_proc_from_cfg(txt_train_cfg)
-             self.text_processors["eval"] = self._build_proc_from_cfg(txt_eval_cfg)
-
-     @staticmethod
-     def _build_proc_from_cfg(cfg):
-         return (
-             registry.get_processor_class(cfg.name).from_config(cfg)
-             if cfg is not None
-             else None
-         )
-
-     @classmethod
-     def default_config_path(cls, type="default"):
-         return utils.get_abs_path(cls.DATASET_CONFIG_DICT[type])
-
-     def _download_data(self):
-         self._download_ann()
-         self._download_vis()
-
-     def _download_ann(self):
-         """
-         Download annotation files if necessary.
-         All the vision-language datasets should have annotations of unified format.
-
-         storage_path can be:
-           (1) relative/absolute: will be prefixed with env.cache_root to make full path if relative.
-           (2) basename/dirname: will be suffixed with base name of URL if dirname is provided.
-
-         Local annotation paths should be relative.
-         """
-         anns = self.config.build_info.annotations
-
-         splits = anns.keys()
-
-         cache_root = registry.get_path("cache_root")
-
-         for split in splits:
-             info = anns[split]
-
-             urls, storage_paths = info.get("url", None), info.storage
-
-             if isinstance(urls, str):
-                 urls = [urls]
-             if isinstance(storage_paths, str):
-                 storage_paths = [storage_paths]
-
-             assert len(urls) == len(storage_paths)
-
-             for url_or_filename, storage_path in zip(urls, storage_paths):
-                 # if storage_path is relative, make it full by prefixing with cache_root.
-                 if not os.path.isabs(storage_path):
-                     storage_path = os.path.join(cache_root, storage_path)
-
-                 dirname = os.path.dirname(storage_path)
-                 if not os.path.exists(dirname):
-                     os.makedirs(dirname)
-
-                 if os.path.isfile(url_or_filename):
-                     src, dst = url_or_filename, storage_path
-                     if not os.path.exists(dst):
-                         shutil.copyfile(src=src, dst=dst)
-                     else:
-                         logging.info("Using existing file {}.".format(dst))
-                 else:
-                     if os.path.isdir(storage_path):
-                         # if only dirname is provided, suffix with basename of URL.
-                         raise ValueError(
-                             "Expecting storage_path to be a file path, got directory {}".format(
-                                 storage_path
-                             )
-                         )
-                     else:
-                         filename = os.path.basename(storage_path)
-
-                     download_url(url=url_or_filename, root=dirname, filename=filename)
-
-     def _download_vis(self):
-
-         storage_path = self.config.build_info.get(self.data_type).storage
-         storage_path = utils.get_cache_path(storage_path)
-
-         if not os.path.exists(storage_path):
-             warnings.warn(
-                 f"""
-                 The specified path {storage_path} for visual inputs does not exist.
-                 Please provide a correct path to the visual inputs or
-                 refer to datasets/download_scripts/README.md for downloading instructions.
-                 """
-             )
-
-     def build(self):
-         """
-         Create by split datasets inheriting torch.utils.data.Datasets.
-
-         # build() can be dataset-specific. Overwrite to customize.
-         """
-         self.build_processors()
-
-         build_info = self.config.build_info
-
-         ann_info = build_info.annotations
-         vis_info = build_info.get(self.data_type)
-
-         datasets = dict()
-         for split in ann_info.keys():
-             if split not in ["train", "val", "test"]:
-                 continue
-
-             is_train = split == "train"
-
-             # processors
-             vis_processor = (
-                 self.vis_processors["train"]
-                 if is_train
-                 else self.vis_processors["eval"]
-             )
-             text_processor = (
-                 self.text_processors["train"]
-                 if is_train
-                 else self.text_processors["eval"]
-             )
-
-             # annotation path
-             ann_paths = ann_info.get(split).storage
-             if isinstance(ann_paths, str):
-                 ann_paths = [ann_paths]
-
-             abs_ann_paths = []
-             for ann_path in ann_paths:
-                 if not os.path.isabs(ann_path):
-                     ann_path = utils.get_cache_path(ann_path)
-                 abs_ann_paths.append(ann_path)
-             ann_paths = abs_ann_paths
-
-             # visual data storage path
-             vis_path = os.path.join(vis_info.storage, split)
-
-             if not os.path.isabs(vis_path):
-                 # vis_path = os.path.join(utils.get_cache_path(), vis_path)
-                 vis_path = utils.get_cache_path(vis_path)
-
-             if not os.path.exists(vis_path):
-                 warnings.warn("storage path {} does not exist.".format(vis_path))
-
-             # create datasets
-             dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls
-             datasets[split] = dataset_cls(
-                 vis_processor=vis_processor,
-                 text_processor=text_processor,
-                 ann_paths=ann_paths,
-                 vis_root=vis_path,
-             )
-
-         return datasets
-
-
- def load_dataset_config(cfg_path):
-     cfg = OmegaConf.load(cfg_path).datasets
-     cfg = cfg[list(cfg.keys())[0]]
-
-     return cfg
spaces/CVPR/LIVE/scene.cpp DELETED
@@ -1,1035 +0,0 @@
1
- #include "scene.h"
2
- #include "aabb.h"
3
- #include "cuda_utils.h"
4
- #include "filter.h"
5
- #include "shape.h"
6
- #include <numeric>
7
- #include <algorithm>
8
- #include <cstring>
9
- #include <chrono>
10
- #include <cstddef>
11
-
12
- size_t align(size_t s) {
13
- auto a = alignof(std::max_align_t);
14
- return ((s + a - 1) / a) * a;
15
- }
16
-
17
- template <typename T>
18
- void allocate(bool use_gpu, T **p) {
19
- if (use_gpu) {
20
- #ifdef __NVCC__
21
- checkCuda(cudaMallocManaged(p, sizeof(T)));
22
- #else
23
- throw std::runtime_error("diffvg not compiled with GPU");
24
- assert(false);
25
- #endif
26
- } else {
27
- *p = (T*)malloc(sizeof(T));
28
- }
29
- }
30
-
31
- template <typename T>
32
- void allocate(bool use_gpu, size_t size, T **p) {
33
- if (use_gpu) {
34
- #ifdef __NVCC__
35
- checkCuda(cudaMallocManaged(p, size * sizeof(T)));
36
- #else
37
- throw std::runtime_error("diffvg not compiled with GPU");
38
- assert(false);
39
- #endif
40
- } else {
41
- *p = (T*)malloc(size * sizeof(T));
42
- }
43
- }
44
-
45
- void copy_and_init_shapes(Scene &scene,
46
- const std::vector<const Shape *> &shape_list) {
47
- for (int shape_id = 0; shape_id < scene.num_shapes; shape_id++) {
48
- switch (shape_list[shape_id]->type) {
49
- case ShapeType::Circle: {
50
- Circle *p = (Circle *)scene.shapes[shape_id].ptr;
51
- const Circle *p_ = (const Circle*)(shape_list[shape_id]->ptr);
52
- *p = *p_;
53
- Circle *d_p = (Circle *)scene.d_shapes[shape_id].ptr;
54
- d_p->radius = 0;
55
- d_p->center = Vector2f{0, 0};
56
- break;
57
- } case ShapeType::Ellipse: {
58
- Ellipse *p = (Ellipse *)scene.shapes[shape_id].ptr;
59
- const Ellipse *p_ = (const Ellipse*)(shape_list[shape_id]->ptr);
60
- *p = *p_;
61
- Ellipse *d_p = (Ellipse *)scene.d_shapes[shape_id].ptr;
62
- d_p->radius = Vector2f{0, 0};
63
- d_p->center = Vector2f{0, 0};
64
- break;
65
- } case ShapeType::Path: {
66
- Path *p = (Path *)scene.shapes[shape_id].ptr;
67
- const Path *p_ = (const Path*)(shape_list[shape_id]->ptr);
68
- p->num_points = p_->num_points;
69
- p->num_base_points = p_->num_base_points;
70
- for (int i = 0; i < p_->num_base_points; i++) {
71
- p->num_control_points[i] = p_->num_control_points[i];
72
- }
73
- for (int i = 0; i < 2 * p_->num_points; i++) {
74
- p->points[i] = p_->points[i];
75
- }
76
- p->is_closed = p_->is_closed;
77
- p->use_distance_approx = p_->use_distance_approx;
78
- Path *d_p = (Path *)scene.d_shapes[shape_id].ptr;
79
- d_p->num_points = p_->num_points;
80
- d_p->num_base_points = p_->num_base_points;
81
- for (int i = 0; i < 2 * p_->num_points; i++) {
82
- d_p->points[i] = 0;
83
- }
84
- d_p->is_closed = p_->is_closed;
85
- if (p_->thickness != nullptr) {
86
- for (int i = 0; i < p_->num_points; i++) {
87
- p->thickness[i] = p_->thickness[i];
88
- d_p->thickness[i] = 0;
89
- }
90
- }
91
- d_p->use_distance_approx = p_->use_distance_approx;
92
- break;
93
- } case ShapeType::Rect: {
94
- Rect *p = (Rect *)scene.shapes[shape_id].ptr;
95
- const Rect *p_ = (const Rect*)(shape_list[shape_id]->ptr);
96
- *p = *p_;
97
- Rect *d_p = (Rect *)scene.d_shapes[shape_id].ptr;
98
- d_p->p_min = Vector2f{0, 0};
99
- d_p->p_max = Vector2f{0, 0};
100
- break;
101
- } default: {
102
- assert(false);
103
- break;
104
- }
105
- }
106
- scene.shapes[shape_id].type = shape_list[shape_id]->type;
107
- scene.shapes[shape_id].stroke_width = shape_list[shape_id]->stroke_width;
108
- scene.d_shapes[shape_id].type = shape_list[shape_id]->type;
109
- scene.d_shapes[shape_id].stroke_width = 0;
110
- }
111
- }
112
-
113
- std::vector<float>
114
- compute_shape_length(const std::vector<const Shape *> &shape_list) {
115
- int num_shapes = (int)shape_list.size();
116
- std::vector<float> shape_length_list(num_shapes, 0.f);
117
- for (int shape_id = 0; shape_id < num_shapes; shape_id++) {
118
- auto shape_length = 0.f;
119
- switch (shape_list[shape_id]->type) {
120
- case ShapeType::Circle: {
121
- const Circle *p_ = (const Circle*)(shape_list[shape_id]->ptr);
122
- shape_length += float(2.f * M_PI) * p_->radius;
123
- break;
124
- } case ShapeType::Ellipse: {
125
- const Ellipse *p_ = (const Ellipse*)(shape_list[shape_id]->ptr);
126
- // https://en.wikipedia.org/wiki/Ellipse#Circumference
127
- // Ramanujan's ellipse circumference approximation
128
- auto a = p_->radius.x;
129
- auto b = p_->radius.y;
130
- shape_length += float(M_PI) * (3 * (a + b) - sqrt((3 * a + b) * (a + 3 * b)));
131
- break;
132
- } case ShapeType::Path: {
133
- const Path *p_ = (const Path*)(shape_list[shape_id]->ptr);
134
- auto length = 0.f;
135
- auto point_id = 0;
136
- for (int i = 0; i < p_->num_base_points; i++) {
137
- if (p_->num_control_points[i] == 0) {
138
- // Straight line
139
- auto i0 = point_id;
140
- assert(i0 < p_->num_points);
141
- auto i1 = (i0 + 1) % p_->num_points;
142
- point_id += 1;
143
- auto p0 = Vector2f{p_->points[2 * i0], p_->points[2 * i0 + 1]};
144
- auto p1 = Vector2f{p_->points[2 * i1], p_->points[2 * i1 + 1]};
145
- length += distance(p1, p0);
146
- } else if (p_->num_control_points[i] == 1) {
147
- // Quadratic Bezier curve
148
- auto i0 = point_id;
149
- auto i1 = i0 + 1;
150
- auto i2 = (i0 + 2) % p_->num_points;
151
- point_id += 2;
152
- auto p0 = Vector2f{p_->points[2 * i0], p_->points[2 * i0 + 1]};
153
- auto p1 = Vector2f{p_->points[2 * i1], p_->points[2 * i1 + 1]};
154
- auto p2 = Vector2f{p_->points[2 * i2], p_->points[2 * i2 + 1]};
155
- auto eval = [&](float t) -> Vector2f {
156
- auto tt = 1 - t;
157
- return (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2;
158
- };
159
- // We use 3-point samples to approximate the length
160
- auto v0 = p0;
161
- auto v1 = eval(0.5f);
162
- auto v2 = p2;
163
- length += distance(v1, v0) + distance(v1, v2);
164
- } else if (p_->num_control_points[i] == 2) {
165
- // Cubic Bezier curve
166
- auto i0 = point_id;
167
- auto i1 = i0 + 1;
168
- auto i2 = i0 + 2;
169
- auto i3 = (i0 + 3) % p_->num_points;
170
- point_id += 3;
171
- auto p0 = Vector2f{p_->points[2 * i0], p_->points[2 * i0 + 1]};
172
- auto p1 = Vector2f{p_->points[2 * i1], p_->points[2 * i1 + 1]};
173
- auto p2 = Vector2f{p_->points[2 * i2], p_->points[2 * i2 + 1]};
174
- auto p3 = Vector2f{p_->points[2 * i3], p_->points[2 * i3 + 1]};
175
- auto eval = [&](float t) -> Vector2f {
176
- auto tt = 1 - t;
177
- return (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3;
178
- };
179
- // We use 4-point samples to approximate the length
180
- auto v0 = p0;
181
- auto v1 = eval(1.f/3.f);
182
- auto v2 = eval(2.f/3.f);
183
- auto v3 = p3;
184
- length += distance(v1, v0) + distance(v1, v2) + distance(v2, v3);
185
- } else {
186
- assert(false);
187
- }
188
- }
189
- assert(isfinite(length));
190
- shape_length += length;
191
- break;
192
- } case ShapeType::Rect: {
193
- const Rect *p_ = (const Rect*)(shape_list[shape_id]->ptr);
194
- shape_length += 2 * (p_->p_max.x - p_->p_min.x + p_->p_max.y - p_->p_min.y);
195
- break;
196
- } default: {
197
- assert(false);
198
- break;
199
- }
200
- }
201
- assert(isfinite(shape_length));
202
- shape_length_list[shape_id] = shape_length;
203
- }
204
- return shape_length_list;
205
- }
206
-
207
- void build_shape_cdfs(Scene &scene,
208
- const std::vector<const ShapeGroup *> &shape_group_list,
209
- const std::vector<float> &shape_length_list) {
210
- int sample_id = 0;
211
- for (int shape_group_id = 0; shape_group_id < (int)shape_group_list.size(); shape_group_id++) {
212
- const ShapeGroup *shape_group = shape_group_list[shape_group_id];
213
- for (int i = 0; i < shape_group->num_shapes; i++) {
214
- int shape_id = shape_group->shape_ids[i];
215
- float length = shape_length_list[shape_id];
216
- scene.sample_shape_id[sample_id] = shape_id;
217
- if (sample_id == 0) {
218
- scene.sample_shapes_cdf[sample_id] = length;
219
- } else {
220
- scene.sample_shapes_cdf[sample_id] = length +
221
- scene.sample_shapes_cdf[sample_id - 1];
222
- }
223
- assert(isfinite(length));
224
- scene.sample_shapes_pmf[sample_id] = length;
225
- scene.sample_group_id[sample_id] = shape_group_id;
226
- sample_id++;
227
- }
228
- }
229
- assert(sample_id == scene.num_total_shapes);
230
- auto normalization = scene.sample_shapes_cdf[scene.num_total_shapes - 1];
231
- if (normalization <= 0) {
232
- char buf[256];
233
- sprintf(buf, "The total length of the shape boundaries in the scene is equal or less than 0. Length = %f", normalization);
234
- throw std::runtime_error(buf);
235
- }
236
- if (!isfinite(normalization)) {
237
- char buf[256];
238
- sprintf(buf, "The total length of the shape boundaries in the scene is not a number. Length = %f", normalization);
239
- throw std::runtime_error(buf);
240
- }
241
- assert(normalization > 0);
242
- for (int sample_id = 0; sample_id < scene.num_total_shapes; sample_id++) {
243
- scene.sample_shapes_cdf[sample_id] /= normalization;
244
- scene.sample_shapes_pmf[sample_id] /= normalization;
245
- }
246
- }
247
-
248
- void build_path_cdfs(Scene &scene,
249
- const std::vector<const Shape *> &shape_list,
250
- const std::vector<float> &shape_length_list) {
251
- for (int shape_id = 0; shape_id < scene.num_shapes; shape_id++) {
252
- if (shape_list[shape_id]->type == ShapeType::Path) {
253
- const Path &path = shape_list[shape_id]->as_path();
254
- float *pmf = scene.path_length_pmf[shape_id];
255
- float *cdf = scene.path_length_cdf[shape_id];
256
- int *point_id_map = scene.path_point_id_map[shape_id];
257
- auto path_length = shape_length_list[shape_id];
258
- auto inv_length = 1.f / path_length;
259
- auto point_id = 0;
260
- for (int i = 0; i < path.num_base_points; i++) {
261
- point_id_map[i] = point_id;
262
- if (path.num_control_points[i] == 0) {
263
- // Straight line
264
- auto i0 = point_id;
265
- auto i1 = (i0 + 1) % path.num_points;
266
- point_id += 1;
267
- auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]};
268
- auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]};
269
- auto d = distance(p0, p1) * inv_length;
270
- pmf[i] = d;
271
- if (i == 0) {
272
- cdf[i] = d;
273
- } else {
274
- cdf[i] = d + cdf[i - 1];
275
- }
276
- } else if (path.num_control_points[i] == 1) {
277
- // Quadratic Bezier curve
278
- auto i0 = point_id;
279
- auto i1 = i0 + 1;
280
- auto i2 = (i0 + 2) % path.num_points;
281
- point_id += 2;
282
- auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]};
283
- auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]};
284
- auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]};
285
- auto eval = [&](float t) -> Vector2f {
286
- auto tt = 1 - t;
287
- return (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2;
288
- };
289
- // We use 3-point samples to approximate the length
290
- auto v0 = p0;
291
- auto v1 = eval(0.5f);
292
- auto v2 = p2;
293
- auto d = (distance(v0, v1) + distance(v1, v2)) * inv_length;
294
- pmf[i] = d;
295
- if (i == 0) {
296
- cdf[i] = d;
297
- } else {
298
- cdf[i] = d + cdf[i - 1];
299
- }
300
- } else if (path.num_control_points[i] == 2) {
301
- // Cubic Bezier curve
302
- auto i0 = point_id;
303
- auto i1 = point_id + 1;
304
- auto i2 = point_id + 2;
305
- auto i3 = (point_id + 3) % path.num_points;
306
- point_id += 3;
307
- auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]};
308
- auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]};
309
- auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]};
310
- auto p3 = Vector2f{path.points[2 * i3], path.points[2 * i3 + 1]};
311
- auto eval = [&](float t) -> Vector2f {
312
- auto tt = 1 - t;
313
- return (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3;
314
- };
315
- // We use 4-point samples to approximate the length
316
- auto v0 = p0;
317
- auto v1 = eval(1.f/3.f);
318
- auto v2 = eval(2.f/3.f);
319
- auto v3 = p3;
320
- auto d = (distance(v1, v0) + distance(v1, v2) + distance(v2, v3)) * inv_length;
321
- pmf[i] = d;
322
- if (i == 0) {
323
- cdf[i] = d;
324
- } else {
325
- cdf[i] = d + cdf[i - 1];
326
- }
327
- } else {
328
- assert(false);
329
- }
330
- }
331
- }
332
- }
333
- }
334
-
335
- void copy_and_init_shape_groups(Scene &scene,
336
- const std::vector<const ShapeGroup *> &shape_group_list) {
337
- for (int group_id = 0; group_id < scene.num_shape_groups; group_id++) {
338
- const ShapeGroup *shape_group = shape_group_list[group_id];
339
- auto copy_and_init_color = [&](const ColorType &color_type, void *color_ptr, void *target_ptr, void *d_target_ptr) {
340
- switch (color_type) {
341
- case ColorType::Constant: {
342
- Constant *c = (Constant*)target_ptr;
343
- Constant *d_c = (Constant*)d_target_ptr;
344
- const Constant *c_ = (const Constant*)color_ptr;
345
- *c = *c_;
346
- d_c->color = Vector4{0, 0, 0, 0};
347
- break;
348
- } case ColorType::LinearGradient: {
349
- LinearGradient *c = (LinearGradient*)target_ptr;
350
- LinearGradient *d_c = (LinearGradient*)d_target_ptr;
351
- const LinearGradient *c_ = (const LinearGradient*)color_ptr;
352
- c->begin = c_->begin;
353
- c->end = c_->end;
354
- c->num_stops = c_->num_stops;
355
- for (int i = 0; i < c_->num_stops; i++) {
356
- c->stop_offsets[i] = c_->stop_offsets[i];
357
- }
358
- for (int i = 0; i < 4 * c_->num_stops; i++) {
359
- c->stop_colors[i] = c_->stop_colors[i];
360
- }
361
- d_c->begin = Vector2f{0, 0};
362
- d_c->end = Vector2f{0, 0};
363
- d_c->num_stops = c_->num_stops;
364
- for (int i = 0; i < c_->num_stops; i++) {
365
- d_c->stop_offsets[i] = 0;
366
- }
367
- for (int i = 0; i < 4 * c_->num_stops; i++) {
368
- d_c->stop_colors[i] = 0;
369
- }
370
- break;
371
- } case ColorType::RadialGradient: {
372
- RadialGradient *c = (RadialGradient*)target_ptr;
373
- RadialGradient *d_c = (RadialGradient*)d_target_ptr;
374
- const RadialGradient *c_ = (const RadialGradient*)color_ptr;
375
- c->center = c_->center;
376
- c->radius = c_->radius;
377
- c->num_stops = c_->num_stops;
378
- for (int i = 0; i < c_->num_stops; i++) {
379
- c->stop_offsets[i] = c_->stop_offsets[i];
380
- }
381
- for (int i = 0; i < 4 * c_->num_stops; i++) {
382
- c->stop_colors[i] = c_->stop_colors[i];
383
- }
384
- d_c->center = Vector2f{0, 0};
385
- d_c->radius = Vector2f{0, 0};
386
- d_c->num_stops = c_->num_stops;
387
- for (int i = 0; i < c_->num_stops; i++) {
388
- d_c->stop_offsets[i] = 0;
389
- }
390
- for (int i = 0; i < 4 * c_->num_stops; i++) {
391
- d_c->stop_colors[i] = 0;
392
- }
393
- break;
394
- } default: {
395
- assert(false);
396
- }
397
- }
398
- };
399
- for (int i = 0; i < shape_group->num_shapes; i++) {
400
- scene.shape_groups[group_id].shape_ids[i] = shape_group->shape_ids[i];
401
- }
402
- scene.shape_groups[group_id].num_shapes = shape_group->num_shapes;
403
- scene.shape_groups[group_id].use_even_odd_rule = shape_group->use_even_odd_rule;
404
- scene.shape_groups[group_id].canvas_to_shape = shape_group->canvas_to_shape;
405
- scene.shape_groups[group_id].shape_to_canvas = shape_group->shape_to_canvas;
406
- scene.d_shape_groups[group_id].shape_ids = nullptr;
407
- scene.d_shape_groups[group_id].num_shapes = shape_group->num_shapes;
408
- scene.d_shape_groups[group_id].use_even_odd_rule = shape_group->use_even_odd_rule;
409
- scene.d_shape_groups[group_id].canvas_to_shape = Matrix3x3f{};
410
- scene.d_shape_groups[group_id].shape_to_canvas = Matrix3x3f{};
411
-
412
- scene.shape_groups[group_id].fill_color_type = shape_group->fill_color_type;
413
- scene.d_shape_groups[group_id].fill_color_type = shape_group->fill_color_type;
414
- if (shape_group->fill_color != nullptr) {
415
- copy_and_init_color(shape_group->fill_color_type,
416
- shape_group->fill_color,
417
- scene.shape_groups[group_id].fill_color,
418
- scene.d_shape_groups[group_id].fill_color);
419
- }
420
- scene.shape_groups[group_id].stroke_color_type = shape_group->stroke_color_type;
421
- scene.d_shape_groups[group_id].stroke_color_type = shape_group->stroke_color_type;
422
- if (shape_group->stroke_color != nullptr) {
423
- copy_and_init_color(shape_group->stroke_color_type,
424
- shape_group->stroke_color,
425
- scene.shape_groups[group_id].stroke_color,
426
- scene.d_shape_groups[group_id].stroke_color);
427
- }
428
- }
429
- }
430
-
431
- DEVICE uint32_t morton2D(const Vector2f &p, int canvas_width, int canvas_height) {
432
- auto scene_bounds = Vector2f{canvas_width, canvas_height};
433
- auto pp = p / scene_bounds;
434
- TVector2<uint32_t> pp_i{pp.x * 1023, pp.y * 1023};
435
- return (expand_bits(pp_i.x) << 1u) |
436
- (expand_bits(pp_i.y) << 0u);
437
- }
438
-
439
- template <bool sort>
440
- void build_bvh(const Scene &scene, BVHNode *nodes, int num_primitives) {
441
- auto bvh_size = 2 * num_primitives - 1;
442
- if (bvh_size > 1) {
443
- if (sort) {
444
- // Sort by Morton code
445
- std::sort(nodes, nodes + num_primitives,
446
- [&] (const BVHNode &n0, const BVHNode &n1) {
447
- auto p0 = 0.5f * (n0.box.p_min + n0.box.p_max);
448
- auto p1 = 0.5f * (n1.box.p_min + n1.box.p_max);
449
- auto m0 = morton2D(p0, scene.canvas_width, scene.canvas_height);
450
- auto m1 = morton2D(p1, scene.canvas_width, scene.canvas_height);
451
- return m0 < m1;
452
- });
453
- }
454
- for (int i = num_primitives; i < bvh_size; i++) {
455
- nodes[i] = BVHNode{-1, -1, AABB{}, 0.f};
456
- }
457
- int prev_beg = 0;
458
- int prev_end = num_primitives;
459
- // For handling an odd number of nodes at a level
460
- int leftover = prev_end % 2 == 0 ? -1 : prev_end - 1;
461
- while (prev_end - prev_beg >= 1 || leftover != -1) {
462
- int length = (prev_end - prev_beg) / 2;
463
- if ((prev_end - prev_beg) % 2 == 1 && leftover != -1 &&
464
- leftover != prev_end - 1) {
465
- length += 1;
466
- }
467
- for (int i = 0; i < length; i++) {
468
- BVHNode node;
469
- node.child0 = prev_beg + 2 * i;
470
- node.child1 = prev_beg + 2 * i + 1;
471
- if (node.child1 >= prev_end) {
472
- assert(leftover != -1);
473
- node.child1 = leftover;
474
- leftover = -1;
475
- }
476
- AABB child0_box = nodes[node.child0].box;
477
- AABB child1_box = nodes[node.child1].box;
478
- node.box = merge(child0_box, child1_box);
479
- node.max_radius = std::max(nodes[node.child0].max_radius,
480
- nodes[node.child1].max_radius);
481
- nodes[prev_end + i] = node;
482
- }
483
- if (length == 1 && leftover == -1) {
484
- break;
485
- }
486
- prev_beg = prev_end;
487
- prev_end = prev_beg + length;
488
- if (length % 2 == 1 && leftover == -1) {
489
- leftover = prev_end - 1;
490
- }
491
- }
492
- }
493
- assert(nodes[2 * num_primitives - 2].child0 != -1);
494
- }
495
-
496
- void compute_bounding_boxes(Scene &scene,
497
- const std::vector<const Shape *> &shape_list,
498
- const std::vector<const ShapeGroup *> &shape_group_list) {
499
- for (int shape_id = 0; shape_id < scene.num_shapes; shape_id++) {
500
- switch (shape_list[shape_id]->type) {
501
- case ShapeType::Circle: {
502
- const Circle *p = (const Circle*)(shape_list[shape_id]->ptr);
503
- scene.shapes_bbox[shape_id] = AABB{p->center - p->radius,
504
- p->center + p->radius};
505
- break;
506
- } case ShapeType::Ellipse: {
507
- const Ellipse *p = (const Ellipse*)(shape_list[shape_id]->ptr);
508
- scene.shapes_bbox[shape_id] = AABB{p->center - p->radius,
509
- p->center + p->radius};
510
- break;
511
- } case ShapeType::Path: {
512
- const Path *p = (const Path*)(shape_list[shape_id]->ptr);
513
- AABB box;
514
- if (p->num_points > 0) {
515
- box = AABB{Vector2f{p->points[0], p->points[1]},
516
- Vector2f{p->points[0], p->points[1]}};
517
- }
518
- for (int i = 1; i < p->num_points; i++) {
519
- box = merge(box, Vector2f{p->points[2 * i], p->points[2 * i + 1]});
520
- }
521
- scene.shapes_bbox[shape_id] = box;
522
- std::vector<AABB> boxes(p->num_base_points);
523
- std::vector<float> thickness(p->num_base_points);
524
- std::vector<int> first_point_id(p->num_base_points);
525
- auto r = shape_list[shape_id]->stroke_width;
526
- auto point_id = 0;
527
- for (int i = 0; i < p->num_base_points; i++) {
528
- first_point_id[i] = point_id;
529
- if (p->num_control_points[i] == 0) {
530
- // Straight line
531
- auto i0 = point_id;
532
- auto i1 = (i0 + 1) % p->num_points;
533
- point_id += 1;
534
- auto p0 = Vector2f{p->points[2 * i0], p->points[2 * i0 + 1]};
535
- auto p1 = Vector2f{p->points[2 * i1], p->points[2 * i1 + 1]};
536
- boxes[i] = AABB();
537
- boxes[i] = merge(boxes[i], p0);
538
- boxes[i] = merge(boxes[i], p1);
539
- auto r0 = r;
540
- auto r1 = r;
541
- // override radius if path has thickness
542
- if (p->thickness != nullptr) {
543
- r0 = p->thickness[i0];
544
- r1 = p->thickness[i1];
545
- }
546
- thickness[i] = max(r0, r1);
547
- } else if (p->num_control_points[i] == 1) {
548
- // Quadratic Bezier curve
549
- auto i0 = point_id;
550
- auto i1 = i0 + 1;
551
- auto i2 = (i0 + 2) % p->num_points;
552
- point_id += 2;
553
- auto p0 = Vector2f{p->points[2 * i0], p->points[2 * i0 + 1]};
554
- auto p1 = Vector2f{p->points[2 * i1], p->points[2 * i1 + 1]};
555
- auto p2 = Vector2f{p->points[2 * i2], p->points[2 * i2 + 1]};
556
- boxes[i] = AABB();
557
- boxes[i] = merge(boxes[i], p0);
558
- boxes[i] = merge(boxes[i], p1);
559
- boxes[i] = merge(boxes[i], p2);
560
- auto r0 = r;
561
- auto r1 = r;
562
- auto r2 = r;
563
- // override radius if path has thickness
564
- if (p->thickness != nullptr) {
565
- r0 = p->thickness[i0];
566
- r1 = p->thickness[i1];
567
- r2 = p->thickness[i2];
568
- }
569
- thickness[i] = max(max(r0, r1), r2);
570
- } else if (p->num_control_points[i] == 2) {
571
- // Cubic Bezier curve
572
- auto i0 = point_id;
573
- auto i1 = i0 + 1;
574
- auto i2 = i0 + 2;
575
- auto i3 = (i0 + 3) % p->num_points;
576
- point_id += 3;
577
- auto p0 = Vector2f{p->points[2 * i0], p->points[2 * i0 + 1]};
578
- auto p1 = Vector2f{p->points[2 * i1], p->points[2 * i1 + 1]};
579
- auto p2 = Vector2f{p->points[2 * i2], p->points[2 * i2 + 1]};
580
- auto p3 = Vector2f{p->points[2 * i3], p->points[2 * i3 + 1]};
581
- boxes[i] = AABB();
582
- boxes[i] = merge(boxes[i], p0);
583
- boxes[i] = merge(boxes[i], p1);
584
- boxes[i] = merge(boxes[i], p2);
585
- boxes[i] = merge(boxes[i], p3);
586
- auto r0 = r;
587
- auto r1 = r;
588
- auto r2 = r;
589
- auto r3 = r;
590
- // override radius if path has thickness
591
- if (p->thickness != nullptr) {
592
- r0 = p->thickness[i0];
593
- r1 = p->thickness[i1];
594
- r2 = p->thickness[i2];
595
- r3 = p->thickness[i3];
596
- }
597
- thickness[i] = max(max(max(r0, r1), r2), r3);
598
- } else {
599
- assert(false);
600
- }
601
- }
602
- // Sort the boxes by y
603
- std::vector<int> idx(boxes.size());
604
- std::iota(idx.begin(), idx.end(), 0);
605
- std::sort(idx.begin(), idx.end(), [&](int i0, int i1) {
606
- const AABB &b0 = boxes[i0];
607
- const AABB &b1 = boxes[i1];
608
- auto b0y = 0.5f * (b0.p_min.y + b0.p_max.y);
609
- auto b1y = 0.5f * (b1.p_min.y + b1.p_max.y);
610
- return b0y < b1y;
611
- });
612
- BVHNode *nodes = scene.path_bvhs[shape_id];
613
- for (int i = 0; i < (int)idx.size(); i++) {
614
- nodes[i] = BVHNode{idx[i],
615
- -(first_point_id[idx[i]]+1),
616
- boxes[idx[i]],
617
- thickness[idx[i]]};
618
- }
619
- build_bvh<false /*sort*/>(scene, nodes, boxes.size());
620
- break;
621
- } case ShapeType::Rect: {
622
- const Rect *p = (const Rect*)(shape_list[shape_id]->ptr);
623
- scene.shapes_bbox[shape_id] = AABB{p->p_min, p->p_max};
624
- break;
625
- } default: {
626
- assert(false);
627
- break;
628
- }
629
- }
630
- }
631
-
632
- for (int shape_group_id = 0; shape_group_id < (int)shape_group_list.size(); shape_group_id++) {
633
- const ShapeGroup *shape_group = shape_group_list[shape_group_id];
634
- // Build a BVH for each shape group
635
- BVHNode *nodes = scene.shape_groups_bvh_nodes[shape_group_id];
636
- for (int i = 0; i < shape_group->num_shapes; i++) {
637
- auto shape_id = shape_group->shape_ids[i];
638
- auto r = shape_group->stroke_color == nullptr ? 0 : shape_list[shape_id]->stroke_width;
639
- nodes[i] = BVHNode{shape_id,
640
- -1,
641
- scene.shapes_bbox[shape_id],
642
- r};
643
- }
644
- build_bvh<true /*sort*/>(scene, nodes, shape_group->num_shapes);
645
- }
646
-
647
- BVHNode *nodes = scene.bvh_nodes;
648
- for (int shape_group_id = 0; shape_group_id < (int)shape_group_list.size(); shape_group_id++) {
649
- const ShapeGroup *shape_group = shape_group_list[shape_group_id];
650
- auto max_radius = shape_list[shape_group->shape_ids[0]]->stroke_width;
651
- if (shape_list[shape_group->shape_ids[0]]->type == ShapeType::Path) {
652
- const Path *p = (const Path*)(shape_list[shape_group->shape_ids[0]]->ptr);
653
- if (p->thickness != nullptr) {
654
- const BVHNode *nodes = scene.path_bvhs[shape_group->shape_ids[0]];
655
- max_radius = nodes[0].max_radius;
656
- }
657
- }
658
- for (int i = 1; i < shape_group->num_shapes; i++) {
659
- auto shape_id = shape_group->shape_ids[i];
660
- auto shape = shape_list[shape_id];
661
- auto r = shape->stroke_width;
662
- if (shape->type == ShapeType::Path) {
663
- const Path *p = (const Path*)(shape_list[shape_id]->ptr);
664
- if (p->thickness != nullptr) {
665
- const BVHNode *nodes = scene.path_bvhs[shape_id];
666
- r = nodes[0].max_radius;
667
- }
668
- }
669
- max_radius = std::max(max_radius, r);
670
- }
671
- // Fetch group bbox from BVH
672
- auto bbox = scene.shape_groups_bvh_nodes[shape_group_id][2 * shape_group->num_shapes - 2].box;
673
- // Transform box from local to world space
674
- nodes[shape_group_id].child0 = shape_group_id;
675
- nodes[shape_group_id].child1 = -1;
676
- nodes[shape_group_id].box = transform(shape_group->shape_to_canvas, bbox);
677
- if (shape_group->stroke_color == nullptr) {
678
- nodes[shape_group_id].max_radius = 0;
679
- } else {
680
- nodes[shape_group_id].max_radius = max_radius;
681
- }
682
- }
683
- build_bvh<true /*sort*/>(scene, nodes, shape_group_list.size());
684
- }
685
-
686
- template <bool alloc_mode>
687
- size_t allocate_buffers(Scene &scene,
688
- const std::vector<const Shape *> &shape_list,
689
- const std::vector<const ShapeGroup *> &shape_group_list) {
690
- auto num_shapes = shape_list.size();
691
- auto num_shape_groups = shape_group_list.size();
692
-
693
- size_t buffer_size = 0;
694
- if (alloc_mode) scene.shapes = (Shape*)&scene.buffer[buffer_size];
695
- buffer_size += align(sizeof(Shape) * num_shapes);
696
- if (alloc_mode) scene.d_shapes = (Shape*)&scene.buffer[buffer_size];
697
- buffer_size += align(sizeof(Shape) * num_shapes);
698
- if (alloc_mode) scene.shape_groups = (ShapeGroup*)&scene.buffer[buffer_size];
699
- buffer_size += align(sizeof(ShapeGroup) * num_shape_groups);
700
- if (alloc_mode) scene.d_shape_groups = (ShapeGroup*)&scene.buffer[buffer_size];
701
- buffer_size += align(sizeof(ShapeGroup) * num_shape_groups);
702
- if (alloc_mode) scene.sample_shapes_cdf = (float*)&scene.buffer[buffer_size];
703
- buffer_size += align(sizeof(float) * scene.num_total_shapes);
704
- if (alloc_mode) scene.sample_shapes_pmf = (float*)&scene.buffer[buffer_size];
705
- buffer_size += align(sizeof(float) * scene.num_total_shapes);
706
- if (alloc_mode) scene.sample_shape_id = (int*)&scene.buffer[buffer_size];
707
- buffer_size += align(sizeof(int) * scene.num_total_shapes);
708
- if (alloc_mode) scene.sample_group_id = (int*)&scene.buffer[buffer_size];
709
- buffer_size += align(sizeof(int) * scene.num_total_shapes);
710
- if (alloc_mode) scene.shapes_length = (float*)&scene.buffer[buffer_size];
711
- buffer_size += align(sizeof(float) * num_shapes);
712
- if (alloc_mode) scene.path_length_cdf = (float**)&scene.buffer[buffer_size];
713
- buffer_size += align(sizeof(float*) * num_shapes);
714
- if (alloc_mode) scene.path_length_pmf = (float**)&scene.buffer[buffer_size];
715
- buffer_size += align(sizeof(float*) * num_shapes);
716
- if (alloc_mode) scene.path_point_id_map = (int**)&scene.buffer[buffer_size];
717
- buffer_size += align(sizeof(int*) * num_shapes);
718
- if (alloc_mode) scene.filter = (Filter*)&scene.buffer[buffer_size];
719
- buffer_size += align(sizeof(Filter));
720
- if (alloc_mode) scene.d_filter = (DFilter*)&scene.buffer[buffer_size];
721
- buffer_size += align(sizeof(DFilter));
722
- if (alloc_mode) scene.shapes_bbox = (AABB*)&scene.buffer[buffer_size];
723
- buffer_size += align(sizeof(AABB) * num_shapes);
724
- if (alloc_mode) scene.path_bvhs = (BVHNode**)&scene.buffer[buffer_size];
725
- buffer_size += align(sizeof(BVHNode*) * num_shapes);
726
- if (alloc_mode) scene.shape_groups_bvh_nodes = (BVHNode**)&scene.buffer[buffer_size];
727
- buffer_size += align(sizeof(BVHNode*) * num_shape_groups);
728
- if (alloc_mode) scene.bvh_nodes = (BVHNode*)&scene.buffer[buffer_size];
729
- buffer_size += align(sizeof(BVHNode) * (2 * num_shape_groups - 1));
730
-
731
- if (alloc_mode) {
732
- for (int i = 0; i < num_shapes; i++) {
733
- scene.path_length_cdf[i] = nullptr;
734
- scene.path_length_pmf[i] = nullptr;
735
- scene.path_point_id_map[i] = nullptr;
736
- scene.path_bvhs[i] = nullptr;
737
- }
738
- }
739
-
740
- for (int shape_id = 0; shape_id < scene.num_shapes; shape_id++) {
741
- switch (shape_list[shape_id]->type) {
742
- case ShapeType::Circle: {
743
- if (alloc_mode) scene.shapes[shape_id].ptr = (Circle*)&scene.buffer[buffer_size];
744
- buffer_size += align(sizeof(Circle)); // scene.shapes[shape_id].ptr
745
- if (alloc_mode) scene.d_shapes[shape_id].ptr = (Circle*)&scene.buffer[buffer_size];
746
- buffer_size += align(sizeof(Circle)); // scene.d_shapes[shape_id].ptr
747
- break;
748
- } case ShapeType::Ellipse: {
749
- if (alloc_mode) scene.shapes[shape_id].ptr = (Ellipse*)&scene.buffer[buffer_size];
750
- buffer_size += align(sizeof(Ellipse)); // scene.shapes[shape_id].ptr
751
- if (alloc_mode) scene.d_shapes[shape_id].ptr = (Ellipse*)&scene.buffer[buffer_size];
752
- buffer_size += align(sizeof(Ellipse)); // scene.d_shapes[shape_id].ptr
753
- break;
754
- } case ShapeType::Path: {
755
- if (alloc_mode) scene.shapes[shape_id].ptr = (Path*)&scene.buffer[buffer_size];
756
- buffer_size += align(sizeof(Path)); // scene.shapes[shape_id].ptr
757
- if (alloc_mode) scene.d_shapes[shape_id].ptr = (Path*)&scene.buffer[buffer_size];
758
- buffer_size += align(sizeof(Path)); // scene.d_shapes[shape_id].ptr
759
-
760
- const Path *p_ = (const Path*)(shape_list[shape_id]->ptr);
761
- Path *p = nullptr, *d_p = nullptr;
762
- if (alloc_mode) p = (Path*)scene.shapes[shape_id].ptr;
763
- if (alloc_mode) d_p = (Path*)scene.d_shapes[shape_id].ptr;
764
- if (alloc_mode) p->num_control_points = (int*)&scene.buffer[buffer_size];
765
- buffer_size += align(sizeof(int) * p_->num_base_points); // p->num_control_points
766
- if (alloc_mode) p->points = (float*)&scene.buffer[buffer_size];
767
- buffer_size += align(sizeof(float) * (2 * p_->num_points)); // p->points
768
- if (alloc_mode) d_p->points = (float*)&scene.buffer[buffer_size];
769
- buffer_size += align(sizeof(float) * (2 * p_->num_points)); // d_p->points
770
- if (p_->thickness != nullptr) {
771
- if (alloc_mode) p->thickness = (float*)&scene.buffer[buffer_size];
772
- buffer_size += align(sizeof(float) * p_->num_points); // p->thickness
773
- if (alloc_mode) d_p->thickness = (float*)&scene.buffer[buffer_size];
774
- buffer_size += align(sizeof(float) * p_->num_points); // d_p->thickness
775
- } else {
776
- if (alloc_mode) p->thickness = nullptr;
777
- if (alloc_mode) d_p->thickness = nullptr;
778
- }
779
- if (alloc_mode) scene.path_length_pmf[shape_id] = (float*)&scene.buffer[buffer_size];
780
- buffer_size += align(sizeof(float) * p_->num_base_points); // scene.path_length_pmf
781
- if (alloc_mode) scene.path_length_cdf[shape_id] = (float*)&scene.buffer[buffer_size];
782
- buffer_size += align(sizeof(float) * p_->num_base_points); // scene.path_length_cdf
783
- if (alloc_mode) scene.path_point_id_map[shape_id] = (int*)&scene.buffer[buffer_size];
784
- buffer_size += align(sizeof(int) * p_->num_base_points); // scene.path_point_id_map
785
- if (alloc_mode) scene.path_bvhs[shape_id] = (BVHNode*)&scene.buffer[buffer_size];
786
- buffer_size += align(sizeof(BVHNode) * (2 * p_->num_base_points - 1));
787
- break;
788
- } case ShapeType::Rect: {
789
- if (alloc_mode) scene.shapes[shape_id].ptr = (Rect*)&scene.buffer[buffer_size];
790
- buffer_size += align(sizeof(Rect)); // scene.shapes[shape_id].ptr
791
- if (alloc_mode) scene.d_shapes[shape_id].ptr = (Rect*)&scene.buffer[buffer_size];
792
- buffer_size += align(sizeof(Rect)); // scene.d_shapes[shape_id].ptr
793
- break;
794
- } default: {
795
- assert(false);
796
- break;
797
- }
798
- }
799
- }
800
-
801
- for (int group_id = 0; group_id < scene.num_shape_groups; group_id++) {
802
- const ShapeGroup *shape_group = shape_group_list[group_id];
803
- if (shape_group->fill_color != nullptr) {
804
- switch (shape_group->fill_color_type) {
805
- case ColorType::Constant: {
806
- if (alloc_mode) scene.shape_groups[group_id].fill_color = (Constant*)&scene.buffer[buffer_size];
807
- buffer_size += align(sizeof(Constant)); // color
808
- if (alloc_mode) scene.d_shape_groups[group_id].fill_color = (Constant*)&scene.buffer[buffer_size];
809
- buffer_size += align(sizeof(Constant)); // d_color
810
- break;
811
- } case ColorType::LinearGradient: {
812
- if (alloc_mode) scene.shape_groups[group_id].fill_color = (LinearGradient*)&scene.buffer[buffer_size];
813
- buffer_size += align(sizeof(LinearGradient)); // color
814
- if (alloc_mode) scene.d_shape_groups[group_id].fill_color = (LinearGradient*)&scene.buffer[buffer_size];
815
- buffer_size += align(sizeof(LinearGradient)); // d_color
816
-
817
- const LinearGradient *c_ = (const LinearGradient *)shape_group->fill_color;
818
- LinearGradient *c = nullptr, *d_c = nullptr;
819
- if (alloc_mode) c = (LinearGradient *)scene.shape_groups[group_id].fill_color;
820
- if (alloc_mode) d_c = (LinearGradient *)scene.d_shape_groups[group_id].fill_color;
821
- if (alloc_mode) c->stop_offsets = (float*)&scene.buffer[buffer_size];
822
- buffer_size += align(sizeof(float) * c_->num_stops); // c->stop_offsets
823
- if (alloc_mode) c->stop_colors = (float*)&scene.buffer[buffer_size];
824
- buffer_size += align(sizeof(float) * 4 * c_->num_stops); // c->stop_colors
825
- if (alloc_mode) d_c->stop_offsets = (float*)&scene.buffer[buffer_size];
826
- buffer_size += align(sizeof(float) * c_->num_stops); // d_c->stop_offsets
827
- if (alloc_mode) d_c->stop_colors = (float*)&scene.buffer[buffer_size];
828
- buffer_size += align(sizeof(float) * 4 * c_->num_stops); // d_c->stop_colors
829
- break;
830
- } case ColorType::RadialGradient: {
831
- if (alloc_mode) scene.shape_groups[group_id].fill_color = (RadialGradient*)&scene.buffer[buffer_size];
832
- buffer_size += align(sizeof(RadialGradient)); // color
833
- if (alloc_mode) scene.d_shape_groups[group_id].fill_color = (RadialGradient*)&scene.buffer[buffer_size];
834
- buffer_size += align(sizeof(RadialGradient)); // d_color
835
-
836
- const RadialGradient *c_ = (const RadialGradient *)shape_group->fill_color;
837
- RadialGradient *c = nullptr, *d_c = nullptr;
838
- if (alloc_mode) c = (RadialGradient *)scene.shape_groups[group_id].fill_color;
839
- if (alloc_mode) d_c = (RadialGradient *)scene.d_shape_groups[group_id].fill_color;
840
- if (alloc_mode) c->stop_offsets = (float*)&scene.buffer[buffer_size];
841
- buffer_size += align(sizeof(float) * c_->num_stops); // c->stop_offsets
842
- if (alloc_mode) c->stop_colors = (float*)&scene.buffer[buffer_size];
843
- buffer_size += align(sizeof(float) * 4 * c_->num_stops); // c->stop_colors
844
- if (alloc_mode) d_c->stop_offsets = (float*)&scene.buffer[buffer_size];
845
- buffer_size += align(sizeof(float) * c_->num_stops); // d_c->stop_offsets
846
- if (alloc_mode) d_c->stop_colors = (float*)&scene.buffer[buffer_size];
847
- buffer_size += align(sizeof(float) * 4 * c_->num_stops); // d_c->stop_colors
848
- break;
849
- } default: {
850
- assert(false);
851
- }
852
- }
853
- } else {
854
- if (alloc_mode) scene.shape_groups[group_id].fill_color = nullptr;
855
- if (alloc_mode) scene.d_shape_groups[group_id].fill_color = nullptr;
856
- }
857
- if (shape_group->stroke_color != nullptr) {
858
- switch (shape_group->stroke_color_type) {
859
- case ColorType::Constant: {
860
- if (alloc_mode) scene.shape_groups[group_id].stroke_color = (Constant*)&scene.buffer[buffer_size];
861
- buffer_size += align(sizeof(Constant)); // color
862
- if (alloc_mode) scene.d_shape_groups[group_id].stroke_color = (Constant*)&scene.buffer[buffer_size];
863
- buffer_size += align(sizeof(Constant)); // d_color
864
- break;
865
- } case ColorType::LinearGradient: {
866
- if (alloc_mode) scene.shape_groups[group_id].stroke_color = (LinearGradient*)&scene.buffer[buffer_size];
867
- buffer_size += align(sizeof(LinearGradient)); // color
868
- if (alloc_mode) scene.d_shape_groups[group_id].stroke_color = (LinearGradient*)&scene.buffer[buffer_size];
869
- buffer_size += align(sizeof(LinearGradient)); // d_color
870
-
871
- const LinearGradient *c_ = (const LinearGradient *)shape_group->stroke_color;
872
- LinearGradient *c = nullptr, *d_c = nullptr;
873
- if (alloc_mode) c = (LinearGradient *)scene.shape_groups[group_id].stroke_color;
874
- if (alloc_mode) d_c = (LinearGradient *)scene.d_shape_groups[group_id].stroke_color;
875
- if (alloc_mode) c->stop_offsets = (float*)&scene.buffer[buffer_size];
876
- buffer_size += align(sizeof(float) * c_->num_stops); // c->stop_offsets
877
- if (alloc_mode) c->stop_colors = (float*)&scene.buffer[buffer_size];
878
- buffer_size += align(sizeof(float) * 4 * c_->num_stops); // c->stop_colors
879
- if (alloc_mode) d_c->stop_offsets = (float*)&scene.buffer[buffer_size];
880
- buffer_size += align(sizeof(float) * c_->num_stops); // d_c->stop_offsets
881
- if (alloc_mode) d_c->stop_colors = (float*)&scene.buffer[buffer_size];
882
- buffer_size += align(sizeof(float) * 4 * c_->num_stops); // d_c->stop_colors
883
- break;
884
- } case ColorType::RadialGradient: {
885
- if (alloc_mode) scene.shape_groups[group_id].stroke_color = (RadialGradient*)&scene.buffer[buffer_size];
886
- buffer_size += align(sizeof(RadialGradient)); // color
887
- if (alloc_mode) scene.d_shape_groups[group_id].stroke_color = (RadialGradient*)&scene.buffer[buffer_size];
888
- buffer_size += align(sizeof(RadialGradient)); // d_color
889
-
890
- const RadialGradient *c_ = (const RadialGradient *)shape_group->stroke_color;
891
- RadialGradient *c = nullptr, *d_c = nullptr;
892
- if (alloc_mode) c = (RadialGradient *)scene.shape_groups[group_id].stroke_color;
893
- if (alloc_mode) d_c = (RadialGradient *)scene.d_shape_groups[group_id].stroke_color;
894
- if (alloc_mode) c->stop_offsets = (float*)&scene.buffer[buffer_size];
895
- buffer_size += align(sizeof(float) * c_->num_stops); // c->stop_offsets
896
- if (alloc_mode) c->stop_colors = (float*)&scene.buffer[buffer_size];
897
- buffer_size += align(sizeof(float) * 4 * c_->num_stops); // c->stop_colors
898
- if (alloc_mode) d_c->stop_offsets = (float*)&scene.buffer[buffer_size];
899
- buffer_size += align(sizeof(float) * c_->num_stops); // d_c->stop_offsets
900
- if (alloc_mode) d_c->stop_colors = (float*)&scene.buffer[buffer_size];
901
- buffer_size += align(sizeof(float) * 4 * c_->num_stops); // d_c->stop_colors
902
- break;
903
- } default: {
904
- assert(false);
905
- }
906
- }
907
- } else {
908
- if (alloc_mode) scene.shape_groups[group_id].stroke_color = nullptr;
909
- if (alloc_mode) scene.d_shape_groups[group_id].stroke_color = nullptr;
910
- }
911
- if (alloc_mode) scene.shape_groups[group_id].shape_ids = (int*)&scene.buffer[buffer_size];
912
- buffer_size += align(sizeof(int) * shape_group->num_shapes); // shape_group->shape_ids
913
- if (alloc_mode) scene.shape_groups_bvh_nodes[group_id] = (BVHNode*)&scene.buffer[buffer_size];
914
- buffer_size += align(sizeof(BVHNode) * (2 * shape_group->num_shapes - 1)); // scene.shape_groups_bvh_nodes[group_id]
915
- }
916
- return buffer_size;
917
- }
918
-
919
- Scene::Scene(int canvas_width,
920
- int canvas_height,
921
- const std::vector<const Shape *> &shape_list,
922
- const std::vector<const ShapeGroup *> &shape_group_list,
923
- const Filter &filter,
924
- bool use_gpu,
925
- int gpu_index)
926
- : canvas_width(canvas_width),
927
- canvas_height(canvas_height),
928
- num_shapes(shape_list.size()),
929
- num_shape_groups(shape_group_list.size()),
930
- use_gpu(use_gpu),
931
- gpu_index(gpu_index) {
932
- if (num_shapes == 0) {
933
- return;
934
- }
935
- // Shape groups may reuse some of the shapes,
936
- // so record the total number of shape instances.
937
- int num_total_shapes = 0;
938
- for (const ShapeGroup *sg : shape_group_list) {
939
- num_total_shapes += sg->num_shapes;
940
- }
941
- this->num_total_shapes = num_total_shapes;
942
-
943
- // Memory initialization
944
- #ifdef __NVCC__
945
- int old_device_id = -1;
946
- #endif
947
- if (use_gpu) {
948
- #ifdef __NVCC__
949
- checkCuda(cudaGetDevice(&old_device_id));
950
- if (gpu_index != -1) {
951
- checkCuda(cudaSetDevice(gpu_index));
952
- }
953
- #else
954
- throw std::runtime_error("diffvg not compiled with GPU");
955
- assert(false);
956
- #endif
957
- }
958
-
959
- size_t buffer_size = allocate_buffers<false /*alloc_mode*/>(*this, shape_list, shape_group_list);
960
- // Allocate a huge buffer for everything
961
- allocate<uint8_t>(use_gpu, buffer_size, &buffer);
962
- // memset(buffer, 111, buffer_size);
963
- // Actually distribute the buffer
964
- allocate_buffers<true /*alloc_mode*/>(*this, shape_list, shape_group_list);
965
- copy_and_init_shapes(*this, shape_list);
966
- copy_and_init_shape_groups(*this, shape_group_list);
967
-
968
- std::vector<float> shape_length_list = compute_shape_length(shape_list);
969
- // Copy shape_length
970
- if (use_gpu) {
971
- #ifdef __NVCC__
972
- checkCuda(cudaMemcpy(this->shapes_length, &shape_length_list[0], num_shapes * sizeof(float), cudaMemcpyHostToDevice));
973
- #else
974
- throw std::runtime_error("diffvg not compiled with GPU");
975
- assert(false);
976
- #endif
977
- } else {
978
- memcpy(this->shapes_length, &shape_length_list[0], num_shapes * sizeof(float));
979
- }
980
- build_shape_cdfs(*this, shape_group_list, shape_length_list);
981
- build_path_cdfs(*this, shape_list, shape_length_list);
982
- compute_bounding_boxes(*this, shape_list, shape_group_list);
983
-
984
- // Filter initialization
985
- *(this->filter) = filter;
986
- this->d_filter->radius = 0;
987
-
988
- if (use_gpu) {
989
- #ifdef __NVCC__
990
- if (old_device_id != -1) {
991
- checkCuda(cudaSetDevice(old_device_id));
992
- }
993
- #else
994
- throw std::runtime_error("diffvg not compiled with GPU");
995
- assert(false);
996
- #endif
997
- }
998
- }
999
-
1000
- Scene::~Scene() {
1001
- if (num_shapes == 0) {
1002
- return;
1003
- }
1004
- if (use_gpu) {
1005
- #ifdef __NVCC__
1006
- int old_device_id = -1;
1007
- checkCuda(cudaGetDevice(&old_device_id));
1008
- if (gpu_index != -1) {
1009
- checkCuda(cudaSetDevice(gpu_index));
1010
- }
1011
-
1012
- checkCuda(cudaFree(buffer));
1013
-
1014
- checkCuda(cudaSetDevice(old_device_id));
1015
- #else
1016
- // Don't throw, because C++ doesn't want destructors to throw.
1017
- std::cerr << "diffvg not compiled with GPU";
1018
- exit(1);
1019
- #endif
1020
- } else {
1021
- free(buffer);
1022
- }
1023
- }
1024
-
1025
- Shape Scene::get_d_shape(int shape_id) const {
1026
- return d_shapes[shape_id];
1027
- }
1028
-
1029
- ShapeGroup Scene::get_d_shape_group(int group_id) const {
1030
- return d_shape_groups[group_id];
1031
- }
1032
-
1033
- float Scene::get_d_filter_radius() const {
1034
- return d_filter->radius;
1035
- }
 
spaces/CVPR/LIVE/thrust/thrust/system/cpp/execution_policy.h DELETED
@@ -1,157 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- /*! \file thrust/system/cpp/execution_policy.h
20
- * \brief Execution policies for Thrust's standard C++ system.
21
- */
22
-
23
- #include <thrust/detail/config.h>
24
-
25
- // get the execution policies definitions first
26
- #include <thrust/system/cpp/detail/execution_policy.h>
27
-
28
- // get the definition of par
29
- #include <thrust/system/cpp/detail/par.h>
30
-
31
- // now get all the algorithm definitions
32
-
33
- #include <thrust/system/cpp/detail/adjacent_difference.h>
34
- #include <thrust/system/cpp/detail/assign_value.h>
35
- #include <thrust/system/cpp/detail/binary_search.h>
36
- #include <thrust/system/cpp/detail/copy.h>
37
- #include <thrust/system/cpp/detail/copy_if.h>
38
- #include <thrust/system/cpp/detail/count.h>
39
- #include <thrust/system/cpp/detail/equal.h>
40
- #include <thrust/system/cpp/detail/extrema.h>
41
- #include <thrust/system/cpp/detail/fill.h>
42
- #include <thrust/system/cpp/detail/find.h>
43
- #include <thrust/system/cpp/detail/for_each.h>
44
- #include <thrust/system/cpp/detail/gather.h>
45
- #include <thrust/system/cpp/detail/generate.h>
46
- #include <thrust/system/cpp/detail/get_value.h>
47
- #include <thrust/system/cpp/detail/inner_product.h>
48
- #include <thrust/system/cpp/detail/iter_swap.h>
49
- #include <thrust/system/cpp/detail/logical.h>
50
- #include <thrust/system/cpp/detail/malloc_and_free.h>
51
- #include <thrust/system/cpp/detail/merge.h>
52
- #include <thrust/system/cpp/detail/mismatch.h>
53
- #include <thrust/system/cpp/detail/partition.h>
54
- #include <thrust/system/cpp/detail/reduce.h>
55
- #include <thrust/system/cpp/detail/reduce_by_key.h>
56
- #include <thrust/system/cpp/detail/remove.h>
57
- #include <thrust/system/cpp/detail/replace.h>
58
- #include <thrust/system/cpp/detail/reverse.h>
59
- #include <thrust/system/cpp/detail/scan.h>
60
- #include <thrust/system/cpp/detail/scan_by_key.h>
61
- #include <thrust/system/cpp/detail/scatter.h>
62
- #include <thrust/system/cpp/detail/sequence.h>
63
- #include <thrust/system/cpp/detail/set_operations.h>
64
- #include <thrust/system/cpp/detail/sort.h>
65
- #include <thrust/system/cpp/detail/swap_ranges.h>
66
- #include <thrust/system/cpp/detail/tabulate.h>
67
- #include <thrust/system/cpp/detail/transform.h>
68
- #include <thrust/system/cpp/detail/transform_reduce.h>
69
- #include <thrust/system/cpp/detail/transform_scan.h>
70
- #include <thrust/system/cpp/detail/uninitialized_copy.h>
71
- #include <thrust/system/cpp/detail/uninitialized_fill.h>
72
- #include <thrust/system/cpp/detail/unique.h>
73
- #include <thrust/system/cpp/detail/unique_by_key.h>
74
-
75
-
76
- // define these entities here for the purpose of Doxygenating them
77
- // they are actually defined elsewhere
78
- #if 0
79
- namespace thrust
80
- {
81
- namespace system
82
- {
83
- namespace cpp
84
- {
85
-
86
-
87
- /*! \addtogroup execution_policies
88
- * \{
89
- */
90
-
91
-
92
- /*! \p thrust::system::cpp::execution_policy is the base class for all Thrust parallel execution
93
- * policies which are derived from Thrust's standard C++ backend system.
94
- */
95
- template<typename DerivedPolicy>
96
- struct execution_policy : thrust::execution_policy<DerivedPolicy>
97
- {};
98
-
99
-
100
- /*! \p thrust::system::cpp::tag is a type representing Thrust's standard C++ backend system in C++'s type system.
101
- * Iterators "tagged" with a type which is convertible to \p cpp::tag assert that they may be
102
- * "dispatched" to algorithm implementations in the \p cpp system.
103
- */
104
- struct tag : thrust::system::cpp::execution_policy<tag> { unspecified };
105
-
106
-
107
- /*!
108
- * \p thrust::system::cpp::par is the parallel execution policy associated with Thrust's standard
109
- * C++ backend system.
110
- *
111
- * Instead of relying on implicit algorithm dispatch through iterator system tags, users may
112
- * directly target Thrust's C++ backend system by providing \p thrust::cpp::par as an algorithm
113
- * parameter.
114
- *
115
- * Explicit dispatch can be useful in avoiding the introduction of data copies into containers such
116
- * as \p thrust::cpp::vector.
117
- *
118
- * The type of \p thrust::cpp::par is implementation-defined.
119
- *
120
- * The following code snippet demonstrates how to use \p thrust::cpp::par to explicitly dispatch an
121
- * invocation of \p thrust::for_each to the standard C++ backend system:
122
- *
123
- * \code
124
- * #include <thrust/for_each.h>
125
- * #include <thrust/system/cpp/execution_policy.h>
126
- * #include <cstdio>
127
- *
128
- * struct printf_functor
129
- * {
130
- * __host__ __device__
131
- * void operator()(int x)
132
- * {
133
- * printf("%d\n", x);
134
- * }
135
- * };
136
- * ...
137
- * int vec[3];
138
- * vec[0] = 0; vec[1] = 1; vec[2] = 2;
139
- *
140
- * thrust::for_each(thrust::cpp::par, vec, vec + 3, printf_functor());
141
- *
142
- * // 0 1 2 is printed to standard output in some unspecified order
143
- * \endcode
144
- */
145
- static const unspecified par;
146
-
147
-
148
- /*! \}
149
- */
150
-
151
-
152
- } // end cpp
153
- } // end system
154
- } // end thrust
155
- #endif
156
-
157
-
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/uninitialized_fill.h DELETED
@@ -1,22 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system has no special uninitialized_fill functions
22
-
 
spaces/Catspin/2_ai_chat/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: Boxai
3
- emoji: 👀
4
- colorFrom: purple
5
- colorTo: yellow
6
- sdk: static
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/attrs/exceptions.py DELETED
@@ -1,3 +0,0 @@
1
- # SPDX-License-Identifier: MIT
2
-
3
- from attr.exceptions import * # noqa
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/_textwrap.py DELETED
@@ -1,49 +0,0 @@
1
- import textwrap
2
- import typing as t
3
- from contextlib import contextmanager
4
-
5
-
6
- class TextWrapper(textwrap.TextWrapper):
7
- def _handle_long_word(
8
- self,
9
- reversed_chunks: t.List[str],
10
- cur_line: t.List[str],
11
- cur_len: int,
12
- width: int,
13
- ) -> None:
14
- space_left = max(width - cur_len, 1)
15
-
16
- if self.break_long_words:
17
- last = reversed_chunks[-1]
18
- cut = last[:space_left]
19
- res = last[space_left:]
20
- cur_line.append(cut)
21
- reversed_chunks[-1] = res
22
- elif not cur_line:
23
- cur_line.append(reversed_chunks.pop())
24
-
25
- @contextmanager
26
- def extra_indent(self, indent: str) -> t.Iterator[None]:
27
- old_initial_indent = self.initial_indent
28
- old_subsequent_indent = self.subsequent_indent
29
- self.initial_indent += indent
30
- self.subsequent_indent += indent
31
-
32
- try:
33
- yield
34
- finally:
35
- self.initial_indent = old_initial_indent
36
- self.subsequent_indent = old_subsequent_indent
37
-
38
- def indent_only(self, text: str) -> str:
39
- rv = []
40
-
41
- for idx, line in enumerate(text.splitlines()):
42
- indent = self.initial_indent
43
-
44
- if idx > 0:
45
- indent = self.subsequent_indent
46
-
47
- rv.append(f"{indent}{line}")
48
-
49
- return "\n".join(rv)
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/libarchive.py DELETED
@@ -1,217 +0,0 @@
1
- from __future__ import absolute_import, division, print_function
2
-
3
- from contextlib import contextmanager
4
- from ctypes import (
5
- CFUNCTYPE,
6
- POINTER,
7
- c_int,
8
- c_longlong,
9
- c_void_p,
10
- cast,
11
- create_string_buffer,
12
- )
13
-
14
- import libarchive
15
- import libarchive.ffi as ffi
16
-
17
- from fsspec import open_files
18
- from fsspec.archive import AbstractArchiveFileSystem
19
- from fsspec.implementations.memory import MemoryFile
20
- from fsspec.utils import DEFAULT_BLOCK_SIZE
21
-
22
- # Libarchive requires seekable files or memory only for certain archive
23
- # types. However, since we read the directory first to cache the contents
24
- # and also allow random access to any file, the file-like object needs
25
- # to be seekable no matter what.
26
-
27
- # Seek call-backs (not provided in the libarchive python wrapper)
28
- SEEK_CALLBACK = CFUNCTYPE(c_longlong, c_int, c_void_p, c_longlong, c_int)
29
- read_set_seek_callback = ffi.ffi(
30
- "read_set_seek_callback", [ffi.c_archive_p, SEEK_CALLBACK], c_int, ffi.check_int
31
- )
32
- new_api = hasattr(ffi, "NO_OPEN_CB")
33
-
34
-
35
- @contextmanager
36
- def custom_reader(file, format_name="all", filter_name="all", block_size=ffi.page_size):
37
- """Read an archive from a seekable file-like object.
38
-
39
- The `file` object must support the standard `readinto` and 'seek' methods.
40
- """
41
- buf = create_string_buffer(block_size)
42
- buf_p = cast(buf, c_void_p)
43
-
44
- def read_func(archive_p, context, ptrptr):
45
- # readinto the buffer, returns number of bytes read
46
- length = file.readinto(buf)
47
- # write the address of the buffer into the pointer
48
- ptrptr = cast(ptrptr, POINTER(c_void_p))
49
- ptrptr[0] = buf_p
50
- # tell libarchive how much data was written into the buffer
51
- return length
52
-
53
- def seek_func(archive_p, context, offset, whence):
54
- file.seek(offset, whence)
55
- # tell libarchive the current position
56
- return file.tell()
57
-
58
- read_cb = ffi.READ_CALLBACK(read_func)
59
- seek_cb = SEEK_CALLBACK(seek_func)
60
-
61
- if new_api:
62
- open_cb = ffi.NO_OPEN_CB
63
- close_cb = ffi.NO_CLOSE_CB
64
- else:
65
- open_cb = libarchive.read.OPEN_CALLBACK(ffi.VOID_CB)
66
- close_cb = libarchive.read.CLOSE_CALLBACK(ffi.VOID_CB)
67
-
68
- with libarchive.read.new_archive_read(format_name, filter_name) as archive_p:
69
- read_set_seek_callback(archive_p, seek_cb)
70
- ffi.read_open(archive_p, None, open_cb, read_cb, close_cb)
71
- yield libarchive.read.ArchiveRead(archive_p)
72
-
73
-
74
- class LibArchiveFileSystem(AbstractArchiveFileSystem):
75
- """Compressed archives as a file-system (read-only)
76
-
77
- Supports the following formats:
78
- tar, pax , cpio, ISO9660, zip, mtree, shar, ar, raw, xar, lha/lzh, rar
79
- Microsoft CAB, 7-Zip, WARC
80
-
81
- See the libarchive documentation for further restrictions.
82
- https://www.libarchive.org/
83
-
84
- Keeps the file object open while the instance lives. It only works with seekable
85
- file-like objects. In case the filesystem does not support this kind of
86
- file object, it is recommended to cache locally.
87
-
88
- This class is pickleable, but not necessarily thread-safe (depends on the
89
- platform). See libarchive documentation for details.
90
- """
91
-
92
- root_marker = ""
93
- protocol = "libarchive"
94
- cachable = False
95
-
96
- def __init__(
97
- self,
98
- fo="",
99
- mode="r",
100
- target_protocol=None,
101
- target_options=None,
102
- block_size=DEFAULT_BLOCK_SIZE,
103
- **kwargs,
104
- ):
105
- """
106
- Parameters
107
- ----------
108
- fo: str or file-like
109
- Contains ZIP, and must exist. If a str, will fetch file using
110
- :meth:`~fsspec.open_files`, which must return one file exactly.
111
- mode: str
112
- Currently, only 'r' accepted
113
- target_protocol: str (optional)
114
- If ``fo`` is a string, this value can be used to override the
115
- FS protocol inferred from a URL
116
- target_options: dict (optional)
117
- Kwargs passed when instantiating the target FS, if ``fo`` is
118
- a string.
119
- """
120
- super().__init__(self, **kwargs)
121
- if mode != "r":
122
- raise ValueError("Only read from archive files accepted")
123
- if isinstance(fo, str):
124
- files = open_files(fo, protocol=target_protocol, **(target_options or {}))
125
- if len(files) != 1:
126
- raise ValueError(
127
- 'Path "{}" did not resolve to exactly one'
128
- ' file: "{}"'.format(fo, files)
129
- )
130
- fo = files[0]
131
- self.of = fo
132
- self.fo = fo.__enter__() # the whole instance is a context
133
- self.block_size = block_size
134
- self.dir_cache = None
135
-
136
- @contextmanager
137
- def _open_archive(self):
138
- self.fo.seek(0)
139
- with custom_reader(self.fo, block_size=self.block_size) as arc:
140
- yield arc
141
-
142
- @classmethod
143
- def _strip_protocol(cls, path):
144
- # file paths are always relative to the archive root
145
- return super()._strip_protocol(path).lstrip("/")
146
-
147
- def _get_dirs(self):
148
- fields = {
149
- "name": "pathname",
150
- "size": "size",
151
- "created": "ctime",
152
- "mode": "mode",
153
- "uid": "uid",
154
- "gid": "gid",
155
- "mtime": "mtime",
156
- }
157
-
158
- if self.dir_cache is not None:
159
- return
160
-
161
- self.dir_cache = {}
162
- list_names = []
163
- with self._open_archive() as arc:
164
- for entry in arc:
165
- if not entry.isdir and not entry.isfile:
166
- # Skip symbolic links, fifo entries, etc.
167
- continue
168
- self.dir_cache.update(
169
- {
170
- dirname
171
- + "/": {"name": dirname + "/", "size": 0, "type": "directory"}
172
- for dirname in self._all_dirnames(set(entry.name))
173
- }
174
- )
175
- f = {key: getattr(entry, fields[key]) for key in fields}
176
- f["type"] = "directory" if entry.isdir else "file"
177
- list_names.append(entry.name)
178
-
179
- self.dir_cache[f["name"]] = f
180
- # libarchive does not seem to return an entry for the directories (at least
181
- # not in all formats), so get the directory names from the file names
182
- self.dir_cache.update(
183
- {
184
- dirname + "/": {"name": dirname + "/", "size": 0, "type": "directory"}
185
- for dirname in self._all_dirnames(list_names)
186
- }
187
- )
188
-
189
- def _open(
190
- self,
191
- path,
192
- mode="rb",
193
- block_size=None,
194
- autocommit=True,
195
- cache_options=None,
196
- **kwargs,
197
- ):
198
- path = self._strip_protocol(path)
199
- if mode != "rb":
200
- raise NotImplementedError
201
-
202
- data = bytes()
203
- with self._open_archive() as arc:
204
- for entry in arc:
205
- if entry.pathname != path:
206
- continue
207
-
208
- if entry.size == 0:
209
- # empty file, so there are no blocks
210
- break
211
-
212
- for block in entry.get_blocks(entry.size):
213
- data = block
214
- break
215
- else:
216
- raise ValueError
217
- return MemoryFile(fs=self, path=path, data=data)
 
spaces/DaleChen/AutoGPT/autogpt/commands/times.py DELETED
@@ -1,10 +0,0 @@
1
- from datetime import datetime
2
-
3
-
4
- def get_datetime() -> str:
5
- """Return the current date and time
6
-
7
- Returns:
8
- str: The current date and time
9
- """
10
- return "Current date and time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S")